ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth)
---|---|---|---|---|---|---|---
2,000 | cpp | tensorflow/tensorflow | spmd_partitioner_util | third_party/xla/xla/service/spmd/spmd_partitioner_util.cc | third_party/xla/xla/service/spmd/spmd_partitioner_util_test.cc | #ifndef XLA_SERVICE_SPMD_SPMD_PARTITIONER_UTIL_H_
#define XLA_SERVICE_SPMD_SPMD_PARTITIONER_UTIL_H_
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/strings/str_replace.h"
#include "absl/utility/utility.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/literal_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/spmd/spmd_partitioner.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace spmd {
template <typename T>
using IsCompOrCompBuilder =
typename std::enable_if_t<std::is_same<HloComputation, T>::value ||
std::is_same<HloComputation::Builder, T>::value ||
std::is_same<SpmdBuilder, T>::value>;
struct GatherScatterParallelDimSharding {
HloSharding indices_sharding;
HloSharding operand_sharding;
};
bool HasReplicatedSharding(const HloSharding& sharding);
template <typename T, typename = IsCompOrCompBuilder<T>>
HloInstruction* CreateConstantBase(const Shape& shape, Literal value, T* b,
Literal (*literal_creator)(Literal,
PrimitiveType)) {
if (shape.IsTuple()) {
std::vector<HloInstruction*> elements;
for (int64_t i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
elements.push_back(
CreateConstantBase(ShapeUtil::GetTupleElementShape(shape, i),
value.Clone(), b, literal_creator));
}
return b->AddInstruction(HloInstruction::CreateTuple(elements));
}
if (shape.IsToken()) {
return b->AddInstruction(HloInstruction::CreateToken());
}
auto c = b->AddInstruction(HloInstruction::CreateConstant(
literal_creator(std::move(value), shape.element_type())));
if (shape.rank() == 0) {
return c;
}
return b->AddInstruction(HloInstruction::CreateBroadcast(shape, c, {}));
}
template <typename T, typename = IsCompOrCompBuilder<T>>
HloInstruction* CreateConstant(const Shape& shape, Literal value, T* b) {
auto identity = [](Literal value, PrimitiveType primitive_type) {
CHECK(ShapeUtil::IsScalarWithElementType(value.shape(), primitive_type));
return value;
};
return CreateConstantBase(shape, std::move(value), b, identity);
}
template <typename T, typename = IsCompOrCompBuilder<T>>
HloInstruction* CreateZero(const Shape& shape, T* b) {
auto zero = [](Literal /*unused*/, PrimitiveType primitive_type) {
return LiteralUtil::Zero(primitive_type);
};
return CreateConstantBase(shape, Literal(), b, zero);
}
template <typename T, typename = IsCompOrCompBuilder<T>>
HloInstruction* CreateOne(const Shape& shape, T* b) {
auto one = [](Literal /*unused*/, PrimitiveType primitive_type) {
return LiteralUtil::One(primitive_type);
};
return CreateConstantBase(shape, Literal(), b, one);
}
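// Usage sketch (added, not part of the original header): these helpers recurse
// through tuple shapes, emit a token for token shapes, and broadcast the
// scalar literal to array shapes. Assuming an SpmdBuilder* b is in scope:
//   Shape s = ShapeUtil::MakeShape(F32, {8, 128});
//   HloInstruction* zeros = CreateZero(s, b);  // f32[8,128] filled with 0
//   HloInstruction* ones = CreateOne(s, b);    // f32[8,128] filled with 1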
template <typename NativeT, typename T, typename = IsCompOrCompBuilder<T>>
HloInstruction* CreateR0WithType(PrimitiveType type, NativeT value, T* b) {
auto literal = LiteralUtil::CreateR0(value)
.ConvertToShape(ShapeUtil::MakeShape(type, {}))
.value();
return b->AddInstruction(HloInstruction::CreateConstant(std::move(literal)));
}
template <typename T, typename = IsCompOrCompBuilder<T>>
inline HloInstruction* CreateFirstWithType(PrimitiveType type, T* b) {
if (type == F32) {
auto float_pad_value = std::numeric_limits<float>::quiet_NaN();
return CreateR0WithType(type, -float_pad_value, b);
}
auto literal = LiteralUtil::MinValue(type);
return b->AddInstruction(HloInstruction::CreateConstant(std::move(literal)));
}
template <typename T, typename = IsCompOrCompBuilder<T>>
inline HloInstruction* CreateLastWithType(PrimitiveType type, T* b) {
if (type == F32) {
auto float_pad_value = std::numeric_limits<float>::quiet_NaN();
return CreateR0WithType(type, float_pad_value, b);
}
auto literal = LiteralUtil::MaxValue(type);
return b->AddInstruction(HloInstruction::CreateConstant(std::move(literal)));
}
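// Added commentary: for F32, CreateFirstWithType/CreateLastWithType pad with
// -NaN/NaN instead of min/max, presumably so padding orders before/after all
// real values under a NaN-safe comparator (cf. IsNanSafeGt declared below);
// other types fall back to LiteralUtil::MinValue/MaxValue.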
HloComputation* MakeBinaryAdd(PrimitiveType type, HloModule* module);
bool EvenlyPartitions(const Shape& shape, const HloSharding& sharding);
Shape MakePartitionedShape(const Shape& shape, const HloSharding& sharding);
int64_t ShapeSizeInBytes(const Shape& shape);
template <typename NativeT>
HloInstruction* TableLookup(absl::Span<const NativeT> table, PrimitiveType type,
HloInstruction* ordinal, SpmdBuilder* b) {
HloInstruction* table_hlo = b->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<NativeT>(table)));
HloInstruction* value = b->AddInstruction(HloInstruction::CreateDynamicSlice(
ShapeUtil::MakeShape(type, {1}), table_hlo, {ordinal}, {1}));
return b->AddInstruction(
HloInstruction::CreateReshape(ShapeUtil::MakeShape(type, {}), value));
}
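// Usage sketch (added): TableLookup materializes `table` as a rank-1 constant,
// dynamic-slices a single element at `ordinal`, and reshapes it to a scalar.
// A typical use is mapping a partition ordinal to a precomputed value, e.g.:
//   std::vector<int32_t> offsets = {0, 4, 8, 12};  // hypothetical table
//   HloInstruction* offset =
//       TableLookup<int32_t>(offsets, S32, partition_ordinal, b);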
Shape MakeNonPaddedShapeForGivenPartition(const Shape& shape,
const HloSharding& sharding,
int64_t partition_id);
std::vector<HloInstruction*> MakePartitionOffsets(
const Shape& shape, const HloSharding& sharding,
HloInstruction* partition_id, SpmdBuilder* b,
absl::Span<const int64_t> dims = {});
std::vector<HloInstruction*> MakeTiledPartitionOrdinals(
const HloSharding& sharding, HloInstruction* partition_id, SpmdBuilder* b);
template <typename T, typename = IsCompOrCompBuilder<T>>
HloInstruction* PadToShape(HloInstruction* hlo, const Shape& padded_shape, T* b,
std::optional<Literal> value = std::nullopt) {
if (ShapeUtil::Compatible(hlo->shape(), padded_shape)) {
return hlo;
}
PaddingConfig padding_config;
for (int64_t i = 0; i < padded_shape.rank(); ++i) {
auto padding_config_dim = padding_config.add_dimensions();
padding_config_dim->set_edge_padding_low(0);
padding_config_dim->set_interior_padding(0);
padding_config_dim->set_edge_padding_high(padded_shape.dimensions(i) -
hlo->shape().dimensions(i));
}
const Shape padding_shape =
ShapeUtil::MakeScalarShape(hlo->shape().element_type());
HloInstruction* padding =
value.has_value() ? CreateConstant(padding_shape, std::move(*value), b)
: CreateZero(padding_shape, b);
return b->AddInstruction(
HloInstruction::CreatePad(padded_shape, hlo, padding, padding_config));
}
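// Example (added): padding f32[5] to f32[8] with PadToShape yields a
// PaddingConfig of {low=0, interior=0, high=3} on dimension 0, padding with
// zeros unless an explicit pad-value literal is supplied.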
Shape GetPaddedShapeForUnevenPartitioning(const Shape& base_shape,
const HloSharding& sharding);
template <typename T, typename = IsCompOrCompBuilder<T>>
HloInstruction* PadBaseShapeBeforeUnevenTiledSharding(
HloInstruction* hlo, const HloSharding& sharding, T* b,
std::optional<Literal> value = std::nullopt) {
auto padded_base_shape =
GetPaddedShapeForUnevenPartitioning(hlo->shape(), sharding);
if (ShapeUtil::Compatible(padded_base_shape, hlo->shape())) {
return hlo;
}
return PadToShape(hlo, padded_base_shape, b, std::move(value));
}
std::optional<int64_t> UniqueTiledDim(const HloSharding& sharding);
class OffsetCalculation;
class MultiplyAddDivideOffsetCalculation {
public:
MultiplyAddDivideOffsetCalculation()
: multiplier_(0), offset_(0), divisor_(1) {}
MultiplyAddDivideOffsetCalculation(int64_t multiplier, int64_t offset,
int64_t divisor);
OffsetCalculation operator-(
const MultiplyAddDivideOffsetCalculation& other) const;
OffsetCalculation operator+(
const MultiplyAddDivideOffsetCalculation& other) const;
bool operator==(const MultiplyAddDivideOffsetCalculation& other) const {
return multiplier_ == other.multiplier_ && offset_ == other.offset_ &&
divisor_ == other.divisor_;
}
bool IsConstant() const { return multiplier_ == 0; }
void Simplify();
int64_t Calculate(int64_t shard_ordinal) const;
HloInstruction* Calculate(HloInstruction* shard_ordinal,
SpmdBuilder* b) const;
int64_t MaxInRange(int64_t start_ordinal, int64_t limit_ordinal) const;
private:
int64_t multiplier_;
int64_t offset_;
int64_t divisor_;
};
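// Added commentary: judging by its name and members, Calculate(shard_ordinal)
// evaluates roughly (multiplier_ * shard_ordinal + offset_) / divisor_, so the
// default-constructed instance (0, 0, 1) is the constant 0 and IsConstant()
// holds exactly when multiplier_ == 0.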
class OffsetCalculation {
public:
OffsetCalculation() : opcode_(HloOpcode::kCopy), copy_from_() {}
explicit OffsetCalculation(
const MultiplyAddDivideOffsetCalculation& copy_from)
: opcode_(HloOpcode::kCopy), copy_from_(copy_from) {}
OffsetCalculation(const OffsetCalculation& copy_from) { *this = copy_from; }
OffsetCalculation(HloOpcode opcode,
const MultiplyAddDivideOffsetCalculation& lhs,
const MultiplyAddDivideOffsetCalculation& rhs)
: opcode_(opcode),
lhs_(std::make_unique<OffsetCalculation>(lhs)),
rhs_(std::make_unique<OffsetCalculation>(rhs)) {}
OffsetCalculation(HloOpcode opcode, const OffsetCalculation& lhs,
const OffsetCalculation& rhs)
: opcode_(opcode),
lhs_(std::make_unique<OffsetCalculation>(lhs)),
rhs_(std::make_unique<OffsetCalculation>(rhs)) {}
OffsetCalculation& operator=(const OffsetCalculation& other);
bool IsConstant() const;
OffsetCalculation operator-(const OffsetCalculation& other) const;
OffsetCalculation operator+(const OffsetCalculation& other) const;
bool operator==(const OffsetCalculation& other) const;
int64_t Calculate(int64_t shard_ordinal) const;
HloInstruction* Calculate(HloInstruction* shard_ordinal,
SpmdBuilder* b) const;
int64_t MaxInRange(int64_t start_ordinal, int64_t limit_ordinal) const;
private:
HloOpcode opcode_;
std::unique_ptr<OffsetCalculation> lhs_;
std::unique_ptr<OffsetCalculation> rhs_;
MultiplyAddDivideOffsetCalculation copy_from_;
};
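// Added commentary: OffsetCalculation is a small expression tree over
// MultiplyAddDivideOffsetCalculation leaves. opcode_ == kCopy marks a leaf
// that evaluates copy_from_; any other opcode combines the lhs_ and rhs_
// subtrees, e.g. the nodes produced by operator+ and operator- above.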
std::optional<HloInstruction*> ExchangeHalo(
HloInstruction* hlo, const OffsetCalculation& left_halo_size_function,
const OffsetCalculation& right_halo_size_function, int64_t dim,
const HloSharding& target,
const SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, SpmdBuilder* b);
std::optional<HloInstruction*> ExchangeHalo(
HloInstruction* hlo,
std::vector<OffsetCalculation> left_halo_size_functions,
std::vector<OffsetCalculation> right_halo_size_functions,
const HloSharding& target,
const SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, SpmdBuilder* b);
HloInstruction* ExchangeHaloCompact(
HloInstruction* hlo, const Shape& base_shape,
const OffsetCalculation& left_halo_size_function,
const OffsetCalculation& right_halo_size_function,
HloInstruction* pad_value, int64_t dim, const HloSharding& sharding,
HloInstruction* shard_ordinal,
const SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, SpmdBuilder* b);
std::optional<HloInstruction*> ExchangeHaloAndGetValidData(
HloInstruction* hlo, const Shape& base_shape,
const OffsetCalculation& left_halo_size_function,
const OffsetCalculation& right_halo_size_function,
int64_t explicit_left_padding_on_full_shape, int64_t padded_full_shape_size,
int64_t shard_size_with_halo, int64_t dim, const HloSharding& target,
HloInstruction* offset_on_padded_shape, HloInstruction* pad_value,
HloInstruction* partition_ordinal,
const SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, SpmdBuilder* b, bool mask_invalid_region = true,
bool force_mask_in_compact = false);
HloInstruction* HaloExchangeToPadOnLeft(PartitionedHlo& original,
absl::Span<const int64_t> dims);
bool IsNanSafeGt(HloComputation* computation);
std::optional<int64_t> GetKValueInTopKWhenPartitionSortDim(HloInstruction* hlo);
HloInstruction* SliceFirstK(HloInstruction* hlo, SpmdBuilder* builder,
int64_t slice_dim, int64_t k);
int64_t ShardCountAtDim(const HloSharding& sharding, int64_t dim);
std::optional<std::vector<std::pair<int64_t, int64_t>>>
GetReshardAllToAllSourceTargetDims(const HloSharding& source,
const HloSharding& target);
bool CanReshardWithCollectivePermute(const HloSharding& source,
const HloSharding& target);
hlo_sharding_util::GroupedSharding AlignGroupsWith(
hlo_sharding_util::GroupedSharding grouped_sharding,
const hlo_sharding_util::GroupedSharding& reference,
bool ignore_group_order = false);
HloSharding AlignShardingOnDims(const HloSharding& sharding,
absl::Span<const int64_t> sharding_dims,
const HloSharding& reference,
absl::Span<const int64_t> reference_dims);
std::optional<hlo_sharding_util::GroupedSharding> AlignGroupsWithIfCompatible(
hlo_sharding_util::GroupedSharding grouped_sharding,
const hlo_sharding_util::GroupedSharding& reference);
Shape GetPerGroupBaseShape(
const hlo_sharding_util::GroupedSharding& grouped_sharding,
const Shape& original_base_shape);
HloInstruction* GetInGroupPartitionId(
HloInstruction* partition_id,
const std::vector<std::vector<int64_t>>& device_groups, SpmdBuilder* b);
PartitionedHlo::PartitioningState CreatePerGroupPartitioningState(
const PartitionedHlo::PartitioningState& state,
const std::vector<std::vector<int64_t>>& device_groups, SpmdBuilder* b);
HloInstruction* PerGroupSliceFromReplicated(
HloInstruction* replicated, HloInstruction* partition_id,
const std::vector<std::vector<int64_t>>& device_groups,
absl::Span<const int64_t> group_dims,
absl::Span<const int64_t> group_dim_sizes, SpmdBuilder* b);
std::optional<HloInstruction*> PadFromPartialReplicateShape(
HloInstruction* hlo, const Shape& base_shape,
const HloSharding& src_sharding, const HloSharding& dst_sharding,
const std::vector<int64_t>& expand_tile_dims,
const SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, HloInstruction* partition_id, SpmdBuilder* b);
std::optional<HloSharding> PartialReplicateReshardCompatibleSharding(
const HloSharding& partial_sharding, const HloSharding& target_sharding);
std::optional<HloInstruction*> TileToPartialReplicateHaloExchange(
HloInstruction* hlo, const Shape& base_shape,
const HloSharding& src_sharding, const HloSharding& dst_sharding,
const std::vector<int64_t>& replicate_dims,
const SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, HloInstruction* partition_id, SpmdBuilder* b);
std::optional<std::vector<int64_t>> FindMatchingPartitionedDimsForGrouping(
const HloSharding& sharding,
const std::vector<std::vector<int64_t>>& device_groups);
HloSharding CreateMatchingShardingOnDims(const Shape& target_shape,
const HloSharding& source_sharding,
absl::Span<const int64_t> target_dims,
absl::Span<const int64_t> source_dims);
std::optional<GatherScatterParallelDimSharding>
GatherScatterOperandsShardedAcrossParallelDims(
const HloInstruction& operand, const HloInstruction& indices,
const hlo_sharding_util::GatherScatterParallelDims& parallel_dims);
int64_t FindRotateRightPattern(const HloInstruction* concat,
const HloInstruction* lhs,
const HloInstruction* rhs);
struct PadWithWrapPattern {
int64_t lhs_slice_start;
int64_t rhs_slice_start;
std::vector<const HloInstruction*> lhs_modifiers;
std::vector<const HloInstruction*> rhs_modifiers;
};
std::optional<PadWithWrapPattern> FindPadWithWrapPattern(
const HloInstruction* concat, const HloInstruction* lhs,
const HloInstruction* mid, const HloInstruction* rhs);
std::optional<PartitionedHlo::WindowedInputShardReturnValue>
ReshardDataForSlicing(absl::Span<const int64_t> strides,
absl::Span<const int64_t> starts,
absl::Span<const int64_t> limits,
PartitionedHlo to_reshard,
const HloSharding& target_sharding, SpmdBuilder* b);
HloInstruction* SliceDataFromWindowReshard(
const PartitionedHlo::WindowedInputShardReturnValue& reshard_operand,
absl::Span<const int64_t> strides, const Shape& base_shape,
const HloSharding& target_sharding, SpmdBuilder* b);
std::optional<PartitionedHlo::WindowedInputShardReturnValue> ReshardDataForPad(
HloInstruction* pad_value, PaddingConfig pc, PartitionedHlo to_reshard,
const HloSharding& target_sharding, SpmdBuilder* b);
HloInstruction* PadDataFromWindowReshard(
const PartitionedHlo::WindowedInputShardReturnValue& reshard_operand,
HloInstruction* pad_value, SpmdBuilder* b);
std::vector<std::vector<int64_t>> GetPartitionGroupsForReplication(
const HloSharding& sharding, absl::Span<const int64_t> replication_dims);
std::optional<IotaReplicaGroupList> GetIotaPartitionGroupsForReplication(
const HloSharding& sharding, absl::Span<const int64_t> replication_dims,
int64_t num_partitions);
CollectiveDeviceList ExpandPartitionGroupListAcrossReplicas(
IotaReplicaGroupList partition_group_list, int num_replicas,
int num_partitions);
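// Example (added, derived from the unit test below): expanding an
// IotaReplicaGroupList of 10 groups x 5 devices across num_replicas=2 and
// num_partitions=50 yields 20 groups of 5 devices, with the leading reshape
// dimension scaled from 2 to 4; a partition/replica mismatch (e.g. 60
// partitions) is a checked failure.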
namespace detail {
template <typename T, typename = void>
struct IsSpmdPartitioningVisitorPointerType : std::false_type {};
template <typename T>
struct IsSpmdPartitioningVisitorPointerType<
T, std::enable_if_t<std::is_same_v<std::remove_reference_t<T>,
SpmdPartitioningVisitor*>>>
: std::true_type {};
template <typename T>
constexpr bool IsSpmdPartitioningVisitorPointerType_v =
IsSpmdPartitioningVisitorPointerType<T>::value;
template <typename T>
using IsSpmdPartitioningVisitorPointer =
std::enable_if_t<IsSpmdPartitioningVisitorPointerType_v<T>, int>;
template <typename T>
using IsNotSpmdPartitioningVisitorPointer =
std::enable_if_t<!IsSpmdPartitioningVisitorPointerType_v<T>, int>;
template <typename T, typename = void>
struct IsSpmdBuilderPointerType : std::false_type {};
template <typename T>
struct IsSpmdBuilderPointerType<
T,
std::enable_if_t<std::is_same_v<std::remove_reference_t<T>, SpmdBuilder*>>>
: std::true_type {};
template <typename T>
constexpr bool IsSpmdBuilderPointerType_v = IsSpmdBuilderPointerType<T>::value;
template <typename T>
using IsSpmdBuilderPointer =
std::enable_if_t<IsSpmdBuilderPointerType_v<T>, int>;
template <typename T>
using IsNotSpmdBuilderPointer =
std::enable_if_t<!IsSpmdBuilderPointerType_v<T>, int>;
template <typename T, typename = void>
struct IsHloModulePointerType : std::false_type {};
template <typename T>
struct IsHloModulePointerType<
T, std::enable_if_t<std::is_same_v<std::remove_reference_t<T>, HloModule*>>>
: std::true_type {};
template <typename T>
constexpr bool IsHloModulePointerType_v = IsHloModulePointerType<T>::value;
template <typename T>
using IsHloModulePointer = std::enable_if_t<IsHloModulePointerType_v<T>, int>;
template <typename T>
using IsNotHloModulePointer =
std::enable_if_t<!IsHloModulePointerType_v<T>, int>;
template <typename T, typename = void>
struct IsPartitionedHloType : std::false_type {};
template <typename T>
struct IsPartitionedHloType<
T, std::enable_if_t<std::is_same_v<std::decay_t<T>, PartitionedHlo>>>
: std::true_type {};
template <typename T>
constexpr bool IsPartitionedHloType_v = IsPartitionedHloType<T>::value;
template <typename T>
using IsPartitionedHlo = std::enable_if_t<IsPartitionedHloType_v<T>, int>;
template <typename T>
using IsNotPartitionedHlo = std::enable_if_t<!IsPartitionedHloType_v<T>, int>;
template <typename T, typename = void>
struct is_iterable : std::false_type {};
template <typename T>
struct is_iterable<T, std::void_t<decltype(std::declval<T>().begin()),
decltype(std::declval<T>().end())>>
: std::true_type {};
template <typename T>
constexpr bool is_iterable_v = is_iterable<T>::value;
template <typename T>
using iterable_element_type =
std::decay_t<decltype(*std::declval<T>().begin())>;
template <typename T, typename = void>
struct IsIterablePartitionedHloContainerType : std::false_type {};
template <typename T>
struct IsIterablePartitionedHloContainerType<
T,
std::enable_if_t<is_iterable_v<T> &&
std::is_same_v<iterable_element_type<T>, PartitionedHlo>>>
: std::true_type {};
template <typename T>
constexpr bool IsIterablePartitionedHloContainerType_v =
IsIterablePartitionedHloContainerType<T>::value;
template <typename T>
using IsIterablePartitionedHloContainer =
std::enable_if_t<IsIterablePartitionedHloContainerType_v<T>, int>;
template <typename T>
using IsNotIterablePartitionedHloContainer =
std::enable_if_t<!IsIterablePartitionedHloContainerType_v<T>, int>;
template <typename Arg, IsPartitionedHlo<Arg> = 0>
std::decay_t<Arg> FakePartitionedHlo(Arg&& phlo, HloModule* module,
int* parameter_count,
SpmdPartitioningVisitor* fake_visitor) {
HloInstruction* param =
fake_visitor->builder()
->AddParameter(HloInstruction::CreateParameter( | #include "xla/service/spmd/spmd_partitioner_util.h"
#include <cstdint>
#include <optional>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/tile_assignment.h"
namespace xla {
namespace spmd {
namespace {
TEST(SPMDPartitionerUtilTest, PartialReplicateReshardCompatibleSharding1) {
HloSharding partial_sharding =
HloSharding::PartialTile(TileAssignment({1, 2, 2}));
const std::vector<HloSharding> target_shardings = {
HloSharding::IotaTile({2, 2}),
HloSharding::IotaTile({2, 2}, {2, 2}, {1, 0})};
for (const auto& target_sharding : target_shardings) {
auto result = PartialReplicateReshardCompatibleSharding(partial_sharding,
target_sharding);
EXPECT_EQ(result, target_shardings[1]);
}
partial_sharding =
HloSharding::PartialTile(TileAssignment({1, 2, 2}, {2, 2}, {1, 0}));
for (const auto& target_sharding : target_shardings) {
auto result = PartialReplicateReshardCompatibleSharding(partial_sharding,
target_sharding);
EXPECT_EQ(result, target_shardings[0]);
}
}
TEST(SPMDPartitionerUtilTest, PartialReplicateReshardCompatibleSharding2) {
HloSharding partial_sharding =
HloSharding::PartialTile(TileAssignment({2, 2, 8}));
const std::vector<HloSharding> target_shardings = {
HloSharding::PartialTile(
TileAssignment({4, 4, 2}, {2, 2, 2, 2, 2}, {0, 2, 1, 3, 4})),
HloSharding::PartialTile(
TileAssignment({4, 4, 2}, {2, 2, 2, 2, 2}, {0, 2, 1, 4, 3})),
HloSharding::PartialTile(
TileAssignment({4, 4, 2}, {2, 2, 2, 2, 2}, {0, 3, 1, 2, 4})),
HloSharding::PartialTile(
TileAssignment({4, 4, 2}, {2, 2, 2, 2, 2}, {0, 3, 1, 4, 2})),
HloSharding::PartialTile(
TileAssignment({4, 4, 2}, {2, 2, 2, 2, 2}, {0, 4, 1, 2, 3})),
HloSharding::PartialTile(
TileAssignment({4, 4, 2}, {2, 2, 2, 2, 2}, {0, 4, 1, 3, 2}))};
for (const auto& target_sharding : target_shardings) {
auto result = PartialReplicateReshardCompatibleSharding(partial_sharding,
target_sharding);
EXPECT_EQ(result, target_sharding);
}
}
TEST(SPMDPartitionerUtilTest, GetPartitionGroupsForReplication) {
HloSharding sharding = HloSharding::IotaTile({2, 2, 2});
std::vector<std::vector<int64_t>> actual_partition_groups =
GetPartitionGroupsForReplication(sharding, {1});
std::vector<std::vector<int64_t>> expected_partition_groups = {
{0, 2}, {1, 3}, {4, 6}, {5, 7}};
EXPECT_THAT(actual_partition_groups,
testing::ContainerEq(expected_partition_groups));
}
TEST(SPMDPartitionerUtilTest, GetPartitionGroupsForReplication2) {
HloSharding sharding = HloSharding::IotaTile({2, 2, 2}, {2, 2, 2}, {0, 2, 1});
std::vector<std::vector<int64_t>> actual_partition_groups =
GetPartitionGroupsForReplication(sharding, {0, 2});
std::vector<std::vector<int64_t>> expected_partition_groups = {{0, 2, 4, 6},
{1, 3, 5, 7}};
EXPECT_THAT(actual_partition_groups,
testing::ContainerEq(expected_partition_groups));
}
TEST(SPMDPartitionerUtilTest, GetIotaPartitionGroupsForReplication) {
HloSharding sharding = HloSharding::IotaTile({2, 2, 2});
std::optional<IotaReplicaGroupList> actual_partition_group_list =
GetIotaPartitionGroupsForReplication(sharding, {1}, 8);
EXPECT_TRUE(actual_partition_group_list.has_value());
EXPECT_EQ(actual_partition_group_list->num_replica_groups(), 4);
EXPECT_EQ(actual_partition_group_list->num_devices_per_group(), 2);
EXPECT_THAT(actual_partition_group_list->reshape_dims(),
testing::ElementsAre(2, 2, 2));
EXPECT_THAT(actual_partition_group_list->transpose_perm(),
testing::ElementsAre(0, 2, 1));
}
TEST(SPMDPartitionerUtilTest, GetIotaPartitionGroupsForReplication2) {
HloSharding sharding = HloSharding::IotaTile({2, 2, 2}, {2, 2, 2}, {0, 2, 1});
std::optional<IotaReplicaGroupList> actual_partition_group_list =
GetIotaPartitionGroupsForReplication(sharding, {0, 2}, 8);
EXPECT_TRUE(actual_partition_group_list.has_value());
EXPECT_EQ(actual_partition_group_list->num_replica_groups(), 2);
EXPECT_EQ(actual_partition_group_list->num_devices_per_group(), 4);
EXPECT_THAT(actual_partition_group_list->reshape_dims(),
testing::ElementsAre(4, 2));
EXPECT_THAT(actual_partition_group_list->transpose_perm(),
testing::ElementsAre(1, 0));
}
TEST(SPMDPartitionerUtilTest,
GetIotaPartitionGroupsForReplicationSkipWhenNotUsingAllPartitions) {
HloSharding simple_sharding = HloSharding::IotaTile({2, 2, 2});
std::optional<IotaReplicaGroupList> actual_partition_group_list =
GetIotaPartitionGroupsForReplication(simple_sharding, {1}, 16);
EXPECT_FALSE(actual_partition_group_list.has_value());
}
TEST(SPMDPartitionerUtilTest, ExpandPartitionGroupListAcrossReplicas) {
IotaReplicaGroupList partition_group_list =
IotaReplicaGroupList(10, 5, {2, 5, 5}, {0, 2, 1});
IotaReplicaGroupList expanded_partition_group_list =
ExpandPartitionGroupListAcrossReplicas(partition_group_list, 2, 50)
.iota_replica_group_list()
.value();
EXPECT_EQ(expanded_partition_group_list.num_replica_groups(), 20);
EXPECT_EQ(expanded_partition_group_list.num_devices_per_group(), 5);
EXPECT_THAT(expanded_partition_group_list.reshape_dims(),
testing::ElementsAre(4, 5, 5));
EXPECT_THAT(expanded_partition_group_list.transpose_perm(),
testing::ElementsAre(0, 2, 1));
}
TEST(SPMDPartitionerUtilDeathTest, ExpandPartitionGroupListAcrossReplicas) {
IotaReplicaGroupList partition_group_list =
IotaReplicaGroupList(10, 5, {2, 5, 5}, {0, 2, 1});
ASSERT_DEATH(
{
auto expanded_partition_group_list =
ExpandPartitionGroupListAcrossReplicas(partition_group_list, 2, 60);
},
"Check failed: \\(partition_group_count \\* partition_group_size\\) == "
"num_partitions \\(50 vs\\. 60\\)");
}
}
}
} |
2,001 | cpp | tensorflow/tensorflow | spmd_prepare | third_party/xla/xla/service/spmd/spmd_prepare.cc | third_party/xla/xla/service/spmd/spmd_prepare_test.cc | #ifndef XLA_SERVICE_SPMD_SPMD_PREPARE_H_
#define XLA_SERVICE_SPMD_SPMD_PREPARE_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace spmd {
class SpmdPrepare : public HloModulePass {
public:
explicit SpmdPrepare() = default;
~SpmdPrepare() override = default;
absl::string_view name() const override { return "spmd-prepare"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
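// Added commentary: SpmdPrepare runs ahead of the SPMD partitioner and
// rewrites sharded HLO into forms the partitioner handles better; the only
// rewrite implemented below is the scatter index split in ProcessScatter.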
#include "xla/service/spmd/spmd_prepare.h"
#include <memory>
#include <optional>
#include <vector>
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/service/pattern_matcher.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace spmd {
namespace {
absl::StatusOr<bool> ProcessScatter(HloInstruction* hlo,
const CallGraph& call_graph) {
if (hlo->opcode() != HloOpcode::kScatter) {
return false;
}
HloScatterInstruction* scatter = Cast<HloScatterInstruction>(hlo);
HloComputation* computation = hlo->parent();
if (scatter->scatter_operand_count() > 1) {
return false;
}
ScatterDimensionNumbers scatt_dim = scatter->scatter_dimension_numbers();
HloInstruction* operand = scatter->scatter_operands()[0];
HloInstruction* indices = scatter->scatter_indices();
HloInstruction* updates = scatter->scatter_updates()[0];
if (operand->opcode() != HloOpcode::kAdd ||
indices->opcode() != HloOpcode::kConcatenate ||
indices->operand_count() != 2 ||
updates->opcode() != HloOpcode::kConcatenate ||
updates->operand_count() != 2 ||
!Match(scatter->to_apply()->root_instruction(),
match::AddAnyOrder(match::Parameter(0), match::Parameter(1)))) {
return false;
}
const auto& dnums = scatter->scatter_dimension_numbers();
auto get_parallel_dims_for_scatter = [&dnums, &call_graph](
const HloInstruction* operand,
const HloInstruction* indices,
const HloInstruction* updates) {
std::vector<int64_t> slice_sizes = hlo_sharding_util::GetScatterSliceSize(
operand->shape(), updates->shape(), dnums);
int64_t index_vector_dim = dnums.index_vector_dim();
const auto& index_map = dnums.scatter_dims_to_operand_dims();
return hlo_sharding_util::GetGatherScatterBatchParallelDims(
indices, slice_sizes, index_vector_dim, index_map, call_graph);
};
if (get_parallel_dims_for_scatter(operand, indices, updates).has_value()) {
return false;
}
HloInstruction* lhs_indices = indices->mutable_operand(0);
HloInstruction* rhs_indices = indices->mutable_operand(1);
HloInstruction* lhs_updates = updates->mutable_operand(0);
HloInstruction* rhs_updates = updates->mutable_operand(1);
std::optional<hlo_sharding_util::GatherScatterParallelDims> lhs_parallel_dims;
std::optional<hlo_sharding_util::GatherScatterParallelDims> rhs_parallel_dims;
lhs_parallel_dims =
get_parallel_dims_for_scatter(operand, lhs_indices, lhs_updates);
if (!lhs_parallel_dims.has_value()) {
return false;
}
rhs_parallel_dims =
get_parallel_dims_for_scatter(operand, rhs_indices, rhs_updates);
if (!rhs_parallel_dims.has_value()) {
return false;
}
if (lhs_parallel_dims->operand_parallel_dims !=
rhs_parallel_dims->operand_parallel_dims ||
lhs_parallel_dims->indices_parallel_dims !=
rhs_parallel_dims->indices_parallel_dims ||
lhs_parallel_dims->index_parallel_in_dim !=
rhs_parallel_dims->index_parallel_in_dim) {
return false;
}
if (lhs_parallel_dims->operand_parallel_dims.size() !=
lhs_parallel_dims->indices_parallel_dims.size()) {
return false;
}
HloInstruction* lhs_operand = operand->mutable_operand(0);
HloInstruction* rhs_operand = operand->mutable_operand(1);
bool any_sharded_parallel_dim = false;
if (!lhs_operand->has_sharding() || !rhs_operand->has_sharding() ||
!lhs_indices->has_sharding() || !rhs_indices->has_sharding()) {
return false;
}
for (int i = 0; i < lhs_parallel_dims->operand_parallel_dims.size(); ++i) {
if (lhs_operand->sharding().IsTiled() &&
lhs_operand->sharding().tile_assignment().dim(
lhs_parallel_dims->operand_parallel_dims[i]) != 1 &&
lhs_indices->sharding().tile_assignment().dim(
lhs_parallel_dims->indices_parallel_dims[i]) != 1) {
any_sharded_parallel_dim = true;
break;
}
}
if (!any_sharded_parallel_dim) {
return false;
}
HloInstruction* scatter0 =
computation->AddInstruction(HloInstruction::CreateScatter(
scatter->shape(), operand, lhs_indices, lhs_updates,
scatter->to_apply(), dnums, false, false));
scatter0->set_metadata(scatter->metadata());
scatter0->set_sharding(scatter->sharding());
HloInstruction* scatter1 =
computation->AddInstruction(HloInstruction::CreateScatter(
scatter->shape(), scatter0, rhs_indices, rhs_updates,
scatter->to_apply(), dnums, false, false));
scatter1->set_metadata(scatter->metadata());
scatter1->set_sharding(scatter->sharding());
TF_RETURN_IF_ERROR(scatter->ReplaceAllUsesWith(scatter1));
return true;
}
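// Added commentary: the rewrite above targets scatters of the form
//   scatter(add(a, b), concat(i0, i1), concat(u0, u1))
// where the concatenated indices/updates expose no batch-parallel dims as a
// whole but each half does, with matching dims. It splits the op into two
// chained scatters, scatter(scatter(add, i0, u0), i1, u1), which can then be
// partitioned along the parallel dims; the original scatter is left to DCE
// after ReplaceAllUsesWith.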
absl::StatusOr<bool> RunOnComputation(HloComputation* computation,
const CallGraph& call_graph) {
bool changed = false;
for (HloInstruction* hlo : computation->MakeInstructionPostOrder()) {
if (!hlo->has_sharding()) {
continue;
}
TF_ASSIGN_OR_RETURN(bool scatter_changed, ProcessScatter(hlo, call_graph));
if (scatter_changed) {
changed = true;
continue;
}
}
return changed;
}
}
absl::StatusOr<bool> SpmdPrepare::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
for (auto comp : module->computations(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool comp_changed, RunOnComputation(comp, *call_graph));
changed |= comp_changed;
}
return changed;
}
}
} | #include "xla/service/spmd/spmd_prepare.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace spmd {
namespace {
namespace op = xla::testing::opcode_matchers;
class SpmdPrepareTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module, int64_t distance_threshold = 100) {
TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(
hlo_module, GetModuleConfigForTest()));
HloPassPipeline pipeline("spmd-prepare");
pipeline.AddPass<SpmdPrepare>();
TF_RETURN_IF_ERROR(pipeline.Run(module.get()).status());
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
};
TEST_F(SpmdPrepareTest, ScatterParallelIndexSplit) {
absl::string_view hlo_string = R"(
HloModule module
region_157.5067 {
Arg_0.5068 = f32[] parameter(0)
Arg_1.5069 = f32[] parameter(1)
ROOT add.5070 = f32[] add(Arg_0.5068, Arg_1.5069)
}
ENTRY entry {
p0 = f32[16,1000,2000]{2,1,0} parameter(0), sharding={devices=[4,2,1]<=[8]}
p1 = f32[16,1000,2000]{2,1,0} parameter(1), sharding={devices=[4,2,1]<=[8]}
p2 = s32[16,1000,64,1]{3,2,1,0} parameter(2), sharding={devices=[4,2,1,1]<=[8]}
p3 = f32[16,1000,64]{2,1,0} parameter(3), sharding={devices=[4,2,1]<=[8]}
p4 = f32[16,1000,64]{2,1,0} parameter(4), sharding={devices=[4,2,1]<=[8]}
iota.0 = s32[16,1000,64,1]{3,2,1,0} iota(), iota_dimension=0, sharding={devices=[4,2,1,1]<=[8]}
iota.1 = s32[16,1000,64,1]{3,2,1,0} iota(), iota_dimension=1, sharding={devices=[4,2,1,1]<=[8]}
iota.2 = s32[16,1000,64,1]{3,2,1,0} iota(), iota_dimension=0, sharding={devices=[4,2,1,1]<=[8]}
iota.3 = s32[16,1000,64,1]{3,2,1,0} iota(), iota_dimension=1, sharding={devices=[4,2,1,1]<=[8]}
concatenate.0 = s32[16,1000,64,3]{3,2,1,0} concatenate(iota.0, iota.1, p2), dimensions={3}, sharding={devices=[4,2,1,1]<=[8]}
concatenate.1 = s32[16,1000,64,3]{3,2,1,0} concatenate(iota.2, iota.3, p2), dimensions={3}, sharding={devices=[4,2,1,1]<=[8]}
concatenate.130 = s32[32,1000,64,3]{3,2,1,0} concatenate(concatenate.0, concatenate.1), dimensions={0}, sharding={devices=[4,2,1,1]<=[8]}
concatenate.131 = f32[32,1000,64]{2,1,0} concatenate(p3, p4), dimensions={0}, sharding={devices=[4,2,1]<=[8]}
add.190 = f32[16,1000,2000]{2,1,0} add(p0, p1), sharding={devices=[4,2,1]<=[8]}
ROOT scatter.2 = f32[16,1000,2000]{2,1,0} scatter(add.190, concatenate.130, concatenate.131), update_window_dims={}, inserted_window_dims={0,1,2}, scatter_dims_to_operand_dims={0,1,2}, index_vector_dim=3, to_apply=region_157.5067, sharding={devices=[4,2,1]<=[8]}
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
HloInstruction* root = module->entry_computation()->root_instruction();
XLA_VLOG_LINES(1, module->ToString());
EXPECT_THAT(
root,
op::Scatter(
op::Scatter(op::Add(),
op::Concatenate(op::Iota(), op::Iota(), op::Parameter()),
op::Parameter()),
op::Concatenate(op::Iota(), op::Iota(), op::Parameter()),
op::Parameter()));
}
}
}
} |
2,002 | cpp | tensorflow/tensorflow | stateful_rng_spmd_partitioner | third_party/xla/xla/service/spmd/stateful_rng_spmd_partitioner.cc | third_party/xla/xla/service/spmd/stateful_rng_spmd_partitioner_test.cc | #ifndef XLA_SERVICE_SPMD_STATEFUL_RNG_SPMD_PARTITIONER_H_
#define XLA_SERVICE_SPMD_STATEFUL_RNG_SPMD_PARTITIONER_H_
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/service/spmd/spmd_partitioner.h"
namespace xla {
namespace spmd {
class StatefulRngSpmdPartitioningVisitor
: public spmd::SpmdPartitioningVisitor {
public:
StatefulRngSpmdPartitioningVisitor(
HloComputation* computation, int64_t num_partitions, int64_t num_replicas,
const spmd::SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, spmd::SpmdLogger* logger,
spmd::SpmdPartitionerOptions options, spmd::SpmdPartitioner* partitioner,
const CallGraph& call_graph)
: spmd::SpmdPartitioningVisitor(computation, num_partitions, num_replicas,
collective_ops_creator, next_channel_id,
logger, std::move(options), partitioner,
call_graph) {}
absl::Status HandleRngGetAndUpdateState(HloInstruction* hlo) override;
};
class StatefulRngSpmdPartitioner : public spmd::SpmdPartitioner {
public:
StatefulRngSpmdPartitioner(
int64_t num_partitions, int64_t num_replicas,
int64_t threshold_for_windowed_einsum_mib = 100000,
bool windowed_einsum_use_multiple_streams = false,
bool skip_checking_windowed_einsum_users = false,
bool disable_ag_rewrite_for_multiple_consumers = false)
: spmd::SpmdPartitioner(num_partitions, num_replicas,
GetSpmdPartitionerOptions(
threshold_for_windowed_einsum_mib,
windowed_einsum_use_multiple_streams,
skip_checking_windowed_einsum_users,
disable_ag_rewrite_for_multiple_consumers)) {}
protected:
std::unique_ptr<spmd::SpmdPartitioningVisitor> CreateVisitor(
HloComputation* computation, int64_t num_partitions, int64_t num_replicas,
const spmd::SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, spmd::SpmdLogger* logger,
spmd::SpmdPartitionerOptions options,
const CallGraph& call_graph) override;
absl::Status PreprocessSharding(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
absl::Status HandleRotateRightWhilePreprocessing(
HloComputation* computation) override;
bool CanSideEffectingHaveReplicatedSharding(
const HloInstruction* hlo) override;
private:
static spmd::SpmdPartitionerOptions GetSpmdPartitionerOptions(
int64_t threshold_for_windowed_einsum_mib,
bool windowed_einsum_use_multiple_streams = false,
bool skip_checking_windowed_einsum_users = false,
bool disable_ag_rewrite_for_multiple_consumers = false) {
spmd::SpmdPartitionerOptions options;
options.allow_module_signature_change = true;
options.threshold_for_windowed_einsum_mib =
threshold_for_windowed_einsum_mib;
options.unroll_windowed_einsum = windowed_einsum_use_multiple_streams;
options.skip_checking_windowed_einsum_users =
skip_checking_windowed_einsum_users;
options.disable_ag_rewrite_for_multiple_consumers =
disable_ag_rewrite_for_multiple_consumers;
return options;
}
};
}
}
#endif
#include "xla/service/spmd/stateful_rng_spmd_partitioner.h"
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
namespace xla {
namespace spmd {
absl::Status StatefulRngSpmdPartitioningVisitor::HandleRngGetAndUpdateState(
HloInstruction* hlo) {
if (hlo->sharding().HasUniqueDevice()) {
return HandleSingleDevice(hlo);
}
TF_RET_CHECK(hlo->sharding().IsReplicated());
auto clone =
builder()->AddInstruction(hlo->CloneWithNewOperands(hlo->shape(), {}));
clone->set_sharding(hlo->sharding());
SetPartitionedHlo(
hlo, spmd::PartitionedHlo(clone, hlo->shape(), MakePartitioningState())
.Reshard(hlo->sharding()));
return absl::OkStatus();
}
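// Added commentary: a replicated rng-get-and-update-state is cloned unchanged
// into the partitioned computation, so every partition advances an identical
// RNG state; a sharding with a unique device is routed through
// HandleSingleDevice instead, and any other sharding fails the TF_RET_CHECK.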
std::unique_ptr<spmd::SpmdPartitioningVisitor>
StatefulRngSpmdPartitioner::CreateVisitor(
HloComputation* computation, int64_t num_partitions, int64_t num_replicas,
const spmd::SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, spmd::SpmdLogger* logger,
spmd::SpmdPartitionerOptions options, const CallGraph& call_graph) {
return std::make_unique<StatefulRngSpmdPartitioningVisitor>(
computation, num_partitions, num_replicas, collective_ops_creator,
next_channel_id, logger, std::move(options), this, call_graph);
}
absl::Status StatefulRngSpmdPartitioner::PreprocessSharding(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* hlo : computation->instructions()) {
if (hlo->opcode() == HloOpcode::kRngGetAndUpdateState &&
!hlo->has_sharding()) {
hlo->set_sharding(HloSharding::Replicate());
}
}
}
return spmd::SpmdPartitioner::PreprocessSharding(module, execution_threads);
}
bool StatefulRngSpmdPartitioner::CanSideEffectingHaveReplicatedSharding(
const HloInstruction* hlo) {
if (hlo->opcode() == HloOpcode::kRngGetAndUpdateState) return true;
return spmd::SpmdPartitioner::CanSideEffectingHaveReplicatedSharding(hlo);
}
absl::Status StatefulRngSpmdPartitioner::HandleRotateRightWhilePreprocessing(
HloComputation* computation) {
if (!computation->IsWhileBodyComputation()) {
return absl::OkStatus();
}
HloInstruction* while_loop = computation->WhileCallInstruction();
TF_RET_CHECK(while_loop);
if (computation->parent()
->config()
.debug_options()
.xla_gpu_unsafe_pipelined_loop_annotator()) {
xla::FrontendAttributes attributes;
(*attributes.mutable_map())["is_pipelined_while_loop"] = "true";
while_loop->add_frontend_attributes(attributes);
}
return absl::OkStatus();
}
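// Added commentary: when the (unsafe) xla_gpu_unsafe_pipelined_loop_annotator
// debug option is set, this hook tags the enclosing while op of a while-body
// computation with an "is_pipelined_while_loop" frontend attribute; the
// MergedSliceThenConcatRotateRightWhileOp test below checks both settings.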
}
} | #include "xla/service/spmd/stateful_rng_spmd_partitioner.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/rng_expander.h"
#include "xla/service/sharding_propagation.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace spmd {
namespace {
namespace op = xla::testing::opcode_matchers;
int64_t CountInstructions(const HloComputation &computation, HloOpcode opcode) {
int64_t count = 0;
for (const auto &instruction : computation.instructions()) {
if (instruction->opcode() == opcode) {
count++;
}
}
return count;
}
class StatefulRngSpmdPartitionerTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> PartitionComputation(
absl::string_view hlo_module, int64_t num_partitions,
DebugOptions debug_options,
std::function<void(HloPassPipeline &pipeline)> add_passes = nullptr,
bool skip_checking_windowed_einsum_users = false,
bool disable_ag_rewrite_for_multiple_consumers = false) {
HloModuleConfig config = GetModuleConfigForTest(1, num_partitions);
config.set_use_spmd_partitioning(true);
config.set_debug_options(debug_options);
TF_ASSIGN_OR_RETURN(auto module,
ParseAndReturnVerifiedModule(hlo_module, config));
HloPassPipeline pass("partitioning");
pass.AddPass<HloVerifier>(/*layout_sensitive=*/false,
                          /*allow_mixed_precision=*/false);
if (add_passes) {
add_passes(pass);
}
pass.AddPass<ShardingPropagation>(true);
pass.AddPass<StatefulRngSpmdPartitioner>(
num_partitions,
/*num_replicas=*/1,
debug_options.xla_gpu_threshold_for_windowed_einsum_mib(),
debug_options.xla_gpu_multi_streamed_windowed_einsum(),
skip_checking_windowed_einsum_users,
disable_ag_rewrite_for_multiple_consumers);
pass.AddPass<HloVerifier>(/*layout_sensitive=*/false,
                          /*allow_mixed_precision=*/false);
TF_RETURN_IF_ERROR(pass.Run(module.get()).status());
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
void VerifyNoAllReduce(HloModule *module) {
for (HloComputation *computation : module->computations()) {
for (HloInstruction *hlo : computation->instructions()) {
EXPECT_NE(hlo->opcode(), HloOpcode::kAllReduce);
}
}
}
DebugOptions GetDefaultDebugOptions() {
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_threshold_for_windowed_einsum_mib(1000000);
debug_options.set_xla_gpu_multi_streamed_windowed_einsum(false);
debug_options.set_xla_gpu_unsafe_pipelined_loop_annotator(false);
return debug_options;
}
};
TEST_F(StatefulRngSpmdPartitionerTest, RngReplicatedConsumer) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%p0 = f32[50,100] parameter(0), sharding={replicated}
%mu = f32[] constant(0)
%sigma = f32[] constant(1)
%rng = f32[50,100] rng(f32[] %mu, f32[] %sigma), distribution=rng_uniform
ROOT %add = f32[50,100] add(%rng, %p0), sharding={replicated}
}
)";
auto add_passes = [](HloPassPipeline &pipeline) {
pipeline.AddPass<RngExpander>();
};
DebugOptions debug_options = GetDebugOptionsForTest();
TF_ASSERT_OK_AND_ASSIGN(
auto module, PartitionComputation(hlo_string, 2,
GetDefaultDebugOptions(), add_passes));
XLA_VLOG_LINES(1, module->ToString());
VerifyNoAllReduce(module.get());
}
TEST_F(StatefulRngSpmdPartitionerTest, RngPartitionedConsumer) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%p0 = f32[50,100] parameter(0), sharding={replicated}
%mu = f32[] constant(0)
%sigma = f32[] constant(1)
%rng = f32[50,100] rng(f32[] %mu, f32[] %sigma), distribution=rng_uniform
ROOT %add = f32[50,100] add(%rng, %p0), sharding={devices=[2,1]0,1}
}
)";
auto add_passes = [](HloPassPipeline &pipeline) {
pipeline.AddPass<RngExpander>();
};
TF_ASSERT_OK_AND_ASSIGN(
auto module, PartitionComputation(hlo_string, 2,
GetDefaultDebugOptions(), add_passes));
XLA_VLOG_LINES(1, module->ToString());
VerifyNoAllReduce(module.get());
}
TEST_F(StatefulRngSpmdPartitionerTest,
EinsumDisableRewriteForAgWithMultipleConsumers) {
absl::string_view hlo_string = R"(
HloModule test, entry_computation_layout={(bf16[2,2048,24576]{2,1,0}, bf16[24576,98304]{1,0}, bf16[24576,98304]{1,0})->bf16[2,2048,98304]{2,1,0}}, num_partitions=4
ENTRY main {
Arg_0.1 = bf16[2,2048,24576]{2,1,0} parameter(0), sharding={devices=[1,4,1]<=[4]}
Arg_1.2 = bf16[24576,98304]{1,0} parameter(1), sharding={devices=[1,4]<=[4]}
dot.5 = bf16[2,2048,98304]{2,1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={2}, rhs_contracting_dims={0}, sharding={devices=[1,1,4]<=[4]}
Arg_2.3 = bf16[24576,98304]{1,0} parameter(2), sharding={devices=[1,4]<=[4]}
dot.6 = bf16[2,2048,98304]{2,1,0} dot(Arg_0.1, Arg_2.3), lhs_contracting_dims={2}, rhs_contracting_dims={0}, sharding={devices=[1,1,4]<=[4]}
ROOT add.8 = bf16[2,2048,98304]{2,1,0} add(dot.5, dot.6), sharding={devices=[1,1,4]<=[4]}
}
)";
DebugOptions debug_options = GetDefaultDebugOptions();
debug_options.set_xla_gpu_threshold_for_windowed_einsum_mib(0);
debug_options.set_xla_gpu_multi_streamed_windowed_einsum(true);
TF_ASSERT_OK_AND_ASSIGN(
auto module,
PartitionComputation(hlo_string, 4, debug_options,
/*add_passes=*/nullptr,
/*skip_checking_windowed_einsum_users=*/true,
/*disable_ag_rewrite_for_multiple_consumers=*/true));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_EQ(CountInstructions(*module->entry_computation(), HloOpcode::kWhile),
1);
EXPECT_EQ(CountInstructions(*module->entry_computation(), HloOpcode::kDot),
1);
EXPECT_EQ(
CountInstructions(*module->entry_computation(), HloOpcode::kAllGather),
1);
}
TEST_F(StatefulRngSpmdPartitionerTest, VerifyThresholdSetCorrectly) {
auto debug_options = HloTestBase::GetDebugOptionsForTest();
int64_t threshold = 400;
debug_options.set_xla_gpu_threshold_for_windowed_einsum_mib(threshold);
debug_options.set_xla_gpu_multi_streamed_windowed_einsum(true);
StatefulRngSpmdPartitioner rng_spmd_partitioner(
/*num_partitions=*/2, /*num_replicas=*/1,
debug_options.xla_gpu_threshold_for_windowed_einsum_mib(),
debug_options.xla_gpu_multi_streamed_windowed_einsum());
EXPECT_EQ(rng_spmd_partitioner.options().threshold_for_windowed_einsum_mib,
threshold);
EXPECT_EQ(rng_spmd_partitioner.options().unroll_windowed_einsum, true);
}
TEST_F(StatefulRngSpmdPartitionerTest,
MergedSliceThenConcatRotateRightWhileOp) {
absl::string_view hlo_string = R"(
HloModule test
%Body {
%param = (f32[12], s32[]) parameter(0)
%i = s32[] get-tuple-element(%param), index=1
%one = s32[] constant(1)
%i_plus_one = s32[] add(s32[] %i, s32[] %one)
%param0 = f32[12] get-tuple-element(%param), index=0, sharding={devices=[4]<=[4]}
%slice0 = f32[2] slice(%param0), slice={[10:12]}, sharding={devices=[4]<=[4]}
%slice1 = f32[10] slice(%param0), slice={[0:10]}, sharding={devices=[4]<=[4]}
%concat = f32[12] concatenate(%slice0, %slice1), dimensions={0}, sharding={devices=[4]<=[4]}
ROOT %tuple = (f32[12], s32[]) tuple(%concat, %i_plus_one)
}
%Cond {
%param.1 = (f32[12], s32[]) parameter(0)
%i.1 = s32[] get-tuple-element(%param.1), index=1
%trip_count = s32[] constant(11)
ROOT %done = pred[] compare(%i.1, %trip_count), direction=LT
}
ENTRY %test {
%i_start = f32[12] parameter(0)
%p_start = s32[] constant(0)
%initial_tuple = (f32[12], s32[]) tuple(%i_start, %p_start)
ROOT %while = (f32[12], s32[]) while(%initial_tuple), condition=%Cond, body=%Body
}
)";
DebugOptions debug_options = GetDefaultDebugOptions();
debug_options.set_xla_gpu_unsafe_pipelined_loop_annotator(true);
TF_ASSERT_OK_AND_ASSIGN(
auto module,
PartitionComputation(hlo_string, 4, debug_options));
const HloInstruction *whileOp =
module->entry_computation()->root_instruction();
const HloInstruction *root =
whileOp->while_body()->GetInstructionWithName("concatenate");
auto rotate =
op::Concatenate(op::CollectivePermute(op::Slice()), op::Slice());
EXPECT_THAT(root, AllOf(rotate, op::Shape("f32[3]")));
EXPECT_TRUE(
whileOp->frontend_attributes().map().contains("is_pipelined_while_loop"));
debug_options.set_xla_gpu_unsafe_pipelined_loop_annotator(false);
TF_ASSERT_OK_AND_ASSIGN(
module,
PartitionComputation(hlo_string, 4, debug_options));
whileOp = module->entry_computation()->root_instruction();
root = whileOp->while_body()->GetInstructionWithName("concatenate");
rotate = op::Concatenate(op::CollectivePermute(op::Slice()), op::Slice());
EXPECT_THAT(root, AllOf(rotate, op::Shape("f32[3]")));
}
}
}
} |
2,003 | cpp | tensorflow/tensorflow | spmd_partitioner | third_party/xla/xla/service/spmd/spmd_partitioner.cc | third_party/xla/xla/service/spmd/spmd_partitioner_test.cc | #ifndef XLA_SERVICE_SPMD_SPMD_PARTITIONER_H_
#define XLA_SERVICE_SPMD_SPMD_PARTITIONER_H_
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/node_hash_map.h"
#include "absl/functional/function_ref.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/service/call_graph.h"
#include "xla/service/custom_call_sharding_helper.h"
#include "xla/service/dot_as_convolution_util.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace spmd {
struct SpmdPartitionerOptions {
bool conv_halo_exchange_always_on_lhs = true;
int64_t report_instruction_count = 5;
int64_t threshold_for_windowed_einsum_mib = 256;
bool unroll_windowed_einsum = false;
bool bidirectional_windowed_einsum = false;
bool allow_module_signature_change = false;
bool cache_all_gather = true;
bool choose_faster_windowed_einsum_over_mem = false;
bool bidirectional_decomposed_all_gather = false;
bool skip_checking_windowed_einsum_users = false;
bool enable_windowed_einsum_for_all_gather = true;
bool enable_windowed_einsum_for_reduce_scatter = true;
bool disable_ag_rewrite_for_multiple_consumers = false;
};
class SpmdBuilder : public HloComputation::Builder {
public:
SpmdBuilder(const std::string& name, HloInstruction* hlo)
: HloComputation::Builder(name) {
visiting_hlo_ = hlo;
}
HloInstruction* AddInstruction(
std::unique_ptr<HloInstruction> instruction) override;
const std::vector<HloInstruction*>& derived_instructions(
HloInstruction* hlo) {
return instructions_.at(hlo);
}
void set_visiting_hlo(HloInstruction* hlo) {
visiting_hlo_ = hlo;
instructions_[hlo];
}
HloInstruction* visiting_hlo() const { return visiting_hlo_; }
std::optional<const absl::flat_hash_set<int64_t>*> BroadcastDimsForCreatedHlo(
const HloInstruction* hlo) {
auto it = broadcast_dims_.find(hlo);
if (it == broadcast_dims_.end()) {
return std::nullopt;
}
return &it->second;
}
private:
HloInstruction* visiting_hlo_;
HloInstructionMap<std::vector<HloInstruction*>> instructions_;
absl::flat_hash_map<const HloInstruction*, absl::flat_hash_set<int64_t>>
broadcast_dims_;
};
struct SPMDCollectiveOpsCreator {
std::function<HloInstruction*(SpmdBuilder*)> create_partition_id;
std::function<HloInstruction*(
SpmdBuilder*, HloInstruction* operand, HloComputation* reduction,
const std::vector<std::vector<int64_t>>& partition_subgroups,
int64_t channel_id)>
create_cross_partition_all_reduce;
std::function<HloInstruction*(
SpmdBuilder*, HloInstruction* operand, HloComputation* reduction,
const IotaReplicaGroupList& partition_group_list, int64_t channel_id)>
create_cross_partition_all_reduce_with_iota_device_list;
std::function<HloInstruction*(
SpmdBuilder*, HloInstruction* operand,
std::vector<std::pair<int64_t, int64_t>>& src_dst_pairs,
int64_t next_channel_id)>
create_cross_partition_collective_permute;
std::function<HloInstruction*(
SpmdBuilder*, absl::Span<HloInstruction* const> operands,
const std::vector<std::vector<int64_t>>& partition_subgroups,
int64_t channel_id, std::optional<int64_t> split_dimension)>
create_cross_partition_all_to_all;
std::function<HloInstruction*(
SpmdBuilder*, HloInstruction* operand, const Shape& ag_shape,
const std::vector<std::vector<int64_t>>& partition_subgroups,
int64_t channel_id, int64_t all_gather_dimension)>
create_cross_partition_all_gather;
std::function<HloInstruction*(
SpmdBuilder*, HloInstruction* operand, const Shape& ag_shape,
const IotaReplicaGroupList& partition_group_list, int64_t channel_id,
int64_t all_gather_dimension)>
create_cross_partition_all_gather_with_iota_device_list;
};
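// Added commentary: SPMDCollectiveOpsCreator is a table of factory callbacks
// so callers can swap in backend-specific lowerings of partition-id,
// all-reduce, collective-permute, all-to-all, and all-gather;
// GetDefaultCollectiveOpsCreator below supplies stock implementations.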
SPMDCollectiveOpsCreator GetDefaultCollectiveOpsCreator(int64_t num_partitions,
int64_t num_replicas);
class SpmdLogger {
public:
SpmdLogger(int64_t report_instruction_count, bool disabled)
: report_instruction_count_(report_instruction_count),
disabled_(disabled) {}
static std::string ReportBeforePartition(const HloModule& module,
int64_t report_instruction_count);
static std::string ReportAfterPartition(const HloModule& module,
int64_t report_instruction_count);
void RegisterLogEntry(HloInstruction* hlo,
const std::vector<HloInstruction*>& group);
std::string MakeReport();
private:
template <typename F>
static std::string ReportMemoryUsage(const HloModule& module, const F& filter,
int64_t report_instruction_count);
std::vector<std::pair<int64_t, std::string>> entries_;
int64_t report_instruction_count_;
const bool disabled_;
};
class SpmdPartitioningVisitor;
class SpmdPartitioner : public HloModulePass {
public:
SpmdPartitioner(int64_t num_partitions, int64_t num_replicas,
SpmdPartitionerOptions options);
SpmdPartitioner(int64_t num_partitions, int64_t num_replicas,
SpmdPartitionerOptions options,
SPMDCollectiveOpsCreator collective_ops_creator)
: num_partitions_(num_partitions),
num_replicas_(num_replicas),
options_(std::move(options)),
collective_ops_creator_(std::move(collective_ops_creator)) {}
absl::string_view name() const override { return "spmd-partitioning"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
absl::StatusOr<bool> PartitionComputation(HloComputation* computation,
const HloSharding& root_sharding,
int64_t* next_channel_id,
SpmdLogger* logger,
const CallGraph& call_graph);
virtual HloInstruction* AllGatherShards(
SpmdBuilder* b, HloInstruction* operand, const HloSharding& sharding,
int64_t* next_channel_id, absl::Span<const int64_t> selected_dims,
const SPMDCollectiveOpsCreator& collectives_creator);
virtual HloInstruction* AllReduceAlongShardingDims(
SpmdBuilder* b, HloInstruction* operand, const HloSharding& sharding,
int64_t* next_channel_id, absl::Span<const int64_t> selected_dims,
const SPMDCollectiveOpsCreator& collectives_creator,
HloComputation* reduction);
const SpmdPartitionerOptions& options() { return options_; }
virtual std::unique_ptr<SpmdPartitioningVisitor> CreateVisitor(
HloComputation* computation, int64_t num_partitions, int64_t num_replicas,
const SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, SpmdLogger* logger,
SpmdPartitionerOptions options, const CallGraph& call_graph);
virtual int64_t MemoryCostInBytes(HloInstruction* hlo);
virtual int64_t CommunicationCostInBytes(HloInstruction* hlo);
const absl::flat_hash_set<absl::string_view>& execution_threads() const {
return execution_threads_;
}
protected:
std::pair<HloInstruction*, HloInstruction*> AllGatherShardsInternal(
SpmdBuilder* b, HloInstruction* operand, const HloSharding& sharding,
int64_t* next_channel_id, absl::Span<const int64_t> selected_dims,
const SPMDCollectiveOpsCreator& collectives_creator, bool per_dim_ag);
HloInstruction* AllReduceAlongShardingDimsInternal(
SpmdBuilder* b, HloInstruction* operand, const HloSharding& sharding,
int64_t* next_channel_id, absl::Span<const int64_t> selected_dims,
const SPMDCollectiveOpsCreator& collectives_creator,
HloComputation* reduction, bool per_dim_ar);
virtual absl::Status PreprocessSharding(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads);
virtual bool CanSideEffectingHaveReplicatedSharding(
const HloInstruction* hlo) {
if (hlo->opcode() == HloOpcode::kCustomCall) {
if (auto* partitioner =
GetCustomCallPartitioner(hlo->custom_call_target())) {
return partitioner->CanSideEffectingHaveReplicatedSharding();
}
}
return hlo->opcode() == HloOpcode::kInfeed ||
hlo->opcode() == HloOpcode::kOutfeed;
}
absl::Status PreprocessHlos(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads);
virtual absl::Status HandleRotateRightWhilePreprocessing(
HloComputation* computation) {
return absl::OkStatus();
  }
void set_execution_threads(
const absl::flat_hash_set<absl::string_view>& execution_threads) {
execution_threads_ = execution_threads;
}
const int64_t num_partitions_;
const int64_t num_replicas_;
SpmdPartitionerOptions options_;
SPMDCollectiveOpsCreator collective_ops_creator_;
std::vector<std::vector<int64_t>> device_groups_;
absl::flat_hash_set<absl::string_view> execution_threads_;
};
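// A value in the partitioned program: an HLO instruction paired with its
// pre-partitioning "base" shape and the PartitioningState needed to reshard
// it between shardings.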
class PartitionedHlo {
public:
struct WindowedInputShardReturnValue {
HloInstruction* sharded_input;
Window shard_window;
std::optional<std::vector<HloInstruction*>> dynamic_slice_index_on_output;
};
struct ReshardCache {
struct PerHloCache {
absl::flat_hash_map<HloSharding, PartitionedHlo> reshard_cache;
std::vector<
std::tuple<HloSharding, Window, WindowedInputShardReturnValue>>
window_reshard_cache;
};
absl::node_hash_map<HloInstruction*, PerHloCache> per_hlo_cache;
absl::flat_hash_map<std::string, std::unique_ptr<ReshardCache>>
        grouped_caches;
};
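  // Everything needed to emit partitioned instructions: the builder and
  // module being populated, replica/partition context, the collective-ops
  // factory, a channel-id counter, the reshard cache, and the owning
  // partitioner.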
struct PartitioningState {
SpmdBuilder* b;
HloModule* module;
int64_t num_replicas;
HloInstruction* partition_id;
SPMDCollectiveOpsCreator collective_ops_creator;
int64_t* next_channel_id;
ReshardCache* reshard_cache;
SpmdPartitioner* partitioner;
};
PartitionedHlo(HloInstruction* hlo, Shape base_shape, PartitioningState state)
: hlo_(hlo), base_shape_(base_shape), state_(std::move(state)) {
CHECK(hlo->has_sharding())
<< "PartitionedHlo is missing sharding:" << hlo->ToString();
}
PartitionedHlo CloneWithNewHlo(HloInstruction* hlo) const {
PartitionedHlo new_phlo = *this;
new_phlo.hlo_ = hlo;
if (!hlo->has_sharding() && hlo_->has_sharding()) {
hlo->copy_sharding(hlo_);
}
return new_phlo;
}
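  // Returns this value resharded to `target`; results go through the state's
  // ReshardCache (see ReshardNoCache for the uncached path). An optional
  // `pad_value` supplies the value used for padding.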
PartitionedHlo Reshard(const HloSharding& target,
std::optional<Literal> pad_value = std::nullopt) const;
PartitionedHlo PadWithValue(
HloInstruction* pad_value,
absl::Span<const int64_t> left_padded_dims = {},
absl::Span<const int64_t> skipped_dims = {}) const;
HloInstruction* PadWithValueHlo(
HloInstruction* pad_value,
absl::Span<const int64_t> left_padded_dims = {},
absl::Span<const int64_t> skipped_dims = {}) const;
PartitionedHlo PadWithZero(absl::Span<const int64_t> left_padded_dims = {},
absl::Span<const int64_t> skipped_dims = {}) const;
HloInstruction* hlo() const { return hlo_; }
const HloSharding& sharding() const { return hlo_->sharding(); }
  int64_t rank() const { return base_shape_.rank(); }
const Shape& base_shape() const { return base_shape_; }
int64_t NewChannel() const { return (*state_.next_channel_id)++; }
std::optional<WindowedInputShardReturnValue> ReshardAsWindowedInput(
const Window& window, const HloSharding& target,
HloInstruction* pad_value, bool mask_invalid_region = true,
bool force_mask_in_compact = false);
const PartitioningState& state() const { return state_; }
PartitionedHlo Replicate() const;
HloInstruction* ReplicatePartial(absl::Span<const int64_t> dims) const;
void set_state(PartitioningState state) { state_ = std::move(state); }
private:
PartitionedHlo ReshardNoCache(const HloSharding& target,
std::optional<Literal> pad_value = std::nullopt,
bool allow_full_replication = true) const;
PartitionedHlo Broadcast() const;
std::optional<PartitionedHlo> TryComplexReshardHandling(
const HloSharding& target) const;
PartitionedHlo ReshardWithAllToAll(
const HloSharding& target,
absl::Span<const std::pair<int64_t, int64_t>> source_target_dims) const;
PartitionedHlo ReshardWithCollectivePermute(const HloSharding& target) const;
std::optional<PartitionedHlo> ReshardToPartialReplicateWithAllGather(
const HloSharding& target) const;
std::optional<PartitionedHlo> ReshardFromPartialReplicateWithDynamicSlice(
const HloSharding& target) const;
std::optional<PartitionedHlo> ReshardPartialReplicateWithAllToAll(
const HloSharding& target) const;
HloInstruction* hlo_;
Shape base_shape_;
PartitioningState state_;
};
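// DFS visitor that partitions a single computation: each Handle* method
// rewrites the corresponding instruction into its per-partition form,
// recording the results as PartitionedHlo values.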
class SpmdPartitioningVisitor : public DfsHloVisitorWithDefault {
public:
SpmdPartitioningVisitor(
HloComputation* computation, int64_t num_partitions, int64_t num_replicas,
const SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, SpmdLogger* logger,
SpmdPartitionerOptions options, SpmdPartitioner* partitioner,
const CallGraph& call_graph);
SpmdPartitioningVisitor(const SpmdPartitioningVisitor& src);
absl::Status DefaultAction(HloInstruction* hlo) override;
absl::Status HandleAllReduce(HloInstruction* hlo) override;
absl::Status HandleBroadcast(HloInstruction* hlo) override;
absl::Status HandleCall(HloInstruction* hlo) override;
absl::Status HandleConstant(HloInstruction* hlo) override;
absl::Status HandleCustomCall(HloInstruction* hlo) override;
absl::Status HandleDot(HloInstruction* hlo) override;
absl::Status HandleDynamicSlice(HloInstruction* hlo) override;
absl::Status HandleDynamicUpdateSlice(HloInstruction* hlo) override;
absl::Status HandleFft(HloInstruction* hlo) override;
absl::Status HandleGather(HloInstruction* hlo) override;
absl::Status HandleGetTupleElement(HloInstruction* hlo) override;
absl::Status HandleInfeed(HloInstruction* hlo) override;
absl::Status HandleOptimizationBarrier(HloInstruction* hlo) override;
absl::Status HandleOutfeed(HloInstruction* hlo) override;
absl::Status HandlePad(HloInstruction* hlo) override;
absl::Status HandleParameter(HloInstruction* hlo) override;
absl::Status HandleReduce(HloInstruction* hlo) override;
absl::Status HandleReverse(HloInstruction* hlo) override;
absl::Status HandleWhile(HloInstruction* hlo) override;
absl::Status HandleConditional(HloInstruction* hlo) override;
absl::Status HandleReduceWindow(HloInstruction* hlo) override;
absl::Status HandleSelectAndScatter(HloInstruction* hlo) override;
absl::Status HandleTuple(HloInstruction* hlo) override;
absl::Status HandleRng(HloInstruction* hlo) override;
absl::Status HandleConvolution(HloInstruction* hlo) override;
absl::Status HandleConcatenate(HloInstruction* hlo) override;
absl::Status HandleScatter(HloInstruction* hlo) override;
absl::Status HandleSlice(HloInstruction* hlo) override;
absl::Status HandleSort(HloInstruction* hlo) override;
absl::Status HandleTranspose(HloInstruction* hlo) override;
absl::Status HandleReshape(HloInstruction* hlo) override;
absl::Status HandleIota(HloInstruction* hlo) override;
absl::Status HandlePartitionId(HloInstruction* hlo) override;
absl::Status HandleDotHelper(
HloInstruction* hlo,
const dot_as_convolution_util::DotConvolutionDimsInfo& dims_mapping,
absl::FunctionRef<absl::StatusOr<HloInstruction*>(
HloInstruction*, HloInstruction*, SpmdBuilder*,
const Window& conv_window)>
create_sharded_dot);
absl::Status HandleElementwise(HloInstruction* hlo);
absl::Status HandleSingleDevice(const HloInstruction* hlo);
absl::Status HandleCustomCallTopK(HloInstruction* hlo);
absl::Status HandleCustomCallSPMDInternal_RotateRight(HloInstruction* hlo);
virtual std::unique_ptr<SpmdPartitioningVisitor> Clone() const;
PartitionedHlo& GetPartitionedHlo(const HloInstruction* hlo) {
CHECK_EQ(partitioned_instructions_.count(hlo), 1);
return partitioned_instructions_.find(hlo)->second;
}
void SetPartitionedHlo(const HloInstruction* hlo,
const PartitionedHlo& partitioned_hlo) {
CHECK_EQ(partitioned_instructions_.count(hlo), 0);
partitioned_instructions_.emplace(hlo, partitioned_hlo);
changed_ = true;
}
void SetPartitionedHlo(const HloInstruction* hlo,
absl::FunctionRef<HloInstruction*()> func) {
HloInstruction* new_hlo = func();
new_hlo->set_sharding(hlo->sharding());
SetPartitionedHlo(
hlo, PartitionedHlo(new_hlo, hlo->shape(), MakePartitioningState()));
changed_ = true;
}
int64_t NewChannel() { return (*next_channel_id_)++; }
PartitionedHlo::PartitioningState MakePartitioningState();
SpmdBuilder* builder() { return &b_; }
virtual absl::StatusOr<bool> DoPartition(
HloComputation* computation, const HloSharding& root_sharding,
const SpmdPartitionerOptions& options);
virtual double GetComputationTimeInMilliSec(HloInstruction* hlo) {
return 0.0;
}
virtual double GetCommunicationTimeInMilliSec(
int64_t bytes, absl::Span<const ReplicaGroup> device_groups) {
return 0.0;
}
virtual int GetCommunicationMultiplier(
absl::Span<const ReplicaGroup> device_groups) {
return 1;
}
std::vector<ReplicaGroup> CreateReplicaGroups(
std::vector<std::vector<int64_t>>& groups);
const CallGraph& call_graph() { return call_graph_; }
int64_t num_partitions() const { return num_partitions_; }
int64_t num_replicas() const { return num_replicas_; }
SpmdLogger* logger() { return logger_; }
const SpmdLogger* logger() const { return logger_; }
const SpmdPartitionerOptions& options() const { return options_; }
SpmdPartitioner* partitioner() { return partitioner_; }
const SpmdPartitioner* partitioner() const { return partitioner_; }
SPMDCollectiveOpsCreator& collective_ops_creator() {
return collective_ops_creator_;
}
const SPMDCollectiveOpsCreator& collective_ops_creator() const {
return collective_ops_creator_;
}
HloModule* module() { return module_; }
const HloModule* module() const { return module_; }
void set_module(HloModule* module) { module_ = module; }
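  // Bookkeeping for a while loop generated for a windowed dot-general;
  // consumed by DoCodeMotionForWindowedDotGeneralLoops below.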
struct WindowedDotGeneralLoop {
HloInstruction* while_loop;
int64_t windowed_operand;
bool windowed_in_contracting_dims;
bool windowed_in_batch_dims;
bool operands_sharded_at_contracting_dims;
int64_t num_partitions;
std::vector<ReplicaGroup> loop_replica_groups;
};
protected:
absl::Status Preprocess(HloInstruction* hlo) override;
absl::Status Postprocess(HloInstruction* hlo) override;
absl::Status DoCodeMotionForWindowedDotGeneralLoops(
HloComputation* computation, const SpmdPartitionerOptions& options);
bool changed_;
HloModule* module_;
int64_t num_partitions_;
int64_t num_replicas_;
SPMDCollectiveOpsCreator collective_ops_creator_;
int64_t* next_channel_id_;
SpmdBuilder b_;
std::vector<WindowedDotGeneralLoop> windowed_dot_general_loops_;
HloInstruction* partition_id_;
private:
PartitionedHlo::ReshardCache reshard_cache_;
ConstHloInstructionMap<PartitionedHlo> partitioned_instructions_;
HloInstruction* visiting_hlo_;
SpmdLogger* logger_;
const SpmdPartitionerOptions options_;
SpmdPartitioner* partitioner_;
std::vector<HloSharding> visiting_hlo_operand_shardings_;
std::optional<HloSharding> visiting_hlo_sharding_;
std::optional<int64_t> visiting_num_partitions_;
std::optional<SPMDCollectiveOpsCreator> visiting_collective_ops_creator_;
std::optional<HloInstruction*> visiting_partition_id_;
std::vector<PartitionedHlo::PartitioningState> visiting_state_;
std::vector<std::vector<int64_t>> device_groups_;
const CallGraph& call_graph_;
};
}  // namespace spmd
}  // namespace xla
#endif
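
// ===== Implementation =====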
#include "xla/service/spmd/spmd_partitioner.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/tile_assignment.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/protobuf_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/computation_layout.h"
#include "xla/service/flatten_call_graph.h"
#include "xla/service/hlo_cse.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/shape_inference.h"
#include "xla/service/spmd/custom_call_handler.h"
#include "xla/service/spmd/spmd_partitioner_util.h"
#include "xla/ser | #include "xla/service/spmd/spmd_partitioner.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/sharding_format_picker.h"
#include "xla/service/spmd/spmd_prepare.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace spmd {
namespace {
using ::testing::_;
using ::testing::AllOf;
namespace op = xla::testing::opcode_matchers;
class SpmdPartitioningTest
: public HloTestBase,
public ::testing::WithParamInterface<ShardingFormatPicker::ShardingType> {
public:
absl::StatusOr<std::unique_ptr<HloModule>> PartitionComputation(
absl::string_view hlo_module, int64_t num_devices,
bool conv_halo_exchange_always_on_lhs = true,
bool choose_faster_windowed_einsum = false,
bool unroll_windowed_einsum = false,
bool bidirectional_windowed_einsum = false,
int64_t threshold_for_windowed_einsum_mib = -1) {
SpmdPartitionerOptions options;
options.conv_halo_exchange_always_on_lhs = conv_halo_exchange_always_on_lhs;
options.allow_module_signature_change = true;
options.choose_faster_windowed_einsum_over_mem =
choose_faster_windowed_einsum;
options.unroll_windowed_einsum = unroll_windowed_einsum;
options.bidirectional_windowed_einsum = bidirectional_windowed_einsum;
if (threshold_for_windowed_einsum_mib >= 0) {
options.threshold_for_windowed_einsum_mib =
threshold_for_windowed_einsum_mib;
}
auto collective_ops_creator =
GetDefaultCollectiveOpsCreator(num_devices, 1);
collective_ops_creator.create_cross_partition_all_gather = nullptr;
HloModuleConfig config = GetModuleConfigForTest();
config.set_use_spmd_partitioning(true);
config.set_num_partitions(num_devices);
TF_ASSIGN_OR_RETURN(auto module,
ParseAndReturnVerifiedModule(hlo_module, config));
ShardingFormatPicker format_picker(GetParam());
TF_ASSIGN_OR_RETURN(bool changed, format_picker.Run(module.get()));
if (changed) {
VLOG(1) << "Sharding format changed: "
<< module->ToString(HloPrintOptions()
.set_print_program_shape(false)
.set_print_operand_shape(false));
}
HloPassPipeline pass("spmd-partitioning");
    pass.AddPass<HloVerifier>(/*layout_sensitive=*/false,
                              /*allow_mixed_precision=*/false);
pass.AddPass<SpmdPrepare>();
pass.AddPass<SpmdPartitioner>(num_devices, 1, options,
collective_ops_creator);
    pass.AddPass<HloVerifier>(/*layout_sensitive=*/false,
                              /*allow_mixed_precision=*/false);
TF_RETURN_IF_ERROR(pass.Run(module.get()).status());
VerifyNoShardingOnCollectives(module.get());
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
void VerifyNoShardingOnCollectives(HloModule* module) {
for (const HloComputation* c : module->computations()) {
for (const HloInstruction* inst : c->instructions()) {
if (!absl::c_linear_search(
std::vector<HloOpcode>{
HloOpcode::kAllToAll, HloOpcode::kAllReduce,
HloOpcode::kAllGather, HloOpcode::kCollectivePermute,
HloOpcode::kReduceScatter},
inst->opcode())) {
continue;
}
EXPECT_FALSE(inst->has_sharding());
}
}
}
};
std::string TestParamToString(
const ::testing::TestParamInfo<ShardingFormatPicker::ShardingType>& data) {
switch (data.param) {
case ShardingFormatPicker::ShardingType::kV1:
return "V1";
case ShardingFormatPicker::ShardingType::kBestEffortV2:
return "BestEffortV2";
  }
  return "Unknown";  // Unreachable for valid enum values; keeps the non-void
                     // function from falling off the end.
}
INSTANTIATE_TEST_SUITE_P(
All, SpmdPartitioningTest,
::testing::Values(ShardingFormatPicker::ShardingType::kV1,
ShardingFormatPicker::ShardingType::kBestEffortV2),
TestParamToString);
TEST_P(SpmdPartitioningTest, SingleDeviceToReplicated) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%constant = s32[2,3]{1,0} constant({{1,1,1},{1,1,1}}),
sharding={maximal device=0}
ROOT %copy = s32[2,3]{1,0} copy(%constant), sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Copy(op::AllReduce(
op::Select(op::Broadcast(op::Compare()),
op::Constant(), op::Broadcast()))),
op::Shape("s32[2,3]")));
}
TEST_P(SpmdPartitioningTest, SingleDeviceCustomCall) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%constant = s32[2,3]{1,0} constant({{1,1,1},{1,1,1}}),
sharding={maximal device=0}
%cc = s32[2,3] custom-call(%constant), custom_call_target="SomeCustomCall",
sharding={maximal device=0}
ROOT %copy = s32[2,3]{1,0} copy(%cc), sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* custom_call = FindInstruction(module.get(), "cc.1");
EXPECT_NE(custom_call, nullptr);
EXPECT_NE(custom_call->parent(), module->entry_computation());
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Copy(op::AllReduce(
op::Select(op::Broadcast(op::Compare()),
op::Conditional(), op::Broadcast()))),
op::Shape("s32[2,3]")));
}
TEST_P(SpmdPartitioningTest, SingleDeviceToSingleDevice) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%constant = s32[2,3]{1,0} constant({{1,1,1},{1,1,1}}),
sharding={maximal device=0}
ROOT %copy = s32[2,3]{1,0} copy(%constant), sharding={maximal device=1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
HloInstruction* root = module->entry_computation()->root_instruction();
VLOG(1) << module->ToString();
EXPECT_THAT(root, op::Copy(AllOf(op::Copy(op::AllReduce(op::Select(
op::Broadcast(op::Compare()),
op::Constant(), op::Broadcast()))),
op::Shape("s32[2,3]"))));
}
TEST_P(SpmdPartitioningTest, SingleDeviceToTiled) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%constant = s32[2,3]{1,0} constant({{1,1,1},{1,1,1}}),
sharding={maximal device=0}
ROOT %copy = s32[2,3]{1,0} copy(%constant),
sharding={devices=[2,1]1,0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(
op::Copy(op::DynamicSlice(
op::AllReduce(op::Select(
op::Broadcast(op::Compare(op::PartitionId(), op::Constant())),
op::Constant(), op::Broadcast())),
op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId())),
op::Constant())),
op::Shape("s32[1,3]")));
}
TEST_P(SpmdPartitioningTest, PartitionCall) {
absl::string_view hlo_string = R"(
HloModule jit_f
g {
Arg_0.6 = s32[8,2]{1,0} parameter(0), sharding={devices=[2,2]<=[4]}
constant.0 = s32[] constant(2), sharding={replicated}
broadcast.0 = s32[8,2]{1,0} broadcast(constant.0), dimensions={}, sharding={devices=[2,2]<=[4]}
ROOT multiply.9 = s32[8,2]{1,0} multiply(Arg_0.6, broadcast.0), sharding={devices=[2,2]<=[4]}
}
ENTRY main {
Arg_0.1 = s32[8,2]{1,0} parameter(0), sharding={devices=[2,2]<=[4]}
constant.1 = s32[] constant(3), sharding={replicated}
broadcast.1 = s32[8,2]{1,0} broadcast(constant.1), dimensions={}, sharding={devices=[2,2]<=[4]}
multiply.4 = s32[8,2]{1,0} multiply(Arg_0.1, broadcast.1), sharding={devices=[2,2]<=[4]}
ROOT call = s32[8,2]{1,0} call(multiply.4), to_apply=g, sharding={devices=[2,2]<=[4]}, backend_config={"flag_configs":[],"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_DEFAULT","device_type":"DEVICE_TYPE_HOST","used_scoped_memory_configs":[]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Call(), op::Shape("s32[4,1]")));
HloInstruction* call_comp_root =
root->called_computations()[0]->root_instruction();
EXPECT_THAT(call_comp_root, AllOf(op::Multiply(op::Parameter(0),
op::Broadcast(op::Constant())),
op::Shape("s32[4,1]")));
}
TEST_P(SpmdPartitioningTest, TiledToReplicated) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%constant = s32[2,3]{1,0} constant({{1,1,1},{1,1,1}}),
sharding={devices=[2,1]0,1}
ROOT %copy = s32[2,3]{1,0} copy(%constant), sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
op::Copy(op::AllReduce(AllOf(
op::DynamicUpdateSlice(
op::Broadcast(), AllOf(op::Constant(), op::Shape("s32[1,3]")),
op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId())),
op::Constant()),
op::Shape("s32[2,3]")))));
}
TEST_P(SpmdPartitioningTest,
TiledToReplicatedWhenV2ShardingGeneratesReplicaGroupV2) {
if (GetParam() != ShardingFormatPicker::ShardingType::kBestEffortV2) {
GTEST_SKIP() << "This test only runs when input sharding is in V2 format.";
}
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%constant = s32[4,3]{1,0} constant({{1,1,1},{1,1,1},{1,1,1},{1,1,1}}),
sharding={devices=[4,1]<=[4]}
ROOT %copy = s32[4,3]{1,0} copy(%constant), sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
auto all_reduce_instruction =
std::find_if(module->entry_computation()->instructions().begin(),
module->entry_computation()->instructions().end(),
HloPredicateIsOp<HloOpcode::kAllReduce>);
EXPECT_NE(all_reduce_instruction,
module->entry_computation()->instructions().end());
EXPECT_TRUE((*all_reduce_instruction)
->device_list()
.iota_replica_group_list()
.has_value());
IotaReplicaGroupList list = (*all_reduce_instruction)
->device_list()
.iota_replica_group_list()
.value();
EXPECT_EQ(list.num_replica_groups(), 1);
EXPECT_EQ(list.num_devices_per_group(), 4);
EXPECT_THAT(list.reshape_dims(), ::testing::ElementsAre(4));
EXPECT_THAT(list.transpose_perm(), ::testing::ElementsAre(0));
}
TEST_P(SpmdPartitioningTest, TiledToSingleDevice) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%constant = s32[2,3]{1,0} constant({{1,1,1},{1,1,1}}),
sharding={devices=[2,1]0,1}
ROOT %copy = s32[2,3]{1,0} copy(%constant), sharding={maximal device=0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
op::Copy(op::Copy(op::AllReduce(AllOf(
op::DynamicUpdateSlice(
op::Broadcast(), AllOf(op::Constant(), op::Shape("s32[1,3]")),
op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId())),
op::Constant()),
op::Shape("s32[2,3]"))))));
}
TEST_P(SpmdPartitioningTest, TiledToTiledEven) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param = s32[8,2]{1,0} parameter(0), sharding={devices=[2,1]0,1}
ROOT %copy = s32[8,2]{1,0} copy(%param), sharding={devices=[1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(op::Copy(op::Reshape(op::Transpose(op::AllToAll(AllOf(
op::Reshape(op::Parameter()), op::Shape("s32[4,2,1]")))))),
op::Shape("s32[8,1]")));
}
TEST_P(SpmdPartitioningTest, TiledToTiledUneven) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param = f32[7,31,128]{2,1,0} parameter(0), sharding={devices=[1,2,1]0,1}
ROOT %copy = f32[7,31,128]{2,1,0} copy(%param), sharding={devices=[2,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(op::Copy(op::Slice(op::Reshape(AllOf(op::Transpose(op::AllToAll(
op::Reshape(AllOf(op::Pad(), op::Shape("f32[8,16,128]")))))))))));
}
TEST_P(SpmdPartitioningTest, GetTupleElementSwapDevice) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param.0 = (f32[2,3]{1,0}, u32[]) parameter(0),
sharding={{maximal device=1}, {maximal device=1}}
%gte.0 = f32[2,3]{1,0} get-tuple-element(%param.0), index=0,
sharding={maximal device=0}
%gte.1 = u32[] get-tuple-element(%param.0), index=1,
sharding={maximal device=0}
ROOT %tuple = (f32[2,3]{1,0}, u32[]) tuple(%gte.0, %gte.1),
sharding={{maximal device=0},{maximal device=0}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_THAT(root, op::Tuple());
EXPECT_THAT(root->operand(0),
op::Copy(op::AllReduce(op::Select(
op::Broadcast(op::Compare(op::PartitionId(), op::Constant())),
op::GetTupleElement(op::Parameter()), op::Broadcast()))));
EXPECT_THAT(root->operand(1),
op::Copy(op::AllReduce(op::Select(
op::Broadcast(op::Compare(op::PartitionId(), op::Constant())),
op::GetTupleElement(op::Parameter()), op::Broadcast()))));
}
TEST_P(SpmdPartitioningTest, GetTupleElementTiled) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param.0 = (f32[2,3]{1,0}, u32[2,3]{1,0}) parameter(0),
sharding={{replicated}, {replicated}}
gte.0 = f32[2,3]{1,0} get-tuple-element(param.0), index=0,
sharding={devices=[2,1]0,1}
gte.1 = u32[2,3]{1,0} get-tuple-element(param.0), index=1,
sharding={devices=[2,1]0,1}
ROOT %tuple = (f32[2,3]{1,0}, u32[2,3]{1,0}) tuple(gte.0, gte.1),
sharding={{devices=[2,1]0,1},{devices=[2,1]0,1}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_THAT(root, op::Tuple());
auto offset =
op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId()));
EXPECT_THAT(root->operand(0),
op::DynamicSlice(op::GetTupleElement(op::Parameter()), offset,
op::Constant()));
EXPECT_THAT(root->operand(1),
op::DynamicSlice(op::GetTupleElement(op::Parameter()), offset,
op::Constant()));
}
TEST_P(SpmdPartitioningTest, TiledInfeed) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
token0 = token[] after-all(), sharding={maximal device=0}
infeed = (f32[8,2]{1,0}, token[]) infeed(token0),
sharding={{devices=[2,1]0,1}, {maximal device=0}}
ROOT infeed.data = f32[8,2]{1,0} get-tuple-element(infeed), index=0,
sharding={maximal device=0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
op::Copy(op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(),
op::GetTupleElement(
AllOf(op::Infeed(), op::Shape("(f32[4,2]{1,0}, token[])"))),
op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId())),
op::Constant()))));
}
TEST_P(SpmdPartitioningTest, UnevenTiledInfeed) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
token0 = token[] after-all(), sharding={maximal device=0}
infeed = (f32[9,2]{1,0}, token[]) infeed(token0),
sharding={{devices=[2,1]0,1}, {maximal device=0}}
ROOT infeed.data = f32[9,2]{1,0} get-tuple-element(infeed), index=0,
sharding={devices=[2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root, AllOf(op::Shape("f32[5,2]"), op::GetTupleElement(op::Conditional(
op::Convert(op::PartitionId()),
op::AfterAll(), op::AfterAll()))));
EXPECT_THAT(
root->operand(0)->called_computations()[0]->root_instruction(),
AllOf(op::Shape("(f32[5,2], token[])"), op::Infeed(op::Parameter())));
auto second_infeed =
AllOf(op::Shape("(f32[4,2], token[])"), op::Infeed(op::Parameter()));
EXPECT_THAT(root->operand(0)->called_computations()[1]->root_instruction(),
AllOf(op::Shape("(f32[5,2], token[])"),
op::Tuple(op::Pad(op::GetTupleElement(second_infeed),
op::Constant()),
op::GetTupleElement(second_infeed))));
}
TEST_P(SpmdPartitioningTest, UnevenTiledTupleInfeed) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
token0 = token[] after-all(), sharding={maximal device=0}
infeed = ((f32[9,2]{1,0}, f32[2]{0}), token[]) infeed(token0),
sharding={{devices=[2,1]0,1}, {replicated}, {maximal device=0}}
ROOT infeed.data = (f32[9,2]{1,0}, f32[2]{0}) get-tuple-element(infeed),
index=0, sharding={{devices=[2,1]0,1}, {replicated}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("(f32[5,2], f32[2])"),
op::GetTupleElement(op::Conditional(
op::Convert(op::PartitionId()), op::AfterAll(),
op::AfterAll()))));
EXPECT_THAT(root->operand(0)->called_computations()[0]->root_instruction(),
AllOf(op::Shape("((f32[5,2], f32[2]), token[])"),
op::Infeed(op::Parameter())));
auto second_infeed = AllOf(op::Shape("((f32[4,2], f32[2]), token[])"),
op::Infeed(op::Parameter()));
EXPECT_THAT(
root->operand(0)->called_computations()[1]->root_instruction(),
AllOf(op::Shape("((f32[5,2], f32[2]), token[])"),
op::Tuple(op::Tuple(op::Pad(op::GetTupleElement(
op::GetTupleElement(second_infeed)),
op::Constant()),
op::GetTupleElement(
op::GetTupleElement(second_infeed))),
op::GetTupleElement(second_infeed))));
}
TEST_P(SpmdPartitioningTest, MixedTupleInfeed) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
token0 = token[] after-all(), sharding={maximal device=0}
infeed = ((f32[9,2]{1,0}, f32[2]{0}), token[]) infeed(token0),
sharding={{maximal device=0}, {maximal device=1}, {maximal device=0}}
ROOT infeed.data = (f32[9,2]{1,0}, f32[2]{0}) get-tuple-element(infeed),
index=0, sharding={{maximal device=0}, {maximal device=1}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("(f32[9,2], f32[2])"),
op::GetTupleElement(op::Conditional(
op::Convert(op::PartitionId()), op::AfterAll(),
op::AfterAll()))));
auto first_infeed = AllOf(op::Shape("((f32[9,2], ()), token[])"),
op::Infeed(op::Parameter()));
EXPECT_THAT(root->operand(0)->called_computations()[0]->root_instruction(),
AllOf(op::Shape("((f32[9,2], f32[2]), token[])"),
op::Tuple(op::Tuple(op::GetTupleElement(
op::GetTupleElement(first_infeed)),
op::Broadcast(op::Constant())),
op::GetTupleElement(first_infeed))));
auto second_infeed =
AllOf(op::Shape("(((), f32[2]), token[])"), op::Infeed(op::Parameter()));
EXPECT_THAT(root->operand(0)->called_computations()[1]->root_instruction(),
AllOf(op::Shape("((f32[9,2], f32[2]), token[])"),
op::Tuple(op::Tuple(op::Broadcast(op::Constant()),
op::GetTupleElement(op::GetTupleElement(
second_infeed))),
op::GetTupleElement(second_infeed))));
}
TEST_P(SpmdPartitioningTest, TiledToReplicatedReduce) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
constant = f32[3,3]{1,0} constant({{1,1,1},{1,1,1},{1,1,1}}),
sharding={devices=[2,1]0,1}
constant.1 = f32[] constant(0), sharding={replicated}
ROOT reduce = f32[] reduce(constant, constant.1), dimensions={0,1},
to_apply=sum, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
op::AllReduce(op::Reduce(
op::Select(
op::Compare(op::Add(op::Iota(), op::Broadcast(op::Reshape())),
op::Broadcast(op::Constant())),
AllOf(op::Shape("f32[2,3]{1,0}"),
op::DynamicSlice(op::Pad(op::Constant(), op::Constant()),
op::Reshape(), op::Constant())),
op::Broadcast(op::Constant())),
op::Constant())));
}
TEST_P(SpmdPartitioningTest, TiledElementwise) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
constant = f32[3,3]{1,0} constant({{1,1,1},{1,1,1},{1,1,1}}),
sharding={devices=[2,1]0,1}
constant.1 = f32[3,3]{1,0} constant({{2,2,2},{2,2,2},{2,2,2}}),
sharding={replicated}
multiply = f32[3,3]{1,0} multiply(constant, constant.1),
sharding={devices=[2,1]0,1}
ROOT add = f32[3,3]{1,0} add(multiply, constant.1),
sharding={devices=[2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(
op::Shape("f32[2,3]{1,0}"),
op::Add(op::Multiply(
op::DynamicSlice(op::Pad(op::Constant(), op::Constant()),
op::Reshape(), op::Constant()),
op::DynamicSlice(op::Pad(op::Constant(), op::Constant()),
op::Reshape(), op::Constant())),
op::DynamicSlice(op::Pad(op::Constant(), op::Constant()),
op::Reshape(), op::Constant()))));
}
TEST_P(SpmdPartitioningTest, TiledAllReduce) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
parameter = f32[3,3]{1,0} parameter(0), sharding={devices=[2,1]0,1}
ROOT all-reduce = f32[3,3]{1,0} all-reduce(parameter), to_apply=sum,
replica_groups={}, sharding={devices=[2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root, AllOf(op::Shape("f32[2,3]{1,0}"), op::AllReduce(op::Parameter(0))));
}
TEST_P(SpmdPartitioningTest, BroadcastOnlyNewDimsSharded) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
constant = f32[4,3]{1,0} constant({{1,1,1},{1,1,1},{1,1,1},{1,1,1}}),
sharding={replicated}
ROOT broadcast = f32[3,4,3]{2,1,0} broadcast(constant), dimensions={1,2},
sharding={devices=[2,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("f32[2,4,3]{2,1,0}"),
op::Broadcast(op::Constant())));
}
TEST_P(SpmdPartitioningTest, BroadcastOnlyOldDimsSharded) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
constant = f32[4,3]{1,0} constant({{1,1,1},{1,1,1},{1,1,1},{1,1,1}}),
sharding={replicated}
ROOT broadcast = f32[4,4,3]{2,1,0} broadcast(constant), dimensions={1,2},
sharding={devices=[1,2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("f32[4,2,3]{2,1,0}"),
op::Broadcast(op::DynamicSlice(
op::Constant(), op::Reshape(), op::Constant()))));
}
TEST_P(SpmdPartitioningTest, BroadcastBothOldAndNewDimsSharded) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
constant = f32[4,3]{1,0} constant({{1,1,1},{1,1,1},{1,1,1},{1,1,1}}),
sharding={replicated}
ROOT broadcast = f32[4,4,3]{2,1,0} broadcast(constant), dimensions={1,2},
sharding={devices=[2,2,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(op::Shape("f32[2,2,3]{2,1,0}"),
op::Broadcast(AllOf(op::Shape("f32[2,3]{1,0}"),
op::DynamicSlice(op::Constant(), op::Reshape(),
op::Constant())))));
}
TEST_P(SpmdPartitioningTest,
BroadcastBothOldAndNewDimsShardedPartiallySharded) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %entry {
%param = f32[4,3]{1,0} parameter(0),
sharding={devices=[1,2,4]<=[2,2,2]T(1,0,2) last_tile_dim_replicate}
ROOT %broadcast = f32[4,4,3]{2,1,0} broadcast(%param), dimensions={1,2},
sharding={devices=[2,1,2,2]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(op::Shape("f32[2,4,2]"),
op::Broadcast(AllOf(op::Shape("f32[4,2]"), op::Parameter(0)))));
}
TEST_P(SpmdPartitioningTest,
ConvWithParallelDimAndNonParallelSpatialDimPartitioned) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,12,12,24,32] parameter(0)
%lhs.copy = f32[32,12,12,24,32] copy(%lhs),
sharding={devices=[2,2,1,1,1]<=[4]}
%rhs = f32[32,6,6,16,32] parameter(1)
%rhs.copy = f32[32,6,6,16,32] copy(%rhs),
sharding={devices=[2,2,1,1,1]<=[4]}
ROOT %conv = f32[32,7,7,24,16] convolution(%lhs.copy, %rhs.copy),
dim_labels=012bf_012oi->012bf,
window={size=32x6x6 stride=31x1x1 lhs_dilate=32x1x1},
sharding={devices=[2,2,1,1,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(op::Copy(op::DynamicSlice(
op::Parameter(), op::Reshape(), op::Reshape(),
op::Constant(), op::Constant(), op::Constant())),
op::Shape("f32[16,6,12,24,32]"));
const auto rhs = AllOf(op::Copy(op::DynamicSlice(
op::Parameter(), op::Reshape(), op::Reshape(),
op::Constant(), op::Constant(), op::Constant())),
op::Shape("f32[16,3,6,16,32]"));
auto resharded_rhs =
AllOf(op::Shape("f32[16,6,6,16,32]"),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(), rhs, op::Constant(), op::Reshape(),
op::Constant(), op::Constant(), op::Constant())));
auto left_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
op::Shape("f32[16,2,12,24,32]"));
auto right_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
op::Shape("f32[16,3,12,24,32]"));
EXPECT_THAT(
root,
AllOf(op::Convolution(
op::Select(op::Compare(),
op::DynamicSlice(
op::Concatenate(left_halo, lhs, right_halo),
op::Constant(), op::Add(), op::Constant(),
2,004 | cpp | tensorflow/tensorflow | canonicalize_all_gather_for_cse | third_party/xla/xla/service/spmd/canonicalize_all_gather_for_cse.cc | third_party/xla/xla/service/spmd/canonicalize_all_gather_for_cse_test.cc

#ifndef XLA_SERVICE_SPMD_CANONICALIZE_ALL_GATHER_FOR_CSE_H_
#define XLA_SERVICE_SPMD_CANONICALIZE_ALL_GATHER_FOR_CSE_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
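// Rewrites an all-gather whose operand is a chain of reshapes that merely
// insert or delete size-1 dimensions: the all-gather is re-created directly
// on the underlying data and a single reshape is emitted after it. Equivalent
// all-gathers then become syntactically identical, which lets a later CSE
// pass merge them.
//
// Illustrative usage, a hedged sketch following the unit test further below
// (`module` is assumed to be a parsed HloModule):
//
//   HloPassPipeline pipeline("all-gather-cse");
//   pipeline.AddPass<CanonicalizeAllGatherForCSE>();
//   TF_RETURN_IF_ERROR(pipeline.Run(module.get()).status());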
class CanonicalizeAllGatherForCSE : public HloModulePass {
public:
CanonicalizeAllGatherForCSE() : next_channel_id_(0) {}
~CanonicalizeAllGatherForCSE() override = default;
absl::string_view name() const override { return "canon-all-gather-for-cse"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
absl::StatusOr<bool> RunOnComputation(HloComputation* comp);
int64_t NextChannelId() { return next_channel_id_++; }
int64_t next_channel_id_;
};
}  // namespace xla
#endif
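
// ===== Implementation =====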
#include "xla/service/spmd/canonicalize_all_gather_for_cse.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/utils/hlo_query.h"
namespace xla {
absl::StatusOr<bool> CanonicalizeAllGatherForCSE::RunOnComputation(
HloComputation* comp) {
bool changed = false;
std::vector<HloInstruction*> ordered_hlos = comp->MakeInstructionPostOrder();
for (HloInstruction* hlo : ordered_hlos) {
HloAllGatherInstruction* ag = DynCast<HloAllGatherInstruction>(hlo);
if (!ag || ag->operand_count() > 1) {
continue;
}
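    // Look through reshapes that merely insert or delete size-1 dimensions to
    // find the underlying data being gathered.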
HloInstruction* real_data = ag->mutable_operand(0);
while (real_data->ReshapeMerelyInsertsOrDeletes1SizedDimensions()
.has_value()) {
real_data = real_data->mutable_operand(0);
}
if (real_data == ag->operand(0)) {
continue;
}
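    // Map the all-gather dimension onto the underlying data's shape; if the
    // reshape did not leave that dimension unmodified, recompute it from the
    // element count of the more-major dimensions.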
const int64_t ag_dim = ag->all_gather_dimension();
int64_t new_ag_dim;
if (auto dims = ShapeUtil::ReshapeLeavesDimensionsUnmodified(
ag->operand(0)->shape(), real_data->shape(), {ag_dim})) {
new_ag_dim = dims->at(0);
} else {
int64_t major_elements =
Product(absl::MakeConstSpan(ag->operand(0)->shape().dimensions())
.subspan(0, ag_dim));
new_ag_dim = 0;
while (major_elements > 1) {
major_elements /= real_data->shape().dimensions(new_ag_dim++);
}
}
if (new_ag_dim == real_data->shape().rank()) {
continue;
}
const int64_t all_gather_participants =
ShapeUtil::ElementsIn(ag->shape()) /
ShapeUtil::ElementsIn(ag->operand(0)->shape());
Shape new_ag_shape = real_data->shape();
new_ag_shape.set_dimensions(
new_ag_dim,
all_gather_participants * new_ag_shape.dimensions(new_ag_dim));
std::optional<int64_t> new_channel_id =
ag->channel_id() ? std::make_optional(this->NextChannelId())
: std::nullopt;
HloInstruction* new_ag =
comp->AddInstruction(HloInstruction::CreateAllGather(
new_ag_shape, {real_data}, new_ag_dim,
ag->device_list(), ag->constrain_layout(), new_channel_id,
ag->use_global_device_ids()));
ag->SetupDerivedInstruction(new_ag);
HloInstruction* new_formatting = comp->AddInstruction(
HloInstruction::CreateReshape(ag->shape(), new_ag));
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(ag, new_formatting));
changed = true;
}
return changed;
}
absl::StatusOr<bool> CanonicalizeAllGatherForCSE::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
next_channel_id_ = hlo_query::NextChannelId(*module);
for (HloComputation* comp : module->computations(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool comp_changed, RunOnComputation(comp));
changed |= comp_changed;
}
return changed;
}
}  // namespace xla

// ===== Unit test =====
#include "xla/service/spmd/canonicalize_all_gather_for_cse.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace spmd {
namespace {
using ::testing::_;
using ::testing::AllOf;
namespace op = xla::testing::opcode_matchers;
class AllGatherCanonicalizeTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module) {
TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(
hlo_module, GetModuleConfigForTest()));
HloPassPipeline pipeline("all-gather-cse");
pipeline.AddPass<CanonicalizeAllGatherForCSE>();
TF_RETURN_IF_ERROR(pipeline.Run(module.get()).status());
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
absl::Status RunPassOnModule(HloModule* module,
int64_t distance_threshold = 100) {
HloPassPipeline pipeline("all-gather-cse");
pipeline.AddPass<CanonicalizeAllGatherForCSE>();
TF_RETURN_IF_ERROR(pipeline.Run(module).status());
return absl::OkStatus();
}
};
TEST_F(AllGatherCanonicalizeTest, SimpleReshape) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
resh = s32[1,8]{1,0} reshape(param0)
ROOT ag = s32[2,8]{1,0} all-gather(resh), replica_groups={{0,1}},
dimensions={0}, channel_id=0, use_global_device_ids=true
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
const HloInstruction* const reshape =
module->entry_computation()->root_instruction();
EXPECT_THAT(reshape,
AllOf(op::Reshape(op::AllGather(_)), op::Shape("s32[2,8]")));
}
TEST_F(AllGatherCanonicalizeTest, MultipleDegenerateReshapes) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
resh = s32[1,8]{1,0} reshape(param0)
resh2 = s32[1,8,1,1]{3,2,1,0} reshape(resh)
ROOT ag = s32[2,8,1,1]{3,2,1,0} all-gather(resh2), replica_groups={{0,1}},
dimensions={0}, channel_id=0, use_global_device_ids=true
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
const HloInstruction* const reshape =
module->entry_computation()->root_instruction();
EXPECT_THAT(reshape, op::Reshape(op::AllGather(op::Parameter())));
}
TEST_F(AllGatherCanonicalizeTest, MultipleDegenerateReshapes2) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
resh = s32[8,1,1]{2,1,0} reshape(param0)
resh2 = s32[1,8,1,1]{3,2,1,0} reshape(resh)
ROOT ag = s32[2,8,1,1]{3,2,1,0} all-gather(resh2), replica_groups={{0,1}},
dimensions={0}, channel_id=0, use_global_device_ids=true
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
const HloInstruction* const reshape =
module->entry_computation()->root_instruction();
EXPECT_THAT(reshape, op::Reshape(op::AllGather(op::Parameter())));
}
TEST_F(AllGatherCanonicalizeTest, MultipleDegenerateReshapesNoDim0) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
resh = s32[8,1,1]{2,1,0} reshape(param0)
resh2 = s32[1,8,1,1]{3,2,1,0} reshape(resh)
ROOT ag = s32[1,16,1,1]{3,2,1,0} all-gather(resh2), replica_groups={{0,1}},
dimensions={1}, channel_id=0, use_global_device_ids=true
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
const HloInstruction* const reshape =
module->entry_computation()->root_instruction();
EXPECT_THAT(reshape, op::Reshape(op::AllGather(op::Parameter())));
}
TEST_F(AllGatherCanonicalizeTest, NonDegenerateReshape) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
resh = s32[8,1,1]{2,1,0} reshape(param0)
resh2 = s32[1,4,2,1,1]{4,3,2,1,0} reshape(resh)
ROOT ag = s32[2,4,2,1,1]{4,3,2,1,0} all-gather(resh2), replica_groups={{0,1}},
dimensions={0}, channel_id=0, use_global_device_ids=true
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
const HloInstruction* const reshape =
module->entry_computation()->root_instruction();
EXPECT_THAT(reshape, AllOf(op::AllGather(op::Reshape(op::Reshape(_))),
op::Shape("s32[2,4,2,1,1]")));
}
}
}
}  // namespace xla
2,005 | cpp | tensorflow/tensorflow | partition_assignment | third_party/xla/xla/service/spmd/partition_assignment.cc | third_party/xla/xla/service/spmd/partition_assignment_test.cc

#ifndef XLA_SERVICE_SPMD_PARTITION_ASSIGNMENT_H_
#define XLA_SERVICE_SPMD_PARTITION_ASSIGNMENT_H_
#include <cstdint>
#include <memory>
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
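// Base class for partition-assignment algorithms: Run() applies the algorithm
// to a module and reports whether anything changed. The kExp* kinds appear to
// be placeholders for experimental algorithms.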
class PartitioningAlgorithm {
public:
enum class AlgorithmKind {
kNoop,
kExp0,
kExp1,
kExp2,
};
PartitioningAlgorithm() = delete;
PartitioningAlgorithm(const PartitioningAlgorithm&) = delete;
PartitioningAlgorithm& operator=(const PartitioningAlgorithm&) = delete;
virtual ~PartitioningAlgorithm() = default;
static std::unique_ptr<PartitioningAlgorithm> CreateNoopPartitioning(
int64_t num_partitions);
const AlgorithmKind& kind() const;
absl::string_view name() const;
int64_t num_partitions() const;
virtual absl::StatusOr<bool> Run(HloModule* module) const = 0;
protected:
explicit PartitioningAlgorithm(AlgorithmKind kind, int64_t num_partitions);
private:
AlgorithmKind kind_ = AlgorithmKind::kNoop;
int64_t num_partitions_;
};
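// Algorithm that deliberately does nothing; its Run() always reports that the
// module was left unchanged.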
class NoopPartitioning : public PartitioningAlgorithm {
public:
explicit NoopPartitioning(int64_t num_partitions);
absl::StatusOr<bool> Run(HloModule* module) const override;
};
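// Pass that selects a PartitioningAlgorithm (via
// DebugOptions::xla_partitioning_algorithm; currently only the no-op
// algorithm is supported) and runs it over the module.
//
// Illustrative usage, a hedged sketch based on the unit test further below
// (`module` is assumed to exist):
//
//   PartitionAssignment partition_assignment(/*num_partitions=*/16);
//   TF_ASSERT_OK_AND_ASSIGN(bool changed,
//                           partition_assignment.Run(module.get()));
//   // partition_assignment.algorithm() is populated after Run().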
class PartitionAssignment : public HloModulePass {
public:
explicit PartitionAssignment(int64_t num_partitions);
absl::string_view name() const override;
virtual std::unique_ptr<PartitioningAlgorithm> ChoosePartitioningAlgorithm(
const HloModule& module) const;
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
const PartitioningAlgorithm* algorithm();
int64_t num_partitions() const;
private:
std::unique_ptr<PartitioningAlgorithm> algorithm_ = nullptr;
int64_t num_partitions_;
};
}  // namespace xla
#endif
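
// ===== Implementation =====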
#include "xla/service/spmd/partition_assignment.h"
#include <cstdint>
#include <memory>

#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/xla.pb.h"
namespace xla {
PartitioningAlgorithm::PartitioningAlgorithm(AlgorithmKind kind,
int64_t num_partitions) {
kind_ = kind;
CHECK_GT(num_partitions, 1) << "Number of partitions must be at least two.";
num_partitions_ = num_partitions;
}
absl::string_view PartitioningAlgorithm::name() const {
switch (kind_) {
case AlgorithmKind::kNoop:
default:
return "Noop";
}
}
const PartitioningAlgorithm::AlgorithmKind& PartitioningAlgorithm::kind()
const {
return kind_;
}
int64_t PartitioningAlgorithm::num_partitions() const {
return num_partitions_;
}
std::unique_ptr<PartitioningAlgorithm>
PartitioningAlgorithm::CreateNoopPartitioning(int64_t num_partitions) {
return std::make_unique<NoopPartitioning>(num_partitions);
}
NoopPartitioning::NoopPartitioning(int64_t num_partitions)
: PartitioningAlgorithm(AlgorithmKind::kNoop, num_partitions) {
VLOG(2) << "Created a no-op algorithm with the number of partitions: "
<< num_partitions;
}
absl::StatusOr<bool> NoopPartitioning::Run(HloModule* module) const {
VLOG(2) << "No-op algorithm was called to partition module: "
<< module->name();
return false;
}
PartitionAssignment::PartitionAssignment(int64_t num_partitions) {
CHECK_GT(num_partitions, 1) << "Number of partitions must be at least two.";
num_partitions_ = num_partitions;
}
absl::string_view PartitionAssignment::name() const {
return "partitioning-assignment";
}
const PartitioningAlgorithm* PartitionAssignment::algorithm() {
return algorithm_.get();
}
int64_t PartitionAssignment::num_partitions() const { return num_partitions_; }
std::unique_ptr<PartitioningAlgorithm>
PartitionAssignment::ChoosePartitioningAlgorithm(
const HloModule& module) const {
auto algo = module.config().debug_options().xla_partitioning_algorithm();
CHECK_EQ(algo, DebugOptions::PARTITIONING_ALGORITHM_NOOP);
return PartitioningAlgorithm::CreateNoopPartitioning(num_partitions());
}
absl::StatusOr<bool> PartitionAssignment::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(2) << "Running partition assignment on module " << module->name();
algorithm_ = ChoosePartitioningAlgorithm(*module);
return algorithm()->Run(module);
}
}  // namespace xla

// ===== Unit test =====
#include "xla/service/spmd/partition_assignment.h"
#include <memory>
#include "xla/tests/hlo_test_base.h"
#include "xla/xla.pb.h"
namespace xla {
namespace {
using PartitionAssignmentTest = HloTestBase;
TEST_F(PartitionAssignmentTest, NoopAlg) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[16,16]{1,0} parameter(0)
ROOT %copy = f32[16,16]{1,0} copy(%param0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_partitioning_algorithm(
DebugOptions::PARTITIONING_ALGORITHM_NOOP);
PartitionAssignment partition_assignment(16);
EXPECT_EQ(partition_assignment.algorithm(), nullptr);
TF_ASSERT_OK_AND_ASSIGN(bool changed, partition_assignment.Run(module.get()));
EXPECT_FALSE(changed);
EXPECT_NE(partition_assignment.algorithm(), nullptr);
EXPECT_EQ(partition_assignment.algorithm()->kind(),
PartitioningAlgorithm::AlgorithmKind::kNoop);
}
}
}  // namespace xla
2,006 | cpp | tensorflow/tensorflow | whole_graph_manual_pass | third_party/xla/xla/service/spmd/whole_graph_manual_pass.cc | third_party/xla/xla/service/spmd/whole_graph_manual_pass_test.cc

#ifndef XLA_SERVICE_SPMD_WHOLE_GRAPH_MANUAL_PASS_H_
#define XLA_SERVICE_SPMD_WHOLE_GRAPH_MANUAL_PASS_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
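// Forces manual sharding onto the whole graph: parameters, computation roots,
// partition-id instructions, collectives, and side-effecting instructions
// receive {manual} sharding (tuple shapes get a manual sharding for every
// tuple element), while every other instruction has its sharding cleared.
//
// Illustrative usage, a hedged sketch following the unit test further below:
//
//   HloPassPipeline pipeline("whole-graph-manual-pass");
//   pipeline.AddPass<WholeGraphManualPass>();
//   TF_RETURN_IF_ERROR(pipeline.Run(module.get()).status());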
class WholeGraphManualPass : public HloModulePass {
public:
WholeGraphManualPass() : HloModulePass() {}
absl::string_view name() const override { return "whole-graph-manual-pass"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}  // namespace xla
#endif
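
// ===== Implementation =====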
#include "xla/service/spmd/whole_graph_manual_pass.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
namespace xla {
namespace {
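// Returns true for instructions whose sharding should simply be cleared;
// parameters, computation roots, partition-ids, collectives, and
// side-effecting instructions are excluded (they get manual sharding
// instead).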
bool ShouldClearInstruction(HloInstruction* inst) {
return inst->opcode() != HloOpcode::kParameter &&
inst != inst->parent()->root_instruction() &&
inst->opcode() != HloOpcode::kPartitionId &&
DynCast<HloCollectiveInstruction>(inst) == nullptr &&
!inst->HasSideEffectNoRecurse();
}
absl::StatusOr<bool> RunOnComputation(HloComputation* computation) {
bool changed = false;
for (HloInstruction* inst : computation->instructions()) {
if (ShouldClearInstruction(inst)) {
inst->clear_sharding();
changed = true;
continue;
}
if (inst->shape().IsTuple()) {
inst->set_sharding(
HloSharding::SingleTuple(inst->shape(), HloSharding::Manual()));
changed = true;
} else {
inst->set_sharding(HloSharding::Manual());
changed = true;
}
}
return changed;
}
}
absl::StatusOr<bool> WholeGraphManualPass::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (auto* comp : module->computations()) {
TF_ASSIGN_OR_RETURN(bool comp_changed, RunOnComputation(comp));
changed |= comp_changed;
}
return changed;
}
}  // namespace xla

// ===== Unit test =====
#include "xla/service/spmd/whole_graph_manual_pass.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace spmd {
namespace {
using ::testing::_;
using ::testing::AllOf;
using ::testing::AnyOf;
namespace op = xla::testing::opcode_matchers;
class WholeGraphManualPassTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module) {
TF_ASSIGN_OR_RETURN(
auto module,
ParseAndReturnVerifiedModule(
hlo_module,
            GetModuleConfigForTest(/*replica_count=*/1,
                                   /*num_partitions=*/4)));
HloPassPipeline pipeline("whole-graph-manual-pass");
pipeline.AddPass<WholeGraphManualPass>();
TF_RETURN_IF_ERROR(pipeline.Run(module.get()).status());
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
absl::Status RunPassOnModule(HloModule* module,
int64_t distance_threshold = 100) {
HloPassPipeline pipeline("all-gather-cse");
pipeline.AddPass<WholeGraphManualPass>();
TF_RETURN_IF_ERROR(pipeline.Run(module).status());
return absl::OkStatus();
}
};
TEST_F(WholeGraphManualPassTest, SimpleRewrite) {
absl::string_view hlo_string = R"(
HloModule module
body {
p_body = (f32[2], f32[2], f32[2], s32[]) parameter(0)
val.0 = f32[2] get-tuple-element(p_body), index=0
val.1 = f32[2] get-tuple-element(p_body), index=1
add = f32[2] add(val.0, val.1)
const = s32[] constant(-1)
ROOT root = (f32[2], f32[2], f32[2], s32[]) tuple(val.0, val.1, add, const)
}
condition {
p_cond = (f32[2], f32[2], f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=3
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param0 = (s32[8]{0}, s32[8]{0}) parameter(0)
g1 = s32[8]{0} get-tuple-element(param0), index=0
g2 = s32[8]{0} get-tuple-element(param0), index=1
resh1 = s32[1,8]{1,0} reshape(g1)
resh2 = s32[1,8]{1,0} reshape(g2)
param1 = f32[2] parameter(1)
param2 = s32[] parameter(2)
while_init = (f32[2], f32[2], f32[2], s32[]) tuple(param1, param1, param1, param2)
while = (f32[2], f32[2], f32[2], s32[]) while(while_init), condition=condition, body=body
g3 = f32[2] get-tuple-element(while), index=0
ROOT t = (s32[1,8]{1,0}, s32[1,8]{1,0}, f32[2]) tuple(resh1, resh2, g3), sharding={{devices=[1,4]0,1,2,3}, {devices=[1,4]0,1,2,3}, {replicated}}
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
for (auto* i : module->entry_computation()->instructions()) {
if (module->entry_computation()->root_instruction() == i) {
EXPECT_THAT(i, op::Sharding("{{manual}, "
"{manual}, {manual}}"));
} else if (i->opcode() == HloOpcode::kParameter) {
EXPECT_THAT(i, AnyOf(op::Sharding("{manual}"),
op::Sharding("{{manual},{manual}}")));
}
}
}
TEST_F(WholeGraphManualPassTest, SimplePartitionIdCollectives) {
absl::string_view hlo_string = R"(
HloModule module
body {
p_body = (f32[2], f32[2], f32[2], s32[]) parameter(0)
val.0 = f32[2] get-tuple-element(p_body), index=0
val.1 = f32[2] get-tuple-element(p_body), index=1
t = token[] after-all()
p = u32[] partition-id()
ag = f32[8] all-gather(val.1), dimensions={0}, replica_groups={{0,1,2,3}}, use_global_device_ids=true, channel_id=1
s = (f32[8], s32[], token[]) send(ag, t), channel_id=2
sd = token[] send-done(s), channel_id=2
add = f32[2] add(val.0, val.1)
const = s32[] constant(-1)
ROOT root = (f32[2], f32[2], f32[2], s32[]) tuple(val.0, val.1, add, const)
}
condition {
p_cond = (f32[2], f32[2], f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=3
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param0 = (s32[8]{0}, s32[8]{0}) parameter(0)
g1 = s32[8]{0} get-tuple-element(param0), index=0
g2 = s32[8]{0} get-tuple-element(param0), index=1
resh1 = s32[1,8]{1,0} reshape(g1)
resh2 = s32[1,8]{1,0} reshape(g2)
param1 = f32[2] parameter(1)
param2 = s32[] parameter(2)
while_init = (f32[2], f32[2], f32[2], s32[]) tuple(param1, param1, param1, param2)
while = (f32[2], f32[2], f32[2], s32[]) while(while_init), condition=condition, body=body
g3 = f32[2] get-tuple-element(while), index=0
ROOT t = (s32[1,8]{1,0}, s32[1,8]{1,0}, f32[2]) tuple(resh1, resh2, g3), sharding={{devices=[1,4]0,1,2,3}, {devices=[1,4]0,1,2,3}, {replicated}}
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
for (auto* c : module->computations()) {
for (auto* i : c->instructions()) {
if (c->root_instruction() == i) {
EXPECT_THAT(
i, AnyOf(op::Sharding("{manual}"),
op::Sharding("{{manual},{manual},{manual}}"),
op::Sharding("{{manual}, {manual}, {manual}, {manual}}")));
} else if (i->opcode() == HloOpcode::kParameter) {
EXPECT_THAT(
i,
AnyOf(op::Sharding("{manual}"), op::Sharding("{{manual},{manual}}"),
op::Sharding("{{manual},{manual},{manual},{manual}}")));
} else if (i->opcode() == HloOpcode::kPartitionId ||
i->opcode() == HloOpcode::kAllGather ||
i->opcode() == HloOpcode::kSendDone) {
EXPECT_THAT(i, op::Sharding("{manual}"));
} else if (i->opcode() == HloOpcode::kSend) {
EXPECT_THAT(i, op::Sharding("{{manual},{manual},{manual}}"));
} else {
EXPECT_FALSE(i->has_sharding());
}
}
}
}
}
}
} |
2,007 | cpp | tensorflow/tensorflow | collective_permute_motion | third_party/xla/xla/service/spmd/collective_permute_motion.cc | third_party/xla/xla/service/spmd/collective_permute_motion_test.cc | #ifndef XLA_SERVICE_SPMD_COLLECTIVE_PERMUTE_MOTION_H_
#define XLA_SERVICE_SPMD_COLLECTIVE_PERMUTE_MOTION_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
class CollectivePermuteMotion : public HloModulePass {
public:
CollectivePermuteMotion() = default;
absl::string_view name() const override {
return "collective-permute-motion";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/spmd/collective_permute_motion.h"
#include <cstdint>
#include <deque>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/while_loop_analysis.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
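// Returns the instructions in the while body whose values are loop-invariant:
// tuple elements forwarded unchanged to the root, constants, iota,
// replica-id/partition-id, and side-effect-free ops computed only from other
// invariants.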
absl::flat_hash_set<HloInstruction*> FindLoopConsts(HloComputation* body) {
HloInstruction* root = body->root_instruction();
CHECK_EQ(root->opcode(), HloOpcode::kTuple);
absl::flat_hash_set<HloInstruction*> loop_consts;
for (int64_t i = 0; i < root->operand_count(); ++i) {
HloInstruction* output = root->mutable_operand(i);
while (output->opcode() == HloOpcode::kReshape ||
output->opcode() == HloOpcode::kCopy) {
output = output->mutable_operand(0);
}
if (output->opcode() == HloOpcode::kGetTupleElement &&
output->tuple_index() == i &&
output->operand(0) == body->parameter_instruction(0)) {
loop_consts.insert(output);
}
}
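// Propagate invariance forward: an op whose opcode is on the small allowlist
// below and whose operands are all already-known invariants is itself
// loop-invariant.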
for (HloInstruction* inst : body->MakeInstructionPostOrder()) {
if (inst->IsConstant() || inst->opcode() == HloOpcode::kIota ||
inst->opcode() == HloOpcode::kReplicaId ||
inst->opcode() == HloOpcode::kPartitionId) {
loop_consts.insert(inst);
continue;
}
if (!inst->IsElementwise() && inst->opcode() != HloOpcode::kBroadcast &&
inst->opcode() != HloOpcode::kReduce &&
inst->opcode() != HloOpcode::kReshape &&
inst->opcode() != HloOpcode::kDynamicSlice &&
inst->opcode() != HloOpcode::kTranspose) {
continue;
}
if (inst->HasSideEffectNoRecurse()) {
continue;
}
if (absl::c_all_of(inst->operands(), [&](const HloInstruction* operand) {
return loop_consts.contains(operand);
})) {
loop_consts.insert(inst);
}
}
return loop_consts;
}
constexpr int64_t kMaxMovableClusterSize = 8;
struct MovableCluster {
int64_t root_tuple_index;
std::vector<HloInstruction*> reverse_order_instructions;
HloInstruction* collective_permute = nullptr;
};
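// Walks backwards from one operand of the body's root tuple, accepting only
// single-use elementwise instructions, and requires exactly one
// collective-permute in the chain; bails out beyond kMaxMovableClusterSize
// instructions.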
std::optional<MovableCluster> FindMovableClusterAtBodyRoot(
HloComputation* body, int64_t root_tuple_index,
const absl::flat_hash_set<HloInstruction*>& loop_consts) {
HloInstruction* root = body->root_instruction();
CHECK_EQ(root->opcode(), HloOpcode::kTuple);
MovableCluster cluster;
cluster.root_tuple_index = root_tuple_index;
std::deque<HloInstruction*> queue;
queue.push_back(root->mutable_operand(root_tuple_index));
while (!queue.empty()) {
HloInstruction* visiting = queue.front();
queue.pop_front();
if (cluster.reverse_order_instructions.size() >= kMaxMovableClusterSize) {
VLOG(2) << "Cannot move: too many instructions to move";
return std::nullopt;
}
if (visiting->user_count() > 1) {
VLOG(2) << "Cannot move: " << visiting->name() << " used multiple times";
return std::nullopt;
}
cluster.reverse_order_instructions.push_back(visiting);
if (visiting->opcode() == HloOpcode::kCollectivePermute) {
if (cluster.collective_permute != nullptr) {
VLOG(2) << "Cannot move: " << visiting->name()
<< " multiple collective permutes";
return std::nullopt;
}
cluster.collective_permute = visiting;
continue;
}
if (!visiting->IsElementwise() || visiting->HasSideEffectNoRecurse()) {
VLOG(2) << "Cannot move: " << visiting->name() << " unsupported op";
return std::nullopt;
}
for (HloInstruction* operand : visiting->mutable_operands()) {
if (!loop_consts.contains(operand)) {
queue.push_back(operand);
}
}
}
if (cluster.collective_permute == nullptr) {
return std::nullopt;
}
return cluster;
}
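// Returns the tuple indices of the while's result that have no
// get-tuple-element user; a single non-GTE user makes every index count as
// used.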
absl::flat_hash_set<int64_t> FindIndicesUnusedAfterLoop(HloInstruction* loop) {
absl::flat_hash_set<int64_t> indices;
int64_t count = loop->shape().tuple_shapes_size();
for (int64_t i = 0; i < count; ++i) {
indices.insert(i);
}
for (HloInstruction* user : loop->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
indices.clear();
break;
}
indices.erase(user->tuple_index());
}
return indices;
}
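// Rewrites the loop so that a movable collective-permute cluster feeding the
// body root is recomputed at the start of the next iteration instead, with
// the first iteration guarded by a select on the induction variable. Only
// outputs that appear once in the root and are unused after the loop are
// touched.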
absl::StatusOr<bool> MoveCollectivePermutes(HloComputation* computation,
HloInstruction* loop) {
HloComputation* body = loop->while_body();
HloInstruction* root = body->root_instruction();
if (root->opcode() != HloOpcode::kTuple ||
loop->operand(0)->opcode() != HloOpcode::kTuple) {
return false;
}
auto maybe_induction_var_idx = GetLoopInductionVarTupleIdx(loop);
if (!maybe_induction_var_idx.has_value()) {
VLOG(2) << "Skip " << loop->name() << ", no induction var";
return false;
}
absl::flat_hash_map<const HloInstruction*, int64_t> output_appear_counts;
for (const HloInstruction* operand : root->operands()) {
auto res = output_appear_counts.emplace(operand, 1);
if (!res.second) {
res.first->second++;
}
}
absl::flat_hash_set<int64_t> unused_indices_after_loop =
FindIndicesUnusedAfterLoop(loop);
const absl::flat_hash_set<HloInstruction*> loop_consts = FindLoopConsts(body);
int64_t induction_var_idx = *maybe_induction_var_idx;
std::vector<HloInstruction*> input_gtes(root->operand_count(), nullptr);
absl::flat_hash_set<int64_t> multi_use_indices;
for (HloInstruction* user : body->parameter_instruction(0)->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
VLOG(2) << "Skip " << loop->name() << ", non-GTE input use";
return false;
}
if (multi_use_indices.contains(user->tuple_index())) {
continue;
}
if (input_gtes[user->tuple_index()] != nullptr) {
multi_use_indices.insert(user->tuple_index());
input_gtes[user->tuple_index()] = nullptr;
} else {
input_gtes[user->tuple_index()] = user;
}
}
HloInstruction* ind_var = input_gtes[induction_var_idx];
if (ind_var == nullptr || ind_var->shape().rank() > 0) {
VLOG(2) << "Skip " << loop->name() << ", non-scalar induction var";
return false;
}
if (root->operand(induction_var_idx)->opcode() != HloOpcode::kAdd &&
root->operand(induction_var_idx)->opcode() != HloOpcode::kSubtract) {
VLOG(2) << "Skip " << loop->name() << ", non-add/sub induction var";
return false;
}
if (root->operand(induction_var_idx)->operand(0) == ind_var) {
if (!root->operand(induction_var_idx)->operand(1)->IsConstant()) {
VLOG(2) << "Skip " << loop->name() << ", non-add/sub const induction var";
return false;
}
} else if (root->operand(induction_var_idx)->operand(1) == ind_var) {
if (!root->operand(induction_var_idx)->operand(0)->IsConstant()) {
VLOG(2) << "Skip " << loop->name() << ", non-add/sub const induction var";
return false;
}
} else {
return false;
}
HloInstruction* ind_var_orig =
loop->mutable_operand(0)->mutable_operand(induction_var_idx);
if (!ind_var_orig->IsConstant()) {
VLOG(2) << "Skip " << loop->name()
<< ", non-constant initial induction var";
return false;
}
bool changed = false;
std::vector<MovableCluster> movable_outputs;
for (int64_t i = 0; i < root->operand_count(); ++i) {
if (output_appear_counts[root->operand(i)] > 1) {
VLOG(2) << "Skip " << loop->name() << " index " << i
<< " appears multiple times in output.";
continue;
}
if (!unused_indices_after_loop.contains(i)) {
VLOG(2) << "Skip " << loop->name() << " index " << i
<< " used after loop.";
continue;
}
auto cluster = FindMovableClusterAtBodyRoot(body, i, loop_consts);
if (!cluster.has_value()) {
VLOG(2) << "Skip " << loop->name() << " index " << i
<< " did not find a movable cluster.";
continue;
}
HloInstruction* input = input_gtes[cluster->root_tuple_index];
HloInstruction* cp = cluster->collective_permute;
if (input == nullptr || cp->operand(0) == input) {
VLOG(2) << "Skip " << loop->name() << " index " << i
<< " collective-permute already at top.";
continue;
}
const std::vector<HloInstruction*> original_input_users = input->users();
absl::flat_hash_map<const HloInstruction*, HloInstruction*> replacement;
replacement[cp->operand(0)] = input;
for (auto it = cluster->reverse_order_instructions.rbegin();
it != cluster->reverse_order_instructions.rend(); ++it) {
HloInstruction* inst = *it;
std::vector<HloInstruction*> new_operands;
for (HloInstruction* operand : inst->mutable_operands()) {
auto rit = replacement.find(operand);
if (rit != replacement.end()) {
new_operands.push_back(rit->second);
} else {
new_operands.push_back(operand);
}
}
HloInstruction* clone = body->AddInstruction(
inst->CloneWithNewOperands(inst->shape(), new_operands));
replacement[inst] = clone;
}
HloInstruction* new_input =
replacement[cluster->reverse_order_instructions[0]];
if (ind_var_orig->parent() != body) {
ind_var_orig = body->AddInstruction(ind_var_orig->Clone());
}
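    // On the first iteration the moved cluster has no producer from a
    // previous iteration, so select the original input when the induction
    // variable still equals its initial value.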
HloInstruction* is_first_iter =
body->AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::ChangeElementType(new_input->shape(), PRED),
body->AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeScalarShape(PRED), ind_var, ind_var_orig,
Comparison::Direction::kEq)),
{}));
new_input = body->AddInstruction(
HloInstruction::CreateTernary(new_input->shape(), HloOpcode::kSelect,
is_first_iter, input, new_input));
for (HloInstruction* user : original_input_users) {
TF_RETURN_IF_ERROR(input->ReplaceUseWith(user, new_input));
}
TF_RETURN_IF_ERROR(root->ReplaceOperandWith(cluster->root_tuple_index,
cp->mutable_operand(0)));
TF_RETURN_IF_ERROR(body->RemoveInstructionAndUnusedOperands(
cluster->reverse_order_instructions[0]));
VLOG(2) << "Moved " << loop->name() << " index " << i;
changed = true;
}
return changed;
}
absl::StatusOr<bool> CollectivePermuteMotion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instr : computation->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kWhile) {
TF_ASSIGN_OR_RETURN(bool moved,
MoveCollectivePermutes(computation, instr));
changed |= moved;
}
}
}
return changed;
}
} | #include "xla/service/spmd/collective_permute_motion.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
using CollectivePermuteMotionTest = HloTestBase;
namespace op = xla::testing::opcode_matchers;
TEST_F(CollectivePermuteMotionTest, SimpleMove) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
ROOT tuple = (s32[], f32[4,4]) tuple(add, cp)
}
cond {
loop_var = (s32[], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)
while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_TRUE(pass.Run(&*module).value());
VLOG(1) << module->ToString();
const HloInstruction* loop = FindInstruction(module.get(), "while");
const HloInstruction* output =
loop->while_body()->root_instruction()->operand(1);
auto input =
AllOf(op::Shape("f32[4,4]"), op::GetTupleElement(op::Parameter(0)));
auto cp = op::CollectivePermute(input);
auto select = op::Select(op::Broadcast(op::Compare()), input, cp);
EXPECT_THAT(output, op::Multiply(select, select));
}
TEST_F(CollectivePermuteMotionTest, NoCollectivePermute) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[], f32[]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[] get-tuple-element(loop_var), index=1
constant.4 = f32[] constant(4.0)
ROOT tuple = (s32[], f32[], f32[]) tuple(add, constant.4, gte1)
}
cond {
loop_var = (s32[], f32[], f32[]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[] parameter(0)
param.1 = f32[] parameter(1)
tuple.1 = (s32[], f32[], f32[]) tuple(constant.2, param, param.1)
while = (s32[], f32[], f32[]) while(tuple.1), condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_FALSE(pass.Run(&*module).value());
}
TEST_F(CollectivePermuteMotionTest, MoveWithElementwise) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
constant.4 = f32[] constant(1)
broadcast = f32[4,4] broadcast(constant.4), dimensions={}
add1 = f32[4,4] add(cp, broadcast)
ROOT tuple = (s32[], f32[4,4]) tuple(add, add1)
}
cond {
loop_var = (s32[], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)
while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_TRUE(pass.Run(&*module).value());
VLOG(1) << module->ToString();
const HloInstruction* loop = FindInstruction(module.get(), "while");
const HloInstruction* output =
loop->while_body()->root_instruction()->operand(1);
auto input =
AllOf(op::Shape("f32[4,4]"), op::GetTupleElement(op::Parameter(0)));
auto moved =
op::Add(op::CollectivePermute(input), op::Broadcast(op::Constant()));
auto select = op::Select(op::Broadcast(op::Compare()), input, moved);
EXPECT_THAT(output, op::Multiply(select, select));
}
TEST_F(CollectivePermuteMotionTest, DoNotMoveWithNonConstElementwise) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
constant.4 = f32[] constant(1)
nonconst = f32[4,4] custom-call(), custom_call_target="unknown"
add1 = f32[4,4] add(cp, nonconst)
ROOT tuple = (s32[], f32[4,4]) tuple(add, add1)
}
cond {
loop_var = (s32[], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)
while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_FALSE(pass.Run(&*module).value());
}
TEST_F(CollectivePermuteMotionTest, DoNotMoveIfOutputUsed) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
ROOT tuple = (s32[], f32[4,4]) tuple(add, cp)
}
cond {
loop_var = (s32[], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)
while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body
ROOT result = f32[4,4] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_FALSE(pass.Run(&*module).value());
}
TEST_F(CollectivePermuteMotionTest, DoNotMoveIfInductionVarUnknown) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
custom = s32[] custom-call(gte0, constant.1), custom_call_target="unknown"
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
ROOT tuple = (s32[], f32[4,4]) tuple(custom, cp)
}
cond {
loop_var = (s32[], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)
while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_FALSE(pass.Run(&*module).value());
}
TEST_F(CollectivePermuteMotionTest, DoNotMoveIfMultiOutput) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
ROOT tuple = (s32[], f32[4,4], f32[4,4]) tuple(add, cp, cp)
}
cond {
loop_var = (s32[], f32[4,4], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4], f32[4,4]) tuple(constant.2, param, param)
while = (s32[], f32[4,4], f32[4,4]) while(tuple.1),
condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_FALSE(pass.Run(&*module).value());
}
}
} |
2,008 | cpp | tensorflow/tensorflow | shape_partition | third_party/xla/xla/service/cpu/shape_partition.cc | third_party/xla/xla/service/cpu/shape_partition_test.cc | #ifndef XLA_SERVICE_CPU_SHAPE_PARTITION_H_
#define XLA_SERVICE_CPU_SHAPE_PARTITION_H_
#include <cstdint>
#include <utility>
#include <vector>
#include "absl/types/span.h"
#include "xla/shape.h"
namespace xla {
namespace cpu {
class ShapePartitionAssigner {
public:
explicit ShapePartitionAssigner(const Shape& shape) : shape_(shape) {}
std::vector<int64_t> Run(int64_t target_partition_count);
static int64_t GetTotalPartitionCount(
const std::vector<int64_t>& dimension_partition_counts);
private:
const Shape& shape_;
};
class ShapePartitionIterator {
public:
ShapePartitionIterator(const Shape& shape,
absl::Span<const int64_t> dimension_partition_counts);
std::vector<std::pair<int64_t, int64_t>> GetPartition(int64_t index) const;
int64_t GetTotalPartitionCount() const;
private:
const Shape& shape_;
const std::vector<int64_t> dimension_partition_counts_;
std::vector<int64_t> dimensions_;
std::vector<int64_t> dimension_partition_sizes_;
std::vector<int64_t> dimension_partition_strides_;
};
}
}
#endif
#include "xla/service/cpu/shape_partition.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <utility>
#include <vector>
namespace xla {
namespace cpu {
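// Picks per-dimension partition counts for the outermost layout dimensions:
// dimensions are peeled from the major end until their combined size covers
// the target, the target is spread evenly across them, and any remaining
// shortfall is made up greedily per dimension.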
std::vector<int64_t> ShapePartitionAssigner::Run(
int64_t target_partition_count) {
std::vector<int64_t> outer_dims;
int64_t outer_dim_size = 1;
for (int i = shape_.layout().minor_to_major_size() - 1; i >= 0; --i) {
const int64_t dimension = shape_.layout().minor_to_major(i);
outer_dims.push_back(dimension);
outer_dim_size *= shape_.dimensions(dimension);
if (outer_dim_size >= target_partition_count) {
break;
}
}
target_partition_count = std::min(outer_dim_size, target_partition_count);
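  // Seed each partitioned dimension with roughly the outer_dims.size()-th
  // root of the target count, clamped to the dimension size.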
const int64_t target_dim_partition_count = std::pow(
static_cast<double>(target_partition_count), 1.0 / outer_dims.size());
std::vector<int64_t> dimension_partition_counts(outer_dims.size());
for (int64_t i = 0; i < outer_dims.size(); ++i) {
dimension_partition_counts[i] =
std::min(static_cast<int64_t>(shape_.dimensions(outer_dims[i])),
target_dim_partition_count);
}
if (GetTotalPartitionCount(dimension_partition_counts) <
target_partition_count) {
for (int64_t i = 0; i < dimension_partition_counts.size(); ++i) {
const int64_t current_dim_partition_count = dimension_partition_counts[i];
const int64_t other_dims_partition_count =
GetTotalPartitionCount(dimension_partition_counts) /
current_dim_partition_count;
int64_t additional_partition_count =
target_partition_count / other_dims_partition_count -
current_dim_partition_count;
additional_partition_count = std::min(
shape_.dimensions(outer_dims[i]) - dimension_partition_counts[i],
additional_partition_count);
if (additional_partition_count > 0) {
dimension_partition_counts[i] += additional_partition_count;
}
}
}
return dimension_partition_counts;
}
int64_t ShapePartitionAssigner::GetTotalPartitionCount(
const std::vector<int64_t>& dimension_partition_counts) {
int64_t total_partition_count = 1;
for (int64_t dim_partition_count : dimension_partition_counts) {
total_partition_count *= dim_partition_count;
}
return total_partition_count;
}
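// Usage sketch (values match the Shape53WithLayout10 unit test):
//   ShapePartitionAssigner assigner(shape);  // f32[5,3], layout {1,0}
//   std::vector<int64_t> counts = assigner.Run(/*target_partition_count=*/6);
//   // counts == {3, 2}: dimension 0 split three ways, dimension 1 two ways.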
ShapePartitionIterator::ShapePartitionIterator(
const Shape& shape, absl::Span<const int64_t> dimension_partition_counts)
: shape_(shape),
dimension_partition_counts_(dimension_partition_counts.begin(),
dimension_partition_counts.end()),
dimensions_(dimension_partition_counts_.size()),
dimension_partition_sizes_(dimension_partition_counts_.size()),
dimension_partition_strides_(dimension_partition_counts_.size()) {
for (int i = 0; i < dimensions_.size(); ++i) {
dimensions_[i] = shape_.layout().minor_to_major(
shape_.layout().minor_to_major_size() - 1 - i);
}
for (int i = 0; i < dimension_partition_sizes_.size(); ++i) {
const int64_t dim_size = shape_.dimensions(dimensions_[i]);
dimension_partition_sizes_[i] =
std::max(int64_t{1}, dim_size / dimension_partition_counts_[i]);
}
dimension_partition_strides_[dimension_partition_strides_.size() - 1] = 1;
for (int i = dimension_partition_strides_.size() - 2; i >= 0; --i) {
dimension_partition_strides_[i] = dimension_partition_strides_[i + 1] *
dimension_partition_counts_[i + 1];
}
}
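// Decodes a linear partition index into per-dimension (start, size) pairs.
// The last partition along each dimension absorbs the remainder, e.g. a
// dimension of size 5 split in two yields sizes {2, 3}.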
std::vector<std::pair<int64_t, int64_t>> ShapePartitionIterator::GetPartition(
int64_t index) const {
std::vector<std::pair<int64_t, int64_t>> partition(dimensions_.size());
for (int64_t i = 0; i < partition.size(); ++i) {
const int64_t partition_index = index / dimension_partition_strides_[i];
partition[i].first = partition_index * dimension_partition_sizes_[i];
if (partition_index == dimension_partition_counts_[i] - 1) {
partition[i].second =
shape_.dimensions(dimensions_[i]) - partition[i].first;
} else {
partition[i].second = dimension_partition_sizes_[i];
}
CHECK_GT(partition[i].second, 0);
index -= partition_index * dimension_partition_strides_[i];
}
return partition;
}
int64_t ShapePartitionIterator::GetTotalPartitionCount() const {
return ShapePartitionAssigner::GetTotalPartitionCount(
dimension_partition_counts_);
}
}
} | #include "xla/service/cpu/shape_partition.h"
#include <algorithm>
#include <map>
#include <random>
#include "absl/algorithm/container.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
namespace xla {
namespace cpu {
namespace {
class ShapePartitionAssignerTest : public HloTestBase {
protected:
typedef std::vector<int64_t> Vec;
void RunR2Test(const Shape& shape, int64_t max_target_partition_count,
const std::vector<int64_t>* expected_partitions) {
ShapePartitionAssigner assigner(shape);
for (int64_t i = 1; i <= max_target_partition_count; ++i) {
      std::vector<int64_t> actual_partitions =
          assigner.Run(/*target_partition_count=*/i);
EXPECT_THAT(actual_partitions, expected_partitions[i - 1]);
}
}
};
TEST_F(ShapePartitionAssignerTest, Shape13WithLayout10) {
  std::vector<int64_t> expected_partitions[] = {{1}, {1, 2}};
RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 3}, {1, 0}), 2,
expected_partitions);
}
TEST_F(ShapePartitionAssignerTest, Shape31WithLayout01) {
  std::vector<int64_t> expected_partitions[] = {{1}, {1, 2}};
RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 1}, {0, 1}), 2,
expected_partitions);
}
TEST_F(ShapePartitionAssignerTest, Shape53WithLayout10) {
  std::vector<int64_t> expected_partitions[] = {{1}, {2}, {3},
                                                {4}, {5}, {3, 2}};
RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3}, {1, 0}), 6,
expected_partitions);
}
TEST_F(ShapePartitionAssignerTest, Shape53WithLayout01) {
  std::vector<int64_t> expected_partitions[] = {{1}, {2}, {3}, {2, 2}};
RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3}, {0, 1}), 4,
expected_partitions);
}
TEST_F(ShapePartitionAssignerTest, Shape532WithLayout210) {
  std::vector<int64_t> expected_partitions[] = {
      {1},    {2},    {3},    {4},
      {5},    {3, 2}, {3, 2}, {4, 2},
      {3, 3}, {3, 3}, {3, 3}, {4, 3},
      {4, 3}, {4, 3}, {5, 3}, {4, 2, 2}};
RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3, 2}, {2, 1, 0}), 16,
expected_partitions);
}
TEST_F(ShapePartitionAssignerTest, Shape532WithLayout201) {
  std::vector<int64_t> expected_partitions[] = {
      {1},    {2},    {3},    {2, 2},
      {2, 2}, {3, 2}, {3, 2}, {3, 2},
      {3, 3}, {3, 3}, {3, 3}, {3, 4},
      {3, 4}, {3, 4}, {3, 5}, {3, 2, 2}};
RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3, 2}, {2, 0, 1}), 16,
expected_partitions);
}
class ShapePartitionIteratorTest : public HloTestBase {
protected:
typedef std::vector<std::pair<int64_t, int64_t>> Partition;
};
TEST_F(ShapePartitionIteratorTest, Shape53WithLayout10) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3}, {1, 0});
{
ShapePartitionIterator iterator(shape, {1});
EXPECT_EQ(1, iterator.GetTotalPartitionCount());
EXPECT_TRUE(absl::c_equal(Partition({{0, 5}}), iterator.GetPartition(0)));
}
{
ShapePartitionIterator iterator(shape, {2});
EXPECT_EQ(2, iterator.GetTotalPartitionCount());
EXPECT_TRUE(absl::c_equal(Partition({{0, 2}}), iterator.GetPartition(0)));
EXPECT_TRUE(absl::c_equal(Partition({{2, 3}}), iterator.GetPartition(1)));
}
{
ShapePartitionIterator iterator(shape, {3});
EXPECT_EQ(3, iterator.GetTotalPartitionCount());
EXPECT_TRUE(absl::c_equal(Partition({{0, 1}}), iterator.GetPartition(0)));
EXPECT_TRUE(absl::c_equal(Partition({{1, 1}}), iterator.GetPartition(1)));
EXPECT_TRUE(absl::c_equal(Partition({{2, 3}}), iterator.GetPartition(2)));
}
}
TEST_F(ShapePartitionIteratorTest, Shape532WithLayout210) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3, 2}, {2, 1, 0});
{
ShapePartitionIterator iterator(shape, {1, 1});
EXPECT_EQ(1, iterator.GetTotalPartitionCount());
EXPECT_TRUE(
absl::c_equal(Partition({{0, 5}, {0, 3}}), iterator.GetPartition(0)));
}
{
ShapePartitionIterator iterator(shape, {2, 2});
EXPECT_EQ(4, iterator.GetTotalPartitionCount());
EXPECT_TRUE(
absl::c_equal(Partition({{0, 2}, {0, 1}}), iterator.GetPartition(0)));
EXPECT_TRUE(
absl::c_equal(Partition({{0, 2}, {1, 2}}), iterator.GetPartition(1)));
EXPECT_TRUE(
absl::c_equal(Partition({{2, 3}, {0, 1}}), iterator.GetPartition(2)));
EXPECT_TRUE(
absl::c_equal(Partition({{2, 3}, {1, 2}}), iterator.GetPartition(3)));
}
}
class RandomShapePartitionIteratorTest : public HloTestBase {
protected:
typedef std::vector<std::pair<int64_t, int64_t>> Partition;
RandomShapePartitionIteratorTest()
: generator_(rd_()), distribution_(1, 10) {}
std::vector<int64_t> RandR4Dims() { return {Rand(), Rand(), Rand(), Rand()}; }
int64_t Rand() { return distribution_(generator_); }
std::random_device rd_;
std::mt19937 generator_;
std::uniform_int_distribution<int> distribution_;
};
TEST_F(RandomShapePartitionIteratorTest, RandomShapeAndPartitions) {
Shape shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, RandR4Dims(), {3, 2, 1, 0});
const int num_outer_dims_to_partition = 1 + (Rand() % 3);
std::vector<int64_t> dim_sizes(num_outer_dims_to_partition);
std::vector<int64_t> dim_partition_counts(num_outer_dims_to_partition);
int64_t total_dim_size = 1;
for (int i = 0; i < num_outer_dims_to_partition; ++i) {
const int64_t dimension = shape.layout().minor_to_major(
shape.layout().minor_to_major_size() - 1 - i);
dim_sizes[i] = shape.dimensions(dimension);
total_dim_size *= dim_sizes[i];
const int64_t dim_partition_count = 1 + Rand() % dim_sizes[i];
dim_partition_counts[i] = dim_partition_count;
}
std::vector<std::map<int64_t, int64_t>> ranges(num_outer_dims_to_partition);
ShapePartitionIterator partition_iterator(shape, dim_partition_counts);
const int64_t partition_count = partition_iterator.GetTotalPartitionCount();
for (int64_t i = 0; i < partition_count; ++i) {
const auto& dim_partition = partition_iterator.GetPartition(i);
for (int dim = 0; dim < dim_partition.size(); ++dim) {
ranges[dim].insert(
std::make_pair(dim_partition[dim].first,
dim_partition[dim].first + dim_partition[dim].second));
}
}
for (int i = 0; i < ranges.size(); ++i) {
int64_t expected_index = 0;
for (auto& r : ranges[i]) {
EXPECT_EQ(expected_index, r.first);
expected_index = r.second;
}
EXPECT_EQ(expected_index, dim_sizes[i]);
}
}
}
}
} |
2,009 | cpp | tensorflow/tensorflow | cpu_runtime | third_party/xla/xla/service/cpu/cpu_runtime.cc | third_party/xla/xla/service/cpu/cpu_runtime_test.cc | #ifndef XLA_SERVICE_CPU_CPU_RUNTIME_H_
#define XLA_SERVICE_CPU_CPU_RUNTIME_H_
#include "xla/executable_run_options.h"
#include "xla/service/cpu/xfeed_manager.h"
namespace xla {
namespace cpu {
namespace runtime {
extern const char* const kEigenMatMulF16SymbolName;
extern const char* const kEigenMatMulF32SymbolName;
extern const char* const kEigenMatMulF64SymbolName;
extern const char* const kEigenMatMulC64SymbolName;
extern const char* const kEigenMatMulC128SymbolName;
extern const char* const kEigenMatMulS32SymbolName;
extern const char* const kEigenBatchMatMulF32SymbolName;
extern const char* const kMKLConv2DF32SymbolName;
extern const char* const kACLConv2DF32SymbolName;
extern const char* const kACLMatMulF32SymbolName;
extern const char* const kACLBatchMatMulF32SymbolName;
extern const char* const kEigenConv2DF16SymbolName;
extern const char* const kEigenConv2DF32SymbolName;
extern const char* const kEigenConv3DF16SymbolName;
extern const char* const kEigenConv3DF32SymbolName;
extern const char* const kDuccFftSymbolName;
extern const char* const kDuccSingleThreadedFftSymbolName;
extern const char* const kEigenSingleThreadedMatMulF16SymbolName;
extern const char* const kEigenSingleThreadedMatMulF32SymbolName;
extern const char* const kEigenSingleThreadedMatMulF64SymbolName;
extern const char* const kEigenSingleThreadedMatMulC64SymbolName;
extern const char* const kEigenSingleThreadedMatMulC128SymbolName;
extern const char* const kEigenSingleThreadedMatMulS32SymbolName;
extern const char* const kEigenSingleThreadedConv2DF16SymbolName;
extern const char* const kEigenSingleThreadedConv2DF32SymbolName;
extern const char* const kEigenSingleThreadedConv3DF16SymbolName;
extern const char* const kEigenSingleThreadedConv3DF32SymbolName;
extern const char* const kAcquireInfeedBufferForDequeueSymbolName;
extern const char* const kReleaseInfeedBufferAfterDequeueSymbolName;
extern const char* const kAcquireOutfeedBufferForPopulationSymbolName;
extern const char* const kReleaseOutfeedBufferAfterPopulationSymbolName;
extern const char* const kParallelForkJoinSymbolName;
extern const char* const kPrintfToStderrSymbolName;
extern const char* const kStatusIsSuccessSymbolName;
extern const char* const kKeyValueSortSymbolName;
extern const char* const kTopKF32SymbolName;
extern const char* const kAllReduceSymbolName;
extern const char* const kCollectivePermuteSymbolName;
extern const char* const kPartitionIdSymbolName;
extern const char* const kReplicaIdSymbolName;
extern const char* const kTracingStartSymbolName;
extern const char* const kTracingEndSymbolName;
extern const char* const kAllToAllSymbolName;
extern const char* const kAllGatherSymbolName;
extern const char* const kReduceScatterSymbolName;
extern const char* const kOneDnnMatMulSymbolName;
extern const char* const kOneDnnSoftmaxSymbolName;
extern const char* const kOneDnnLayerNormSymbolName;
extern const char* const kOneDnnConvolutionSymbolName;
extern const char* const kOneDnnMatMulReorderSymbolName;
extern const char* const kHandleFfiCallSymbolName;
extern const char* const kXlaCpuRuntimeSymbolNamePrefix;
XfeedManager* GetXfeedManager(int device_ordinal);
}
}
}
extern "C" {
extern int __xla_cpu_runtime_PrintfToStderr(const char* format, ...);
extern int64_t __xla_cpu_runtime_TracingStart(
const void* run_options_ptr,
const char* name, const char* hlo_module, int64_t program_id);
extern void __xla_cpu_runtime_TracingEnd(
const void* run_options_ptr, int64_t id);
extern void* __xla_cpu_runtime_AcquireInfeedBufferForDequeue(
const xla::ExecutableRunOptions* run_options, int32_t buffer_length,
const void* shape, int32_t shape_length);
extern void __xla_cpu_runtime_ReleaseInfeedBufferAfterDequeue(
const xla::ExecutableRunOptions* run_options, int32_t buffer_length,
void* buffer_ptr, const void* shape_ptr, int32_t shape_length);
extern void* __xla_cpu_runtime_AcquireOutfeedBufferForPopulation(
const xla::ExecutableRunOptions* run_options, int32_t buffer_length,
const void* shape_ptr, int32_t shape_length);
extern void __xla_cpu_runtime_ReleaseOutfeedBufferAfterPopulation(
const xla::ExecutableRunOptions* run_options, int32_t buffer_length,
void* buffer_ptr, const void* shape_ptr, int32_t shape_length);
extern void __xla_cpu_runtime_AllReduce(
const xla::ExecutableRunOptions* run_options,
const void* replica_groups_str, int32_t replica_groups_str_size,
int32_t channel_id_present, int32_t use_global_device_ids, int64_t op_id,
int32_t reduction_kind, const void* shape_ptr, int32_t shape_length,
int32_t num_buffers, void** input_buffers, void** output_buffers);
extern void __xla_cpu_runtime_CollectivePermute(
const xla::ExecutableRunOptions* run_options, int32_t channel_id_present,
int64_t op_id, int32_t byte_size, void* input_buffer, void* output_buffer,
const void* source_target_pairs, int32_t source_target_pairs_size);
extern void __xla_cpu_runtime_AllToAll(
const xla::ExecutableRunOptions* run_options, int32_t channel_id_present,
int64_t op_id, const void* replica_groups_str,
int32_t replica_groups_str_size, int32_t num_buffers, int64_t buffer_size,
void** source_buffers, void** destination_buffers);
extern void __xla_cpu_runtime_AllGather(
const xla::ExecutableRunOptions* run_options, int32_t channel_id_present,
int32_t use_global_device_ids, int64_t op_id,
const void* replica_groups_str, int32_t replica_groups_str_size,
int64_t buffer_size, void* source_buffer, void* destination_buffer);
void __xla_cpu_runtime_ReduceScatter(
const xla::ExecutableRunOptions* run_options,
const void* replica_groups_str, int32_t replica_groups_str_size,
int32_t channel_id_present, int32_t use_global_device_ids, int64_t op_id,
int32_t reduction_kind, int32_t element_type, int64_t chunk_elems,
void* input_buffer, void* output_buffer);
extern void __xla_cpu_runtime_PartitionId(
const xla::ExecutableRunOptions* run_options, void* output_buffer);
extern void __xla_cpu_runtime_ReplicaId(
const xla::ExecutableRunOptions* run_options, void* output_buffer);
}
#endif
#include "xla/service/cpu/cpu_runtime.h"
#include <cstdarg>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/executable_run_options.h"
#include "xla/layout_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/computation_placer.h"
#include "xla/service/cpu/collectives_interface.h"
#include "xla/service/cpu/cpu_executable_run_options.h"
#include "xla/service/cpu/in_process_collectives.h"
#include "xla/service/cpu/xfeed_manager.h"
#include "xla/service/global_device_id.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla {
namespace cpu {
namespace runtime {
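// Returns the XfeedManager for a device ordinal, creating it on first use.
// The map and mutex are allocated with `new` and never destroyed so they
// remain valid during program teardown.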
XfeedManager* GetXfeedManager(int device_ordinal) {
static auto* managers = new absl::flat_hash_map<int, XfeedManager*>();
static absl::Mutex* mutex = new absl::Mutex();
absl::MutexLock lock(mutex);
auto it = managers->find(device_ordinal);
if (it == managers->end()) {
it = managers->emplace(device_ordinal, new XfeedManager()).first;
}
return it->second;
}
extern const char* const kEigenMatMulF16SymbolName =
"__xla_cpu_runtime_EigenMatMulF16";
extern const char* const kEigenMatMulF32SymbolName =
"__xla_cpu_runtime_EigenMatMulF32";
extern const char* const kEigenMatMulF64SymbolName =
"__xla_cpu_runtime_EigenMatMulF64";
extern const char* const kEigenMatMulC64SymbolName =
"__xla_cpu_runtime_EigenMatMulC64";
extern const char* const kEigenMatMulC128SymbolName =
"__xla_cpu_runtime_EigenMatMulC128";
extern const char* const kEigenMatMulS32SymbolName =
"__xla_cpu_runtime_EigenMatMulS32";
extern const char* const kEigenBatchMatMulF32SymbolName =
"__xla_cpu_runtime_EigenBatchMatMulF32";
extern const char* const kMKLConv2DF32SymbolName =
"__xla_cpu_runtime_MKLConv2DF32";
extern const char* const kACLConv2DF32SymbolName =
"__xla_cpu_runtime_ACLConv2DF32";
extern const char* const kACLMatMulF32SymbolName =
"__xla_cpu_runtime_ACLMatMulF32";
extern const char* const kACLBatchMatMulF32SymbolName =
"__xla_cpu_runtime_ACLBatchMatMulF32";
extern const char* const kEigenConv2DF16SymbolName =
"__xla_cpu_runtime_EigenConv2DF16";
extern const char* const kEigenConv2DF32SymbolName =
"__xla_cpu_runtime_EigenConv2DF32";
extern const char* const kEigenConv3DF16SymbolName =
"__xla_cpu_runtime_EigenConv3DF16";
extern const char* const kEigenConv3DF32SymbolName =
"__xla_cpu_runtime_EigenConv3DF32";
extern const char* const kDuccFftSymbolName = "__xla_cpu_runtime_DuccFft";
extern const char* const kDuccSingleThreadedFftSymbolName =
"__xla_cpu_runtime_DuccSingleThreadedFft";
extern const char* const kEigenSingleThreadedMatMulF16SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulF16";
extern const char* const kEigenSingleThreadedMatMulF32SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulF32";
extern const char* const kEigenSingleThreadedMatMulF64SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulF64";
extern const char* const kEigenSingleThreadedMatMulC64SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulC64";
extern const char* const kEigenSingleThreadedMatMulC128SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulC128";
extern const char* const kEigenSingleThreadedMatMulS32SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulS32";
extern const char* const kEigenSingleThreadedConv2DF16SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedConv2DF16";
extern const char* const kEigenSingleThreadedConv2DF32SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedConv2DF32";
extern const char* const kEigenSingleThreadedConv3DF16SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedConv3DF16";
extern const char* const kEigenSingleThreadedConv3DF32SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedConv3DF32";
extern const char* const kAcquireInfeedBufferForDequeueSymbolName =
"__xla_cpu_runtime_AcquireInfeedBufferForDequeue";
extern const char* const kReleaseInfeedBufferAfterDequeueSymbolName =
"__xla_cpu_runtime_ReleaseInfeedBufferAfterDequeue";
extern const char* const kAcquireOutfeedBufferForPopulationSymbolName =
"__xla_cpu_runtime_AcquireOutfeedBufferForPopulation";
extern const char* const kReleaseOutfeedBufferAfterPopulationSymbolName =
"__xla_cpu_runtime_ReleaseOutfeedBufferAfterPopulation";
extern const char* const kParallelForkJoinSymbolName =
"__xla_cpu_runtime_ParallelForkJoin";
extern const char* const kPrintfToStderrSymbolName =
"__xla_cpu_runtime_PrintfToStderr";
extern const char* const kStatusIsSuccessSymbolName =
"__xla_cpu_runtime_StatusIsSuccess";
extern const char* const kKeyValueSortSymbolName =
"__xla_cpu_runtime_KeyValueSort";
extern const char* const kTopKF32SymbolName = "__xla_cpu_runtime_TopKF32";
extern const char* const kTracingStartSymbolName =
"__xla_cpu_runtime_TracingStart";
extern const char* const kTracingEndSymbolName = "__xla_cpu_runtime_TracingEnd";
extern const char* const kXlaCpuRuntimeSymbolNamePrefix = "__xla_cpu_runtime_";
extern const char* const kAllReduceSymbolName = "__xla_cpu_runtime_AllReduce";
extern const char* const kAllGatherSymbolName = "__xla_cpu_runtime_AllGather";
extern const char* const kReduceScatterSymbolName =
"__xla_cpu_runtime_ReduceScatter";
extern const char* const kAllToAllSymbolName = "__xla_cpu_runtime_AllToAll";
extern const char* const kCollectivePermuteSymbolName =
"__xla_cpu_runtime_CollectivePermute";
extern const char* const kPartitionIdSymbolName =
"__xla_cpu_runtime_PartitionId";
extern const char* const kReplicaIdSymbolName = "__xla_cpu_runtime_ReplicaId";
extern const char* const kOneDnnMatMulSymbolName =
"__xla_cpu_runtime_OneDnnMatMul";
extern const char* const kOneDnnSoftmaxSymbolName =
"__xla_cpu_runtime_OneDnnSoftmax";
extern const char* const kOneDnnLayerNormSymbolName =
"__xla_cpu_runtime_OneDnnLayerNorm";
extern const char* const kOneDnnConvolutionSymbolName =
"__xla_cpu_runtime_OneDnnConvolution";
extern const char* const kOneDnnMatMulReorderSymbolName =
"__xla_cpu_runtime_OneDnnMatMulReorder";
extern const char* const kHandleFfiCallSymbolName =
"__xla_cpu_runtime_HandleFfiCall";
namespace {
absl::StatusOr<Shape> DecodeSelfDescribingShapeConstant(const void* shape_ptr,
int32_t size_bytes) {
ShapeProto shape_proto;
if (!shape_proto.ParseFromArray(shape_ptr, size_bytes)) {
return tsl::errors::Internal("Failed parsing the shape proto");
}
Shape shape(shape_proto);
auto status = ShapeUtil::ValidateShape(shape);
if (!status.ok()) {
return status;
}
return std::move(shape);
}
std::string ShapeString(const void* shape_ptr, int32_t shape_length) {
absl::StatusOr<Shape> shape =
DecodeSelfDescribingShapeConstant(shape_ptr, shape_length);
if (shape.ok()) {
return ShapeUtil::HumanStringWithLayout(shape.value());
}
return "<invalid shape>";
}
int GetDeviceOrdinal(const ExecutableRunOptions* run_options) {
if (!run_options) {
return 0;
} else if (run_options->device_ordinal() != -1) {
return run_options->device_ordinal();
}
return run_options->stream()->parent()->device_ordinal();
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void* AcquireInfeedBufferForDequeueImpl(const ExecutableRunOptions* run_options,
int32_t buffer_length,
const void* shape,
int32_t shape_length) {
int device_ordinal = GetDeviceOrdinal(run_options);
VLOG(2) << "AcquireInfeedBufferForDequeue: "
<< ShapeString(shape, shape_length) << " on stream executor "
<< device_ordinal;
XfeedManager* xfeed = GetXfeedManager(device_ordinal);
XfeedBuffer* buffer = xfeed->infeed()->BlockingDequeueBuffer();
CHECK_EQ(buffer->length(), buffer_length)
<< "XLA program infeed request buffer size " << buffer_length
      << " did not match the runtime's infeed buffer length " << buffer->length()
<< "; program reports desired shape: "
<< ShapeString(shape, shape_length);
return buffer->data();
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void ReleaseInfeedBufferAfterDequeueImpl(
const ExecutableRunOptions* run_options, int32_t buffer_length,
void* buffer_ptr, const void* shape_ptr, int32_t shape_length) {
int device_ordinal = GetDeviceOrdinal(run_options);
  VLOG(2) << "ReleaseInfeedBufferAfterDequeue: "
<< ShapeString(shape_ptr, shape_length) << " on stream executor "
<< device_ordinal;
XfeedManager* xfeed = GetXfeedManager(device_ordinal);
absl::StatusOr<Shape> shape =
DecodeSelfDescribingShapeConstant(shape_ptr, shape_length);
xfeed->infeed()->ReleaseCurrentBuffer(buffer_length, buffer_ptr,
std::move(shape));
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void* AcquireOutfeedBufferForPopulationImpl(
const ExecutableRunOptions* run_options, int32_t buffer_length,
const void* shape_ptr, int32_t shape_length) {
int device_ordinal = GetDeviceOrdinal(run_options);
VLOG(2) << "AcquireOutfeedBufferForPopulation: "
<< ShapeString(shape_ptr, shape_length) << " on stream executor "
<< device_ordinal;
XfeedManager* xfeed = GetXfeedManager(device_ordinal);
XfeedBuffer* buffer = xfeed->outfeed()->BlockingDequeueBuffer();
CHECK_EQ(buffer->length(), buffer_length)
<< "XLA program outfeed request buffer size " << buffer_length
<< " did not match the runtime's outfeed buffer length "
<< buffer->length() << "; program reports outfed shape: "
<< ShapeString(shape_ptr, shape_length);
return buffer->data();
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void ReleaseOutfeedBufferAfterPopulationImpl(
const ExecutableRunOptions* run_options, int32_t buffer_length,
void* buffer_ptr, const void* shape_ptr, int32_t shape_length) {
int device_ordinal = GetDeviceOrdinal(run_options);
VLOG(2) << "ReleaseOutfeedBufferAfterPopulation: "
<< ShapeString(shape_ptr, shape_length) << " on stream executor "
<< device_ordinal;
XfeedManager* xfeed = GetXfeedManager(device_ordinal);
absl::StatusOr<Shape> shape =
DecodeSelfDescribingShapeConstant(shape_ptr, shape_length);
xfeed->outfeed()->ReleaseCurrentBuffer(buffer_length, buffer_ptr,
std::move(shape));
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void ReplicaIdImpl(const ExecutableRunOptions* run_options,
void* output_buffer) {
int device_ordinal = GetDeviceOrdinal(run_options);
int32_t replica_id = run_options->device_assignment()
->ReplicaIdForDevice(GlobalDeviceId(device_ordinal))
.value();
std::memcpy(output_buffer, &replica_id, 4);
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void PartitionIdImpl(const ExecutableRunOptions* run_options,
void* output_buffer) {
int device_ordinal = GetDeviceOrdinal(run_options);
const DeviceAssignment::LogicalID logical_id =
run_options->device_assignment()
->LogicalIdForDevice(GlobalDeviceId(device_ordinal))
.value();
std::memcpy(output_buffer, &logical_id.computation_id, 4);
}
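// Builds the key under which all participants of one dynamic collective
// rendezvous: the participating global devices plus the run id, op id, and
// whether the op is cross-module (channel id present) or cross-replica.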
RendezvousKey GetRendezvousKey(const ExecutableRunOptions* run_options,
GlobalDeviceId device,
std::vector<ReplicaGroup> group,
int32_t channel_id_present,
std::optional<bool> use_global_device_ids,
int64_t op_id) {
const DeviceAssignment& device_assignment = *run_options->device_assignment();
RendezvousKey::CollectiveOpKind op_kind = channel_id_present
? RendezvousKey::kCrossModule
: RendezvousKey::kCrossReplica;
std::vector<GlobalDeviceId> participating_devices =
GetParticipatingDevices(GlobalDeviceId(device), device_assignment, group,
GetCollectiveOpGroupMode(channel_id_present != 0,
use_global_device_ids)
.value())
.value();
int num_local_participants = participating_devices.size();
return RendezvousKey{run_options->run_id(), std::move(participating_devices),
num_local_participants, op_kind, op_id};
}
CollectivesInterface* GetInProcessCollectivesImpl() {
static InProcessCollectives* c = new InProcessCollectives();
return c;
}
CollectivesInterface* GetCollectivesImpl(
const ExecutableRunOptions* run_options) {
if (run_options->cpu_executable_run_options() &&
run_options->cpu_executable_run_options()->collectives()) {
return run_options->cpu_executable_run_options()->collectives();
}
return GetInProcessCollectivesImpl();
}
absl::Duration DefaultCollectiveTimeout() { return absl::Minutes(30); }
absl::StatusOr<int> RankInGlobalDevices(
absl::Span<GlobalDeviceId const> devices, GlobalDeviceId device) {
auto it = absl::c_find(devices, device);
if (it == devices.end()) {
return InvalidArgument(
"Device %d not present in global devices %s.", device.value(),
absl::StrJoin(devices, ", ", [](std::string* out, GlobalDeviceId id) {
absl::StrAppend(out, id.value());
}));
}
return std::distance(devices.begin(), it);
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void AllToAllImpl(const ExecutableRunOptions* run_options,
int32_t channel_id_present, int64_t op_id,
const void* replica_groups_str,
int32_t replica_groups_str_size, int32_t num_buffers,
int64_t buffer_size, void** source_buffers,
void** destination_buffers) {
GlobalDeviceId device(GetDeviceOrdinal(run_options));
std::string_view replica_groups_serialized(
static_cast<const char*>(replica_groups_str), replica_groups_str_size);
std::vector<ReplicaGroup> group =
ParseReplicaGroupsOnly(replica_groups_serialized).value();
RendezvousKey rendezvous_key =
GetRendezvousKey(run_options, device, group, channel_id_present,
std::nullopt, op_id);
int rank = RankInGlobalDevices(rendezvous_key.global_devices, device).value();
CollectivesInterface* collectives = GetCollectivesImpl(run_options);
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(source_buffers,
sizeof(void*) * num_buffers);
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(destination_buffers,
sizeof(void*) * num_buffers);
auto communicator =
collectives->GetCommunicator(rendezvous_key.global_devices, rank).value();
TF_CHECK_OK(communicator->AllToAll(
rendezvous_key, buffer_size,
absl::Span<const void* const>(source_buffers, num_buffers),
absl::Span<void* const>(destination_buffers, num_buffers),
DefaultCollectiveTimeout()));
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void AllGatherImpl(const ExecutableRunOptions* run_options,
int32_t channel_id_present, int32_t use_global_device_ids,
int64_t op_id, const void* replica_groups_str,
int32_t replica_groups_str_size, int64_t buffer_size,
void* source_buffer, void* destination_buffer) {
GlobalDeviceId device(GetDeviceOrdinal(run_options));
std::string_view replica_groups_serialized(
static_cast<const char*>(replica_groups_str), replica_groups_str_size);
std::vector<ReplicaGroup> group =
ParseReplicaGroupsOnly(replica_groups_serialized).value();
RendezvousKey rendezvous_key =
GetRendezvousKey(run_options, device, group, channel_id_present,
use_global_device_ids, op_id);
int rank = RankInGlobalDevices(rendezvous_key.global_devices, device).value();
CollectivesInterface* collectives = GetCollectivesImpl(run_options);
auto communicator =
collectives->GetCommunicator(rendezvous_key.global_devices, rank).value();
TF_CHECK_OK(communicator->AllGather(rendezvous_key, buffer_size,
source_buffer, destination_buffer,
DefaultCollectiveTimeout()));
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void ReduceScatterImpl(const ExecutableRunOptions* run_options,
const void* replica_groups_str,
int32_t replica_groups_str_size,
int32_t channel_id_present,
int32_t use_global_device_ids, int64_t op_id,
int32_t reduction_kind, int32_t element_type,
int64_t chunk_elems, void* input_buffer,
void* output_buffer) {
GlobalDeviceId device(GetDeviceOrdinal(run_options));
std::string_view replica_groups_serialized(
static_cast<const char*>(replica_groups_str), replica_groups_str_size);
std::vector<ReplicaGroup> group =
ParseReplicaGroupsOnly(replica_groups_serialized).value();
RendezvousKey rendezvous_key =
GetRendezvousKey(run_options, device, group, channel_id_present,
use_global_device_ids, op_id);
int rank = RankInGlobalDevices(rendezvous_key.global_devices, device).value();
CollectivesInterface* collectives = GetCollectivesImpl(run_options);
auto communicator =
collectives->GetCommunicator(rendezvous_key.global_devices, rank).value();
TF_CHECK_OK(communicator->ReduceScatter(
rendezvous_key, static_cast<ReductionKind>(reduction_kind),
static_cast<PrimitiveType>(element_type), chunk_elems, input_buffer,
output_buffer, DefaultCollectiveTimeout()));
}
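// A tuple-shaped all-reduce arrives as one buffer per leaf; every leaf is
// reduced independently with the same reduction kind.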
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void AllReduceImpl(const ExecutableRunOptions* run_options,
const void* replica_groups_str,
int32_t replica_groups_str_size, int32_t channel_id_present,
int32_t use_global_device_ids, int64_t op_id,
int32_t reduction_kind, const void* shape_ptr,
int32_t shape_length, int32_t num_buffers,
void** input_buffers, void** output_buffers) {
GlobalDeviceId device(GetDeviceOrdinal(run_options));
std::string_view replica_groups_serialized(
static_cast<const char*>(replica_groups_str), replica_groups_str_size);
std::vector<ReplicaGroup> group =
ParseReplicaGroupsOnly(replica_groups_serialized).value();
RendezvousKey rendezvous_key =
GetRendezvousKey(run_options, device, group, channel_id_present,
use_global_device_ids, op_id);
auto shape_str = ShapeString(shape_ptr, shape_length);
VLOG(2) << "All-reduce input/output shape : " << shape_str;
Shape shape =
DecodeSelfDescribingShapeConstant(shape_ptr, shape_length).value();
CHECK((num_buffers > 1 && shape.IsTuple()) ||
(num_buffers == 1 && LayoutUtil::IsDenseArray(shape)));
int rank = RankInGlobalDevices(rendezvous_key.global_devices, device).value();
CollectivesInterface* collectives = GetCollectivesImpl(run_options);
auto communicator =
collectives->GetCommunicator(rendezvous_key.global_devices, rank).value();
for (int i = 0; i < num_buffers; i++) {
Shape subshape = num_buffers == 1 ? shape : shape.tuple_shapes(i);
TF_CHECK_OK(communicator->AllReduce(
rendezvous_key, static_cast<ReductionKind>(reduction_kind),
subshape.element_type(), ShapeUtil::ElementsIn(subshape),
input_buffers[i], output_buffers[i], DefaultCollectiveTimeout()));
}
} | #define EIGEN_USE_THREADS
#include "xla/service/cpu/cpu_runtime.h"
#include <memory>
#include <string>
#include <tuple>
#include "absl/strings/str_format.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "xla/array2d.h"
#include "xla/client/local_client.h"
#include "xla/service/cpu/runtime_custom_call_status.h"
#include "xla/service/cpu/runtime_matmul.h"
#include "xla/service/cpu/runtime_matmul_acl.h"
#include "xla/service/cpu/runtime_single_threaded_matmul.h"
#include "xla/service/custom_call_status_internal.h"
#include "xla/types.h"
#include "tsl/platform/env.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class CpuRuntimeTest : public ::testing::Test {};
template <typename T>
std::unique_ptr<Array2D<float>> MaybeTransposeArray2D(const Array2D<T>& array,
bool transpose) {
int64_t output_height = array.height();
int64_t output_width = array.width();
if (transpose) {
std::swap(output_width, output_height);
}
auto output = std::make_unique<Array2D<float>>(output_height, output_width);
for (int y = 0; y < array.height(); y++) {
for (int x = 0; x < array.width(); x++) {
if (transpose) {
(*output)(x, y) = array(y, x);
} else {
(*output)(y, x) = array(y, x);
}
}
}
return output;
}
void CheckMatrixMultiply(const Array2D<float>& a, const Array2D<float>& b,
const Array2D<float>& c) {
for (int i = 0; i < a.height(); ++i) {
for (int j = 0; j < b.width(); ++j) {
float sum = 0.0;
for (int k = 0; k < a.width(); ++k) {
sum += a(i, k) * b(k, j);
}
EXPECT_NEAR(sum, c(i, j), 0.01);
}
}
}
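// Drives the XLA:CPU Eigen matmul runtime on row-major Array2D inputs. The
// runtime kernels assume column-major storage, so each input is transposed
// into that layout unless the corresponding transpose flag already accounts
// for it, and the column-major result is transposed back before returning.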
std::unique_ptr<Array2D<float>> EigenMatrixMultiply(const Array2D<float>& a,
const Array2D<float>& b,
bool transpose_lhs,
bool transpose_rhs,
bool single_threaded) {
CHECK_EQ(a.width(), b.height());
int64_t m = a.height();
int64_t n = b.width();
int64_t k = a.width();
auto a_transpose = MaybeTransposeArray2D(a, !transpose_lhs);
auto b_transpose = MaybeTransposeArray2D(b, !transpose_rhs);
auto c_transpose = std::make_unique<Array2D<float>>(n, m);
if (single_threaded) {
__xla_cpu_runtime_EigenSingleThreadedMatMulF32(
nullptr, c_transpose->data(), a_transpose->data(), b_transpose->data(),
m, n, k, transpose_lhs, transpose_rhs);
} else {
tsl::thread::ThreadPool pool(tsl::Env::Default(), "XLAEigen", 2);
Eigen::ThreadPoolDevice device(pool.AsEigenThreadPool(), pool.NumThreads());
ExecutableRunOptions run_options;
run_options.set_intra_op_thread_pool(&device);
__xla_cpu_runtime_EigenMatMulF32(&run_options, c_transpose->data(),
a_transpose->data(), b_transpose->data(),
m, n, k, transpose_lhs, transpose_rhs);
}
return MaybeTransposeArray2D(*c_transpose, true);
}
struct MatMulShape {
int64_t m;
int64_t k;
int64_t n;
};
MatMulShape MatMulShapes[] = {
MatMulShape{2, 2, 3}, MatMulShape{256, 512, 1024},
MatMulShape{128, 128, 1}, MatMulShape{1, 128, 128},
MatMulShape{1, 32, 128}, MatMulShape{1, 32, 16},
MatMulShape{32, 16, 1}, MatMulShape{32, 128, 1},
};
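// The shapes above cover vector-like cases (m == 1 or n == 1, exercising the
// matrix-vector paths) as well as larger matrix-matrix cases.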
using MatMulTestParam = std::tuple<MatMulShape, bool, bool, bool>;
class EigenMatMulTest : public CpuRuntimeTest,
public ::testing::WithParamInterface<MatMulTestParam> {
public:
static std::string Name(
const ::testing::TestParamInfo<MatMulTestParam>& info) {
MatMulShape shape = std::get<0>(info.param);
bool transpose_lhs = std::get<1>(info.param);
bool transpose_rhs = std::get<2>(info.param);
bool single_threaded = std::get<3>(info.param);
return absl::StrFormat("EigenMatMul_%d_%d_%d_%s%s%s_threaded", shape.m,
shape.k, shape.n, transpose_lhs ? "Tlhs_" : "",
transpose_rhs ? "Trhs_" : "",
single_threaded ? "single" : "multi");
}
};
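// For example, {m=2, k=2, n=3} with transpose_lhs and single_threaded set
// yields the test name "EigenMatMul_2_2_3_Tlhs_single_threaded".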
TEST_P(EigenMatMulTest, DoIt) {
MatMulShape shape = std::get<0>(GetParam());
bool transpose_lhs = std::get<1>(GetParam());
bool transpose_rhs = std::get<2>(GetParam());
bool single_threaded = std::get<3>(GetParam());
auto a = MakeLinspaceArray2D(0.0, 1.0, shape.m, shape.k);
auto b = MakeLinspaceArray2D(-2.0, 2.0, shape.k, shape.n);
auto c = EigenMatrixMultiply(*a, *b, transpose_lhs, transpose_rhs,
single_threaded);
CheckMatrixMultiply(*a, *b, *c);
}
INSTANTIATE_TEST_SUITE_P(EigenMatMulTestInstantiaion, EigenMatMulTest,
::testing::Combine(::testing::ValuesIn(MatMulShapes),
::testing::Bool(),
::testing::Bool(),
::testing::Bool()),
EigenMatMulTest::Name);
TEST_F(CpuRuntimeTest, SuccessStatus) {
XlaCustomCallStatus success_status;
ASSERT_TRUE(__xla_cpu_runtime_StatusIsSuccess(&success_status));
}
TEST_F(CpuRuntimeTest, FailureStatus) {
XlaCustomCallStatus success_status;
XlaCustomCallStatusSetFailure(&success_status, "Failed", 6);
ASSERT_FALSE(__xla_cpu_runtime_StatusIsSuccess(&success_status));
}
}
} |
2,010 | cpp | tensorflow/tensorflow | cpu_instruction_fusion | third_party/xla/xla/service/cpu/cpu_instruction_fusion.cc | third_party/xla/xla/service/cpu/cpu_instruction_fusion_test.cc | #ifndef XLA_SERVICE_CPU_CPU_INSTRUCTION_FUSION_H_
#define XLA_SERVICE_CPU_CPU_INSTRUCTION_FUSION_H_
#include "absl/container/flat_hash_map.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/fusion_node_indexing_evaluation.h"
#include "xla/service/instruction_fusion.h"
namespace xla {
namespace cpu {
class CpuInstructionFusion : public InstructionFusion {
public:
CpuInstructionFusion()
: InstructionFusion(CpuInstructionFusion::IsExpensive) {}
~CpuInstructionFusion() override = default;
using HloPassInterface::Run;
absl::StatusOr<bool> Run(HloModule* module,
const absl::flat_hash_set<absl::string_view>&
execution_threads) override {
fusion_node_evaluations_.clear();
return InstructionFusion::Run(module, execution_threads);
}
protected:
FusionDecision ShouldFuse(HloInstruction* consumer,
int64_t operand_index) override;
HloInstruction::FusionKind ChooseKind(
const HloInstruction* producer, const HloInstruction* consumer) override;
private:
HloInstruction* FuseInstruction(HloInstruction* fusion_instruction,
HloInstruction* producer) override;
absl::flat_hash_map<const HloInstruction*, FusionNodeIndexingEvaluation>
fusion_node_evaluations_;
};
}  // namespace cpu
}  // namespace xla
#endif  // XLA_SERVICE_CPU_CPU_INSTRUCTION_FUSION_H_
#include "xla/service/cpu/cpu_instruction_fusion.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/fusion_node_indexing_evaluation.h"
#include "xla/service/instruction_fusion.h"
#include "xla/service/llvm_ir/fused_ir_emitter.h"
namespace xla {
namespace cpu {
namespace {
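// Returns true for instructions the CPU loop emitter can absorb into a loop
// fusion: anything elementwise, plus a fixed allowlist of shape- and
// data-movement ops.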
bool CanBeLoopFused(const HloInstruction& hlo) {
return hlo.IsElementwise() ||
hlo.opcode() == HloOpcode::kBitcast ||
hlo.opcode() == HloOpcode::kBroadcast ||
hlo.opcode() == HloOpcode::kConcatenate ||
hlo.opcode() == HloOpcode::kDynamicSlice ||
hlo.opcode() == HloOpcode::kDynamicUpdateSlice ||
hlo.opcode() == HloOpcode::kGather ||
hlo.opcode() == HloOpcode::kIota || hlo.opcode() == HloOpcode::kPad ||
hlo.opcode() == HloOpcode::kReduce ||
hlo.opcode() == HloOpcode::kReshape ||
hlo.opcode() == HloOpcode::kReverse ||
hlo.opcode() == HloOpcode::kSlice ||
hlo.opcode() == HloOpcode::kTranspose;
}
bool IsNonComplexNonBatchedMatrixVectorDot(const HloInstruction* hlo) {
const Shape& hlo_shape = hlo->shape();
return !ShapeUtil::ElementIsComplex(hlo_shape) &&
hlo->opcode() == HloOpcode::kDot && hlo_shape.dimensions_size() <= 1 &&
hlo->dot_dimension_numbers().lhs_batch_dimensions_size() == 0;
}
bool HasExactlyOneUse(const HloInstruction& hlo_instr) {
return hlo_instr.user_count() == 1 &&
absl::c_count(hlo_instr.users().front()->operands(), &hlo_instr) == 1;
}
bool CanBeOutputFused(const HloInstruction* producer,
const HloInstruction* consumer) {
return consumer->opcode() == HloOpcode::kAdd &&
IsNonComplexNonBatchedMatrixVectorDot(producer) &&
         HasExactlyOneUse(*producer);
}
bool CanBeOutputFusedIntoSomeOperand(const HloInstruction* consumer) {
return consumer->opcode() == HloOpcode::kAdd &&
(CanBeOutputFused(consumer->operand(0), consumer) ||
CanBeOutputFused(consumer->operand(1), consumer));
}
}
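// The decision below proceeds roughly in order: try dot+add output fusion
// first; reject producers that are not loop-fusible or whose fusion would
// duplicate expensive work; defer to the generic framework checks; guard
// against excessive code duplication into existing fusion nodes; allow fusing
// a small (< 16 KiB) operand into a matrix-vector dot; and refuse to fuse
// reductions over major dimensions (presumably to preserve favorable
// memory-access patterns on CPU).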
FusionDecision CpuInstructionFusion::ShouldFuse(HloInstruction* consumer,
int64_t operand_index) {
HloInstruction* producer = consumer->mutable_operand(operand_index);
VLOG(2) << "Considering for fusion: operand " << operand_index << " of "
<< consumer->ToString();
constexpr int kFusionThresholdBytes = 16 * 1024;
if (CanBeOutputFused(producer, consumer)) {
VLOG(2) << "Fusion OK: Can create output fusion.";
return {};
}
if (CanBeOutputFusedIntoSomeOperand(producer)) {
return "Bailing because producer can be output-fused into some operand.";
}
if (!CanBeLoopFused(*producer)) {
return "Producer is not loop-fusible.";
}
if (producer->opcode() != HloOpcode::kFusion && is_expensive(*producer) &&
ReusesOperandElements(consumer, operand_index)) {
return "Fusion is not profitable.";
}
RETURN_IF_NOT_FUSIBLE(InstructionFusion::ShouldFuse(consumer, operand_index));
if (producer->opcode() == HloOpcode::kConstant &&
consumer->opcode() != HloOpcode::kFusion) {
return "Not fusing: insufficient non-constant nodes.";
}
if (producer->opcode() == HloOpcode::kFusion) {
return "Not fusing: producer is itself a fusion node.";
}
if (consumer->opcode() == HloOpcode::kFusion) {
if (fusion_node_evaluations_.find(consumer) ==
fusion_node_evaluations_.end()) {
fusion_node_evaluations_.emplace(consumer,
FusionNodeIndexingEvaluation(consumer));
}
if (fusion_node_evaluations_.at(consumer).CodeDuplicationTooHigh(
producer)) {
return "Code duplication too high";
}
}
if (consumer->opcode() == HloOpcode::kDot) {
const Shape& output_shape = consumer->shape();
if (output_shape.dimensions_size() <= 1) {
if (consumer->operand(0)->shape().rank() == 1 && operand_index == 1 &&
ShapeUtil::ByteSizeOfElements(consumer->operand(0)->shape()) <
kFusionThresholdBytes) {
VLOG(2) << "Fusing small matrix-vector product.";
return {};
} else if (consumer->operand(1)->shape().rank() == 1 &&
operand_index == 0 &&
ShapeUtil::ByteSizeOfElements(consumer->operand(1)->shape()) <
kFusionThresholdBytes) {
VLOG(2) << "Fusing small matrix-vector product.";
return {};
}
}
}
if (consumer->opcode() == HloOpcode::kReduce &&
!absl::c_linear_search(
consumer->dimensions(),
LayoutUtil::Minor(consumer->operand(0)->shape().layout(), 0))) {
return "Not fusing reductions over major dimensions";
}
if (producer->opcode() == HloOpcode::kReduce &&
!absl::c_linear_search(
producer->dimensions(),
LayoutUtil::Minor(producer->operand(0)->shape().layout(), 0))) {
return "Not fusing reductions over major dimensions";
}
if (consumer->IsLoopFusion()) {
VLOG(2) << "Fusing: consumer is a fusion node.";
return {};
}
if (CanBeLoopFused(*consumer)) {
VLOG(2) << "Fusing: consumer is elementwise or fusible.";
return {};
}
return "Not fusing: not found a fusible case";
}
HloInstruction::FusionKind CpuInstructionFusion::ChooseKind(
const HloInstruction* producer, const HloInstruction* consumer) {
return CanBeOutputFused(producer, consumer)
? HloInstruction::FusionKind::kOutput
: HloInstruction::FusionKind::kLoop;
}
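// Keeps the per-fusion FusionNodeIndexingEvaluation cache consistent: the
// producer's entry is removed before fusing and the cache is refreshed with
// the newly fused instruction afterwards, so later ShouldFuse calls see
// up-to-date code-duplication estimates.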
HloInstruction* CpuInstructionFusion::FuseInstruction(
HloInstruction* fusion_instruction, HloInstruction* producer) {
auto evaluation = fusion_node_evaluations_.find(fusion_instruction);
if (evaluation == fusion_node_evaluations_.end()) {
evaluation = fusion_node_evaluations_
.emplace(fusion_instruction,
FusionNodeIndexingEvaluation(fusion_instruction))
.first;
}
auto indexing_users = evaluation->second.RemoveFusionOperand(producer);
HloInstruction* new_producer =
InstructionFusion::FuseInstruction(fusion_instruction, producer);
evaluation->second.UpdateEvaluationCache(new_producer, indexing_users);
return new_producer;
}
}
} | #include "xla/service/cpu/cpu_instruction_fusion.h"
#include <algorithm>
#include <memory>
#include <set>
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/transpose_folding.h"
#include "xla/shape.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace cpu {
namespace {
using InstructionFusionTest = HloTestBase;
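// Builds a canonical dot: the last dimension of lhs is contracted with the
// first dimension of rhs, using default operand precision.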
std::unique_ptr<HloInstruction> MakeDot(const Shape& shape, HloInstruction* lhs,
HloInstruction* rhs) {
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(lhs->shape().rank() - 1);
dot_dnums.add_rhs_contracting_dimensions(0);
PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
2, PrecisionConfig::DEFAULT);
return HloInstruction::CreateDot(shape, lhs, rhs, dot_dnums,
precision_config);
}
TEST_F(InstructionFusionTest, DotOperationFusion_Basic_0) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {1024, 256}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {256}), "arg1"));
HloInstruction* exp0 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {1024, 256}), HloOpcode::kExp, arg0));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {1024}), exp0, arg1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_TRUE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_THAT(computation->root_instruction(), op::Fusion());
}
TEST_F(InstructionFusionTest, DotOperationFusion_Basic_1) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {256}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {256, 1024}), "arg1"));
HloInstruction* exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {256, 1024}), HloOpcode::kExp, arg1));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {1024}), arg0, exp1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_TRUE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_THAT(computation->root_instruction(), op::Fusion());
}
TEST_F(InstructionFusionTest, DotOperationFusion_Bitcast) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {2, 512, 2, 128}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {256}), "arg1"));
HloInstruction* exp0 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {2, 512, 2, 128}), HloOpcode::kExp, arg0));
HloInstruction* bitcast0 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {1024, 256}), HloOpcode::kBitcast, exp0));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {1024}), bitcast0, arg1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_TRUE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_THAT(computation->root_instruction(), op::Fusion());
}
TEST_F(InstructionFusionTest, DotOperationFusion_Reshape) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {2, 512, 2, 128}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {256}), "arg1"));
HloInstruction* exp0 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {2, 512, 2, 128}), HloOpcode::kExp, arg0));
HloInstruction* reshape0 =
builder.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {1024, 256}), exp0));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {1024}), reshape0, arg1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_TRUE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_THAT(computation->root_instruction(), op::Fusion());
}
TEST_F(InstructionFusionTest, DotOperationFusion_TooLarge) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {32 * 1024}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {32 * 1024, 256}), "arg1"));
HloInstruction* exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {32 * 1024, 256}), HloOpcode::kExp, arg1));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {256}), arg0, exp1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_FALSE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_EQ(dot, computation->root_instruction());
}
TEST_F(InstructionFusionTest, DotOperationFusion_ElementReuse) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {2, 256}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {256, 1024}), "arg1"));
HloInstruction* exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {256, 1024}), HloOpcode::kExp, arg1));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {2, 1024}), arg0, exp1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_FALSE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_EQ(dot, computation->root_instruction());
}
TEST_F(InstructionFusionTest, DotOperationFusion_TransposeFusion_RHS) {
std::string hlo_string = R"(
HloModule DotOperationFusion_TransposeFusion
ENTRY DotOperationFusion_TransposeFusion {
arg0 = f32[1,256] parameter(0)
arg1 = f32[1024,256] parameter(1)
exponential = f32[1024,256] exponential(arg1)
transpose = f32[256,1024] transpose(exponential), dimensions={1,0}
ROOT dot = f32[1,1024] dot(arg0, transpose), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* computation = module->entry_computation();
TF_ASSERT_OK_AND_ASSIGN(bool changed, TransposeFolding().Run(module.get()));
ASSERT_TRUE(changed);
ASSERT_THAT(computation->root_instruction(),
op::Dot(op::Parameter(0), op::Exp(op::Parameter(1)),
                      /*lhs_contracting_dim=*/1, /*rhs_contracting_dim=*/1));
}
TEST_F(InstructionFusionTest, DotOperationFusion_TransposeFusion_LHS) {
std::string hlo_string = R"(
HloModule DotOperationFusion_TransposeFusion
ENTRY DotOperationFusion_TransposeFusion {
arg0 = f32[256,1] parameter(0)
arg1 = f32[256,1024] parameter(1)
transpose = f32[1,256] transpose(arg0), dimensions={1,0}
exponential = f32[256,1024] exponential(arg1)
ROOT dot = f32[1,1024] dot(transpose, exponential), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* computation = module->entry_computation();
TF_ASSERT_OK_AND_ASSIGN(bool changed, TransposeFolding().Run(module.get()));
ASSERT_TRUE(changed);
ASSERT_THAT(computation->root_instruction(),
op::Dot(op::Parameter(0), op::Exp(op::Parameter(1)),
                      /*lhs_contracting_dim=*/0, /*rhs_contracting_dim=*/0));
}
TEST_F(InstructionFusionTest,
DotOperationFusion_TransposeFusion_LHS_NonDefault) {
std::string hlo_string = R"(
HloModule DotOperationFusion_TransposeFusion
ENTRY DotOperationFusion_TransposeFusion {
arg0 = f32[1,256] parameter(0)
arg1 = f32[256,1024] parameter(1)
transpose = f32[256,1] transpose(arg0), dimensions={1,0}
exponential = f32[256,1024] exponential(arg1)
ROOT dot = f32[1,1024] dot(transpose, exponential), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* computation = module->entry_computation();
TF_ASSERT_OK_AND_ASSIGN(bool changed, TransposeFolding().Run(module.get()));
ASSERT_TRUE(changed);
ASSERT_THAT(computation->root_instruction(),
op::Dot(op::Parameter(0), op::Exp(op::Parameter(1)),
                      /*lhs_contracting_dim=*/1, /*rhs_contracting_dim=*/0));
}
class OpcodeFusionTest : public InstructionFusionTest {
protected:
void RunFusionAndCheckOpcodesWereFused(
HloModule* module, const std::multiset<HloOpcode>& expected_opcodes,
HloInstruction::FusionKind fusion_kind =
HloInstruction::FusionKind::kLoop) {
auto computation = module->entry_computation();
auto did_fusion = CpuInstructionFusion().Run(module);
ASSERT_TRUE(did_fusion.ok());
EXPECT_TRUE(did_fusion.value());
HloInstruction* root = computation->root_instruction();
ASSERT_THAT(root, op::Fusion());
EXPECT_EQ(root->fusion_kind(), fusion_kind);
std::vector<HloOpcode> fused_opcodes(root->fused_instruction_count());
std::transform(root->fused_instructions().begin(),
root->fused_instructions().end(), fused_opcodes.begin(),
[](const HloInstruction* hlo) { return hlo->opcode(); });
EXPECT_EQ(
std::multiset<HloOpcode>(fused_opcodes.begin(), fused_opcodes.end()),
expected_opcodes);
}
HloComputation* CreateAdderToOne(HloModule* module) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "arg0"));
HloInstruction* one = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd, arg0, one));
return module->AddEmbeddedComputation(builder.Build());
}
HloComputation* CreateMax(HloModule* module) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "arg0"));
HloInstruction* arg1 =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "arg1"));
builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kMaximum, arg0, arg1));
return module->AddEmbeddedComputation(builder.Build());
}
};
TEST_F(OpcodeFusionTest, Exponential_Reshape_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {1, 4});
Shape result_shape = ShapeUtil::MakeShape(F32, {4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(param_shape, HloOpcode::kExp, param0));
HloInstruction* reshape2 =
builder.AddInstruction(HloInstruction::CreateReshape(result_shape, exp1));
builder.AddInstruction(
HloInstruction::CreateUnary(result_shape, HloOpcode::kNegate, reshape2));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(), {HloOpcode::kNegate, HloOpcode::kReshape, HloOpcode::kExp,
HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Broadcast_Reshape_DynamicSlice_Tanh) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {8});
Shape starts_shape = ShapeUtil::MakeShape(S32, {});
Shape broadcast_shape = ShapeUtil::MakeShape(F32, {1, 8, 8});
Shape reshape_shape = ShapeUtil::MakeShape(F32, {8, 8});
Shape dynamic_slice_shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, starts_shape, "starts"));
HloInstruction* param2 = builder.AddInstruction(
HloInstruction::CreateParameter(2, starts_shape, "starts"));
HloInstruction* broadcast2 = builder.AddInstruction(
HloInstruction::CreateBroadcast(broadcast_shape, param0, {1}));
HloInstruction* reshape3 = builder.AddInstruction(
HloInstruction::CreateReshape(reshape_shape, broadcast2));
HloInstruction* dynamic_slice4 =
builder.AddInstruction(HloInstruction::CreateDynamicSlice(
dynamic_slice_shape, reshape3, {param1, param2}, {4, 4}));
builder.AddInstruction(HloInstruction::CreateUnary(
dynamic_slice_shape, HloOpcode::kTanh, dynamic_slice4));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kTanh, HloOpcode::kDynamicSlice, HloOpcode::kReshape,
HloOpcode::kBroadcast, HloOpcode::kParameter, HloOpcode::kParameter,
HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Broadcast_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {8});
Shape result_shape = ShapeUtil::MakeShape(F32, {8, 8});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* broadcast1 = builder.AddInstruction(
HloInstruction::CreateBroadcast(result_shape, param0, {1}));
builder.AddInstruction(HloInstruction::CreateUnary(
result_shape, HloOpcode::kNegate, broadcast1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kNegate, HloOpcode::kBroadcast, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, DynamicSlice_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {4});
Shape slice_shape = ShapeUtil::MakeShape(S32, {});
Shape result_shape = ShapeUtil::MakeShape(F32, {2});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, slice_shape, "starts"));
HloInstruction* dynamic_slice2 = builder.AddInstruction(
HloInstruction::CreateDynamicSlice(result_shape, param0, {param1}, {2}));
builder.AddInstruction(HloInstruction::CreateUnary(
result_shape, HloOpcode::kNegate, dynamic_slice2));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(), {HloOpcode::kNegate, HloOpcode::kDynamicSlice,
HloOpcode::kParameter, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Exponential_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(param_shape, HloOpcode::kExp, param0));
builder.AddInstruction(
HloInstruction::CreateUnary(param_shape, HloOpcode::kNegate, exp1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kNegate, HloOpcode::kExp, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Reshape_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {4, 4});
Shape result_shape = ShapeUtil::MakeShape(F32, {16});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* reshape1 = builder.AddInstruction(
HloInstruction::CreateReshape(result_shape, param0));
builder.AddInstruction(
HloInstruction::CreateUnary(result_shape, HloOpcode::kNegate, reshape1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kNegate, HloOpcode::kReshape, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Reverse_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {8});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* reverse1 = builder.AddInstruction(
HloInstruction::CreateReverse(param_shape, param0, {0}));
builder.AddInstruction(
HloInstruction::CreateUnary(param_shape, HloOpcode::kNegate, reverse1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kNegate, HloOpcode::kReverse, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Slice_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {4});
Shape slice_shape = ShapeUtil::MakeShape(F32, {2});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* slice1 = builder.AddInstruction(
HloInstruction::CreateSlice(slice_shape, param0, {0}, {4}, {2}));
builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {2}), HloOpcode::kNegate, slice1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kNegate, HloOpcode::kSlice, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Exponential_Transpose_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {3, 4});
Shape result_shape = ShapeUtil::MakeShape(F32, {4, 3});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(param_shape, HloOpcode::kExp, param0));
HloInstruction* transpose2 = builder.AddInstruction(
HloInstruction::CreateTranspose(result_shape, exp1, {1, 0}));
builder.AddInstruction(HloInstruction::CreateUnary(
result_shape, HloOpcode::kNegate, transpose2));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(), {HloOpcode::kNegate, HloOpcode::kTranspose, HloOpcode::kExp,
HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, UnaryMapOfExp) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {3, 4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
HloInstruction* exp = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kExp, param0));
builder.AddInstruction(
HloInstruction::CreateMap(shape, {exp}, CreateAdderToOne(module.get())));
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(), {HloOpcode::kParameter, HloOpcode::kExp, HloOpcode::kMap});
}
TEST_F(OpcodeFusionTest, BinaryMapOfExps) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {3, 4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "param"));
HloInstruction* exp0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kExp, param0));
HloInstruction* exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kExp, param1));
builder.AddInstruction(
HloInstruction::CreateMap(shape, {exp0, exp1}, CreateMax(module.get())));
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(), {HloOpcode::kParameter, HloOpcode::kParameter,
HloOpcode::kExp, HloOpcode::kExp, HloOpcode::kMap});
}
TEST_F(OpcodeFusionTest, DynamicSliceWithDynamicUpdateSlice) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder builder(TestName());
Shape full_shape = ShapeUtil::MakeShape(F32, {10, 100, 1000});
Shape slice_shape = ShapeUtil::MakeShape(F32, {10, 1, 1000});
std::vector<HloInstruction*> slice_indices, update_indices;
for (int i = 0; i < 3; ++i) {
slice_indices.push_back(
builder.AddInstruction(HloInstruction::CreateParameter(
1 + i, ShapeUtil::MakeShape(U32, {}), "slice_indices")));
update_indices.push_back(
builder.AddInstruction(HloInstruction::CreateParameter(
5 + i, ShapeUtil::MakeShape(U32, {}), "update_indices")));
}
HloInstruction* slice =
builder.AddInstruction(HloInstruction::CreateDynamicSlice(
slice_shape,
builder.AddInstruction(
HloInstruction::CreateParameter(0, full_shape, "slice_from")),
slice_indices,
{10, 1, 1000}));
builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
full_shape,
builder.AddInstruction(
HloInstruction::CreateParameter(4, full_shape, "to_update")),
slice, update_indices));
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kDynamicSlice, HloOpcode::kDynamicUpdateSlice,
HloOpcode::kParameter, HloOpcode::kParameter, HloOpcode::kParameter,
HloOpcode::kParameter, HloOpcode::kParameter, HloOpcode::kParameter,
HloOpcode::kParameter, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, MessOfFusibleNodes) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder builder(TestName());
Shape full_shape = ShapeUtil::MakeShape(F32, {4, 100, 10, 100, 50});
auto loop_idx = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(S32, {}), "param0"));
auto param1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(S32, {}), "param1"));
auto idx_choice = builder.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(S32, {}),
builder.AddInstruction(HloInstruction::CreateDynamicSlice(
ShapeUtil::MakeShape(S32, {1}),
builder.AddInstruction(HloInstruction::CreateParameter(
2, ShapeUtil::MakeShape(S32, {4}), "param2")),
{loop_idx},
{1}))));
auto zero = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0(0)));
auto slice = builder.AddInstruction(HloInstruction::CreateDynamicSlice(
ShapeUtil::MakeShape(F32, {1, 100, 10, 100, 50}),
builder.AddInstruction(HloInstruction::CreateParameter(
3, ShapeUtil::MakeShape(F32, {100, 100, 10, 100, 50}), "param3")),
{idx_choice, zero, zero, zero, zero},
{1, 100, 10, 100, 50}));
builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
full_shape,
builder.AddInstruction(
HloInstruction::CreateParameter(4, full_shape, "param4")),
slice, {loop_idx, param1, param1, param1, param1}));
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kDynamicSlice, HloOpcode::kDynamicSlice,
HloOpcode::kDynamicUpdateSlice, HloOpcode::kReshape,
HloOpcode::kConstant, HloOpcode::kParameter, HloOpcode::kParameter,
HloOpcode::kParameter, HloOpcode::kParameter, HloOpcode::kParameter});
}
void CreateComputationForDotAddOutputFusionTest(const std::string& test_name,
HloModule* module, int m, int k,
int n,
bool add_extra_use_for_dot) {
HloComputation::Builder builder(test_name);
Shape dot_lhs_shape = ShapeUtil::MakeShape(F32, {m, k});
Shape dot_rhs_shape = ShapeUtil::MakeShape(F32, {k, n});
Shape dot_shape = ShapeUtil::MakeShape(F32, {m, n});
if (m == 1) {
dot_lhs_shape = ShapeUtil::MakeShape(F32, {k});
dot_shape = ShapeUtil::MakeShape(F32, {n});
} else if (n == 1) {
dot_rhs_shape = ShapeUtil::MakeShape(F32, {k});
dot_shape = ShapeUtil::MakeShape(F32, {m});
}
auto* dot_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, dot_lhs_shape, "param0"));
auto* dot_rhs = builder.AddInstruction(
HloInstruction::CreateParameter(1, dot_rhs_shape, "param1"));
auto* addend = builder.AddInstruction(
HloInstruction::CreateParameter(2, dot_shape, "param2"));
auto* dot =
builder.AddInstruction(CreateCanonicalDot(dot_shape, dot_lhs, dot_rhs));
builder.AddInstruction(
HloInstruction::CreateBinary(dot_shape, HloOpcode::kAdd, dot, addend));
if (add_extra_use_for_dot) {
auto* token = builder.AddInstruction(HloInstruction::CreateToken());
builder.AddInstruction(
HloInstruction::CreateOutfeed(dot_shape, dot, token, "no_config"));
}
module->AddEntryComputation(builder.Build());
}
TEST_F(OpcodeFusionTest, DotAddOutputFusion_1x50x19) {
auto module = CreateNewVerifiedModule();
  CreateComputationForDotAddOutputFusionTest(TestName(), module.get(), /*m=*/1,
                                             /*k=*/50, /*n=*/19,
                                             /*add_extra_use_for_dot=*/false);
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kDot, HloOpcode::kAdd, HloOpcode::kParameter,
HloOpcode::kParameter, HloOpcode::kParameter},
HloInstruction::FusionKind::kOutput);
}
TEST_F(OpcodeFusionTest, DotAddOutputFusion_19x50x1) {
auto module = CreateNewVerifiedModule();
  CreateComputationForDotAddOutputFusionTest(TestName(), module.get(),
                                             /*m=*/19, /*k=*/50, /*n=*/1,
                                             /*add_extra_use_for_dot=*/false);
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kDot, HloOpcode::kAdd, HloOpcode::kParameter,
HloOpcode::kParameter, HloOpcode::kParameter},
HloInstruction::FusionKind::kOutput);
}
TEST_F(OpcodeFusionTest, DotAddOutputFusion_19x50x19) {
auto module = CreateNewVerifiedModule();
  CreateComputationForDotAddOutputFusionTest(TestName(), module.get(),
                                             /*m=*/19, /*k=*/50, /*n=*/19,
                                             /*add_extra_use_for_dot=*/false);
TF_ASSERT_OK_AND_ASSIGN(bool fused_something,
CpuInstructionFusion().Run(module.get()));
EXPECT_FALSE(fused_something);
EXPECT_THAT(module->entry_computation()->root_instruction(),
Not(op::Fusion()));
}
TEST_F(OpcodeFusionTest, DotAddOutputFusion_19x50x1_multi_use) {
auto module = CreateNewVerifiedModule();
  CreateComputationForDotAddOutputFusionTest(TestName(), module.get(),
                                             /*m=*/19, /*k=*/50, /*n=*/1,
                                             /*add_extra_use_for_dot=*/true);
TF_ASSERT_OK_AND_ASSIGN(bool fused_something,
CpuInstructionFusion().Run(module.get()));
EXPECT_FALSE(fused_something);
EXPECT_THAT(module->entry_computation()->root_instruction(),
Not(op::Fusion()));
}
TEST_F(InstructionFusionTest,
DotOperationFusion_DontOutputFuseDuplicateOperands) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
a = f32[50,60]{1,0} parameter(0)
b = f32[60,1]{1,0} parameter(1)
c = f32[50,1]{1,0} dot(a, b), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT d = f32[50,1]{1,0} add(c, c)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool fused_something,
CpuInstructionFusion().Run(module.get()));
EXPECT_FALSE(fused_something);
EXPECT_THAT(module->entry_computation()->root_instruction(),
Not(op::Fusion()));
}
struct GatherLoopFusionTestSpec {
std::string test_name;
std::string hlo_computation_text;
static std::string Name(
const ::testing::TestParamInfo<GatherLoopFusionTestSpec>& info) {
return info.param.test_name;
}
};
class GatherLoopFusionTest
: public OpcodeFusionTest,
public ::testing::WithParamInterface<GatherLoopFusionTestSpec> {};
TEST_P(GatherLoopFusionTest, GatherLoopFusion) {
const GatherLoopFusionTestSpec& spec = GetParam();
std::string hlo_string = absl::StrCat("HloModule ", spec.test_name, "\n\n",
spec.hlo_computation_text);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kGather, HloOpcode::kAdd, HloOpcode::kBroadcast,
HloOpcode::kConstant, HloOpcode::kParameter, HloOpcode::kParameter});
}
std::vector<GatherLoopFusionTestSpec> GetGatherLoopFusionTestSpecs() {
std::vector<GatherLoopFusionTestSpec> result;
result.push_back({"FusedTensorFlowGatherV2", R"(
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
gather = s32[3,2] gather(operand, indices),
offset_dims={0},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3, 1}
one = s32[] constant(1)
one_broadcasted = s32[3,2] broadcast(one), dimensions={}
ROOT result = s32[3,2]{1,0} add(gather, one_broadcasted)
}
)"});
result.push_back({"FusedTensorFlowGatherMultipleBatchDims", R"(
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2,2] parameter(1)
gather = s32[2,3,2] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=2, |
2,011 | cpp | tensorflow/tensorflow | onednn_matmul | third_party/xla/xla/service/cpu/onednn_matmul.cc | third_party/xla/xla/service/cpu/tests/onednn_matmul_test.cc | #ifndef XLA_SERVICE_CPU_ONEDNN_MATMUL_H_
#define XLA_SERVICE_CPU_ONEDNN_MATMUL_H_
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "dnnl.hpp"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/shape.h"
namespace xla {
namespace cpu {
Shape OneDnnMatMulOptWeightsShape(const Shape& input_shape,
const Shape& weights_shape,
const Shape& bias_shape,
const Shape& output_shape,
const OneDnnMatMulConfig* matmul_config);
extern "C" {
extern void __xla_cpu_runtime_OneDnnMatMul(void* result, void* scratch,
void** args);
extern void __xla_cpu_runtime_OneDnnMatMulReorder(void* result, void** args);
}
}
}
#endif  // defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#endif  // XLA_SERVICE_CPU_ONEDNN_MATMUL_H_
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_matmul.h"
#include <algorithm>
#include <cmath>
#include <cstring>
#include <initializer_list>
#include <iterator>
#include <memory>
#include <unordered_map>
#include <utility>
#include <vector>
#include "dnnl.hpp"
#include "absl/base/dynamic_annotations.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "xla/executable_run_options.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/onednn_memory_util.h"
#include "xla/service/cpu/onednn_util.h"
#include "xla/service/cpu/runtime_lightweight_check.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tsl/util/onednn_threadpool.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace cpu {
namespace {
using dnnl::engine;
using dnnl::matmul;
using dnnl::memory;
using dnnl::stream;
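// Queries oneDNN for its preferred (possibly blocked) weights layout by
// constructing a matmul primitive descriptor with the weights format left as
// "any" and reading back the descriptor oneDNN selects.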
dnnl::memory::desc OneDnnMatMulOptWeightsDesc(
const dnnl::engine& engine, const dnnl::memory::desc& input_md,
const dnnl::memory::desc& weights_md, const dnnl::memory::desc& bias_md,
const dnnl::memory::desc& output_md) {
auto weights_any_md =
memory::desc(weights_md.get_dims(), weights_md.get_data_type(),
dnnl::memory::format_tag::any);
auto matmul_pd = matmul::primitive_desc(engine, input_md, weights_any_md,
bias_md, output_md);
return matmul_pd.weights_desc();
}
dnnl::memory::desc OneDnnMatMulOptWeightsDesc(
const dnnl::engine& engine, const Shape& input_shape,
const Shape& weights_shape, const Shape& bias_shape,
const Shape& output_shape, const OneDnnMatMulConfig* matmul_config) {
auto input_md = ShapeToMemDesc(input_shape);
auto weights_md = ShapeToMemDesc(weights_shape);
TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config->transpose_a(), input_md);
TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config->transpose_b(), weights_md);
auto bias_md = absl::c_count(matmul_config->fusions().ops(),
OneDnnFusionConfig::BIAS) > 0
? ShapeToMemDesc(bias_shape)
: dnnl::memory::desc{};
auto output_md = ShapeToMemDesc(output_shape);
auto missed_rank = output_md.get_ndims() - bias_md.get_ndims();
XLA_LIGHTWEIGHT_CHECK(missed_rank >= 0);
if (!bias_md.is_zero() && missed_rank > 0) {
auto bias_dims = bias_md.get_dims();
bias_dims.insert(bias_dims.begin(), missed_rank, 1);
bias_md = bias_md.reshape(bias_dims);
}
return OneDnnMatMulOptWeightsDesc(engine, input_md, weights_md, bias_md,
output_md);
}
}
Shape OneDnnMatMulOptWeightsShape(const Shape& input_shape,
const Shape& weights_shape,
const Shape& bias_shape,
const Shape& output_shape,
const OneDnnMatMulConfig* matmul_config) {
engine cpu_engine(engine::kind::cpu, 0);
auto optimized_weights_md =
OneDnnMatMulOptWeightsDesc(cpu_engine, input_shape, weights_shape,
bias_shape, output_shape, matmul_config);
return MemDescToXlaShapeFlattened(optimized_weights_md);
}
struct FusedOperandsRef {
const std::vector<void*>& bufs;
std::vector<std::pair<int, dnnl::memory>>& postop_args;
};
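// Translates the fusion list in OneDnnMatMulConfig into dnnl::post_ops
// (eltwise ops, bias, binary add, linear scaling). When fused_operands_ref is
// non-null, runtime memory objects for bias/binary operands are also recorded
// so they can be bound as primitive arguments at execution time.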
std::unique_ptr<matmul::primitive_desc> CreateMatMulPrimDesc(
const engine& cpu_engine, const memory::desc& input_md,
const memory::desc& plain_weights_md, const memory::desc& output_md,
const std::vector<memory::desc>& fused_mds,
const OneDnnMatMulConfig& matmul_config,
FusedOperandsRef* fused_operands_ref = nullptr) {
auto bias_md = memory::desc();
bool weights_packed = matmul_config.optimization_config().weights_prepacked();
auto weights_md = plain_weights_md;
if (weights_packed) {
weights_md = memory::desc(weights_md.get_dims(), weights_md.get_data_type(),
memory::format_tag::any);
}
dnnl::post_ops post_ops;
int fused_operand_idx = 0;
for (auto& fused_op : matmul_config.fusions().ops()) {
switch (fused_op) {
case OneDnnFusionConfig::RELU:
post_ops.append_eltwise(dnnl::algorithm::eltwise_relu, 0.f, 0.f);
break;
case OneDnnFusionConfig::TANH:
post_ops.append_eltwise(dnnl::algorithm::eltwise_tanh, 0.f, 0.f);
break;
case OneDnnFusionConfig::GELU_TANH:
post_ops.append_eltwise(dnnl::algorithm::eltwise_gelu_tanh, 0.f, 0.f);
break;
case OneDnnFusionConfig::GELU_ERF:
post_ops.append_eltwise(dnnl::algorithm::eltwise_gelu_erf, 0.f, 0.f);
break;
case OneDnnFusionConfig::RELU6:
post_ops.append_eltwise(dnnl::algorithm::eltwise_clip_v2, 0.f, 6.0f);
break;
case OneDnnFusionConfig::SIGMOID:
post_ops.append_eltwise(dnnl::algorithm::eltwise_logistic, 0.f, 0.f);
break;
case OneDnnFusionConfig::BIAS: {
bias_md = fused_mds.at(fused_operand_idx);
auto missed_rank = output_md.get_ndims() - bias_md.get_ndims();
XLA_LIGHTWEIGHT_CHECK(missed_rank >= 0);
if (missed_rank > 0) {
auto bias_dims = bias_md.get_dims();
bias_dims.insert(bias_dims.begin(), missed_rank, 1);
bias_md = bias_md.reshape(bias_dims);
}
if (fused_operands_ref) {
fused_operands_ref->postop_args.emplace_back(
DNNL_ARG_BIAS,
dnnl::memory(bias_md, cpu_engine,
fused_operands_ref->bufs[fused_operand_idx]));
}
fused_operand_idx++;
} break;
case OneDnnFusionConfig::ELU:
post_ops.append_eltwise(dnnl::algorithm::eltwise_elu, 1.0f, 0.0f);
break;
case OneDnnFusionConfig::BINARY_ADD: {
auto binary_md = fused_mds.at(fused_operand_idx);
auto missed_rank = output_md.get_ndims() - binary_md.get_ndims();
XLA_LIGHTWEIGHT_CHECK(missed_rank >= 0);
if (missed_rank > 0) {
auto binary_dims = binary_md.get_dims();
binary_dims.insert(binary_dims.begin(), missed_rank, 1);
binary_md = binary_md.reshape(binary_dims);
}
if (fused_operands_ref) {
auto arg_idx =
DNNL_ARG_ATTR_MULTIPLE_POST_OP(post_ops.len()) | DNNL_ARG_SRC_1;
fused_operands_ref->postop_args.emplace_back(
arg_idx,
dnnl::memory(binary_md, cpu_engine,
fused_operands_ref->bufs[fused_operand_idx]));
}
post_ops.append_binary(dnnl::algorithm::binary_add, binary_md);
fused_operand_idx++;
} break;
case OneDnnFusionConfig::LINEAR: {
        // The scale alpha is transported through the proto as the raw
        // IEEE-754 bit pattern of a float stored in an int32 field; decode it
        // by punning the bits back into a float.
        float const_float;
        *(reinterpret_cast<int32_t*>(&const_float)) =
            matmul_config.fusions().alpha_typecast();
post_ops.append_eltwise(dnnl::algorithm::eltwise_linear, const_float,
0.f);
} break;
default:
LOG(FATAL) << __FILE__ << ":" << __LINE__
<< " Attempt to call OneDNN MatMul runtime library with "
"unsupported post op."
<< std::endl;
}
}
dnnl::primitive_attr attrs;
if (matmul_config.optimization_config().user_scratchpad()) {
attrs.set_scratchpad_mode(dnnl::scratchpad_mode::user);
}
if (post_ops.len() > 0) {
attrs.set_post_ops(post_ops);
}
return std::make_unique<matmul::primitive_desc>(
cpu_engine, input_md, weights_md, bias_md, output_md, attrs);
}
std::unique_ptr<matmul::primitive_desc> CreateMatMulPrimDesc(
const Shape& input_shape, const Shape& weights_shape,
const Shape& output_shape, const std::vector<Shape>& fused_shapes,
const OneDnnMatMulConfig& matmul_config) {
auto input_md = ShapeToMemDesc(input_shape);
auto weights_md = ShapeToMemDesc(weights_shape);
TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config.transpose_a(), input_md);
TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config.transpose_b(), weights_md);
auto output_md = ShapeToMemDesc(output_shape);
std::vector<memory::desc> fused_mds;
std::transform(fused_shapes.begin(), fused_shapes.end(),
std::back_inserter(fused_mds),
[](const Shape& shape) { return ShapeToMemDesc(shape); });
return CreateMatMulPrimDesc(engine(engine::kind::cpu, 0), input_md,
weights_md, output_md, fused_mds, matmul_config);
}
template <>
std::unique_ptr<dnnl::matmul::primitive_desc>
CreateOneDnnPrimDesc<dnnl::matmul::primitive_desc>(HloInstruction* instr) {
if (instr->opcode() != HloOpcode::kCustomCall) {
return nullptr;
}
auto custom_call = Cast<xla::HloCustomCallInstruction>(instr);
auto backend_config = custom_call->backend_config<BackendConfig>();
if (!backend_config.ok()) {
return nullptr;
}
auto& matmul_config = backend_config.value().onednn_matmul_config();
auto operands = custom_call->operands();
auto input = operands[0];
auto weight = operands[1];
auto input_shape = input->shape();
auto weight_shape = weight->shape();
auto output_shape = custom_call->shape().IsTuple()
? custom_call->shape().tuple_shapes(0)
: custom_call->shape();
auto fused_operands =
HloInstruction::InstructionVector(operands.begin() + 2, operands.end());
std::vector<Shape> fused_shapes;
std::transform(fused_operands.begin(), fused_operands.end(),
std::back_inserter(fused_shapes),
[](const HloInstruction* instr) { return instr->shape(); });
return CreateMatMulPrimDesc(input_shape, weight_shape, output_shape,
fused_shapes, matmul_config);
}
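// Runtime entry point for the "__onednn$matmul" custom call. A sketch of the
// argument layout, reconstructed from the parsing below:
//   args[0]  -> int64_t count of entries in args
//   args[1]  -> const xla::ExecutableRunOptions* (may be null)
//   args[2]  -> serialized OneDnnMatMulConfig proto
//   args[3]  -> MemrefInfo for the input (lhs)
//   args[4]  -> MemrefInfo for the weights (rhs)
//   args[5+] -> MemrefInfo for each fused operand (bias, binary add, ...)
// result is the output buffer; scratch is the user scratchpad and is touched
// only when the config requests user-managed scratch memory.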
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_OneDnnMatMul(
void* result, void* scratch, void** args) {
int arg_indx = 0;
const int64_t num_args = *(static_cast<int64_t*>(args[arg_indx++]));
const xla::ExecutableRunOptions* run_options =
static_cast<const xla::ExecutableRunOptions*>(args[arg_indx++]);
auto thread_pool = CreateOneDnnThreadPool(
run_options ? run_options->intra_op_thread_pool() : nullptr);
engine cpu_engine(engine::kind::cpu, 0);
auto onednn_stream = MakeOneDnnStream(cpu_engine, thread_pool.get());
std::string config_str(static_cast<const char*>(args[arg_indx++]));
OneDnnMatMulConfig matmul_config;
matmul_config.ParseFromString(config_str);
MemrefInfo input_minfo(args[arg_indx++]);
MemrefInfo weights_minfo(args[arg_indx++]);
MemrefInfo output_minfo(result);
auto input_md = input_minfo.GetOneDnnMemDesc();
auto weights_md = weights_minfo.GetOneDnnMemDesc();
TRANSPOSE_LAST_TWO_DIMS_IF(
matmul_config.transpose_a() && input_md.get_ndims() > 1, input_md);
TRANSPOSE_LAST_TWO_DIMS_IF(
matmul_config.transpose_b() && weights_md.get_ndims() > 1, weights_md);
auto output_md = output_minfo.GetOneDnnMemDesc();
if (matmul_config.optimization_config().weights_prepacked()) {
weights_md =
memory::desc({input_md.get_dims().back(), output_md.get_dims().back()},
weights_md.get_data_type(), memory::format_tag::ab);
}
const int64_t num_fused_operands = num_args - arg_indx;
std::vector<memory::desc> fused_mds;
std::vector<void*> fused_bufs;
for (int64_t i = 0; i < num_fused_operands; ++i) {
MemrefInfo operand_minfo(args[arg_indx++]);
fused_mds.push_back(operand_minfo.GetOneDnnMemDesc());
fused_bufs.push_back(operand_minfo.Data());
}
std::vector<std::pair<int, dnnl::memory>> postop_args;
FusedOperandsRef fused_operands_ref{fused_bufs, postop_args};
auto matmul_pd =
CreateMatMulPrimDesc(cpu_engine, input_md, weights_md, output_md,
fused_mds, matmul_config, &fused_operands_ref);
XLA_LIGHTWEIGHT_CHECK(num_args == arg_indx);
auto lhs_mem = memory(input_md, cpu_engine, input_minfo.Data());
auto rhs_mem =
memory(matmul_pd->weights_desc(), cpu_engine, weights_minfo.Data());
auto result_mem = memory(output_md, cpu_engine, output_minfo.Data());
if (std::strstr(matmul_pd->impl_info_str(), "ref") != nullptr) {
LOG(WARNING) << "[Perf]: MatMul reference implementation being executed";
}
auto matmul_prim = matmul(*matmul_pd);
std::unordered_map<int, memory> matmul_args{{DNNL_ARG_SRC, lhs_mem},
{DNNL_ARG_WEIGHTS, rhs_mem},
{DNNL_ARG_DST, result_mem}};
if (matmul_config.optimization_config().user_scratchpad()) {
XLA_LIGHTWEIGHT_CHECK(scratch != nullptr);
MemrefInfo scratch_minfo(scratch);
auto scratchpad_md = matmul_pd->scratchpad_desc();
auto scratch_mem = memory(scratchpad_md, cpu_engine, scratch_minfo.Data());
matmul_args.insert({DNNL_ARG_SCRATCHPAD, scratch_mem});
}
matmul_args.insert(postop_args.begin(), postop_args.end());
matmul_prim.execute(onednn_stream, matmul_args);
}
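// Runtime entry point for weight prepacking: reorders plain-layout weights
// into the optimized layout that OneDnnMatMulOptWeightsDesc computed at
// compile time, so later matmul executions can consume them directly.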
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_OneDnnMatMulReorder(
void* result, void** args) {
int arg_indx = 0;
const int64_t num_args = *(static_cast<int64_t*>(args[arg_indx++]));
const xla::ExecutableRunOptions* run_options =
static_cast<const xla::ExecutableRunOptions*>(args[arg_indx++]);
auto thread_pool = CreateOneDnnThreadPool(
run_options ? run_options->intra_op_thread_pool() : nullptr);
engine cpu_engine(engine::kind::cpu, 0);
auto onednn_stream = MakeOneDnnStream(cpu_engine, thread_pool.get());
std::string config_str(static_cast<const char*>(args[arg_indx++]));
OneDnnMatMulConfig matmul_config;
matmul_config.ParseFromString(config_str);
MemrefInfo input_minfo(args[arg_indx++]);
MemrefInfo weight_minfo(args[arg_indx++]);
MemrefInfo output_minfo(args[arg_indx++]);
MemrefInfo result_minfo(result);
auto input_md = input_minfo.GetOneDnnMemDesc();
auto weight_md = weight_minfo.GetOneDnnMemDesc();
auto output_md = output_minfo.GetOneDnnMemDesc();
auto bias_md = dnnl::memory::desc{};
if (absl::c_count(matmul_config.fusions().ops(), OneDnnFusionConfig::BIAS) >
0) {
MemrefInfo bias_minfo(args[arg_indx++]);
bias_md = bias_minfo.GetOneDnnMemDesc();
}
XLA_LIGHTWEIGHT_CHECK(num_args >= arg_indx);
TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config.transpose_a(), input_md);
TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config.transpose_b(), weight_md);
if (!bias_md.is_zero()) {
auto missed_rank = output_md.get_ndims() - bias_md.get_ndims();
XLA_LIGHTWEIGHT_CHECK(missed_rank >= 0);
if (missed_rank > 0) {
auto bias_dims = bias_md.get_dims();
bias_dims.insert(bias_dims.begin(), missed_rank, 1);
bias_md = bias_md.reshape(bias_dims);
}
}
auto result_md = OneDnnMatMulOptWeightsDesc(cpu_engine, input_md, weight_md,
bias_md, output_md);
XLA_LIGHTWEIGHT_CHECK(result_minfo.GetOneDnnMemDesc().get_size() ==
result_md.get_size());
auto weight_mem = dnnl::memory{weight_md, cpu_engine, weight_minfo.Data()};
auto result_mem = dnnl::memory{result_md, cpu_engine, result_minfo.Data()};
dnnl::reorder rdr{weight_mem, result_mem};
rdr.execute(onednn_stream, weight_mem, result_mem);
onednn_stream.wait();
}
}  // namespace cpu
}  // namespace xla
#endif | #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include <utility>
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/service/cpu/onednn_matmul_rewriter.h"
#include "xla/service/cpu/onednn_util.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
#include "tsl/platform/cpu_info.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace cpu {
class MatmulTest : public HloTestBase {
protected:
const char* fused_matmul_bias_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BIAS"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
const char* fused_matmul_binary_add_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BINARY_ADD"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
const char* matmul_rewrite_str_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: }
; CHECK: }
)";
const char* fused_matmul_bias_gelu_tanh_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BIAS","GELU_TANH"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
const char* fused_matmul_bias_gelu_erf_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BIAS","GELU_ERF"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
const char* fused_matmul_bias_elu_rewrite_str_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BIAS","ELU"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
const char* fused_matmul_bias_tanh_rewrite_str_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BIAS","TANH"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
const char* fused_matmul_bias_relu6_rewrite_str_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BIAS","RELU6"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
const char* fused_matmul_bias_sigmoid_rewrite_str_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BIAS","SIGMOID"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
};
TEST_F(MatmulTest, SimpleTestF32) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[32,8,128,64] parameter(0), parameter_replication={false}
arg.1 = f32[32,8,64,128] parameter(1), parameter_replication={false}
ROOT onednn.matmul.0 = f32[32,8,128,128] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, matmul_rewrite_str_);
}
TEST_F(MatmulTest, SimpleTestBF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.bf16
ENTRY matmul.test.bf16 {
arg.0 = bf16[32,8,128,64] parameter(0), parameter_replication={false}
arg.1 = bf16[32,8,64,128] parameter(1), parameter_replication={false}
ROOT onednn.matmul.0 = bf16[32,8,128,128] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-4}));
MatchOptimizedHlo(matmul_module_str, matmul_rewrite_str_);
}
TEST_F(MatmulTest, SimpleTestF16) {
if (!IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.f16
ENTRY matmul.test.f16 {
arg.0 = f16[32,8,128,64] parameter(0), parameter_replication={false}
arg.1 = f16[32,8,64,128] parameter(1), parameter_replication={false}
ROOT onednn.matmul.0 = f16[32,8,128,128] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-4}));
MatchOptimizedHlo(matmul_module_str, matmul_rewrite_str_);
}
TEST_F(MatmulTest, SimpleTestF32TransposeB) {
const char* matmul_module_str = R"(
HloModule matmul.test.1
ENTRY matmul.test.1 {
arg.0 = f32[32,8,128,64]{3,1,2,0} parameter(0), parameter_replication={false}
arg.1 = f32[32,8,128,64]{3,1,2,0} parameter(1), parameter_replication={false}
ROOT onednn.matmul.0 = f32[32,8,128,128] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, matmul_rewrite_str_);
}
TEST_F(MatmulTest, SimpleTestF32WithBiasAddFusion1) {
const char* matmul_module_str = R"(
HloModule matmul.biasadd.test.f32
ENTRY matmul.biasadd.test.f32 {
arg0.1 = f32[32,32,40,30] parameter(0), parameter_replication={false}
reshape.2 = f32[32,32,40,30] reshape(arg0.1)
constant.3 = f32[] constant(1)
broadcast.4 = f32[32,32,30,40] broadcast(constant.3), dimensions={}
dot.7 = f32[32,32,40,40] dot(reshape.2, broadcast.4), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
constant.5 = f32[] constant(15)
broadcast.6 = f32[40] broadcast(constant.5), dimensions={}
broadcast.9 = f32[32,32,40,40] broadcast(broadcast.6), dimensions={3}
add.10 = f32[32,32,40,40] add(dot.7, broadcast.9)
reshape.11 = f32[32,32,40,40] reshape(add.10)
tuple.12 = (f32[32,32,40,40]) tuple(reshape.11)
ROOT get-tuple-element.13 = f32[32,32,40,40] get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_binary_add_);
}
TEST_F(MatmulTest, SimpleTestF32WithBiasAddFusion2) {
const char* matmul_module_str = R"(
HloModule matmul.biasadd.test.f32
ENTRY matmul.biasadd.test.f32 {
arg0.1 = f32[400,300] parameter(0), parameter_replication={false}
reshape.2 = f32[400,300] reshape(arg0.1)
constant.3 = f32[] constant(1)
broadcast.4 = f32[300,400] broadcast(constant.3), dimensions={}
dot.7 = f32[400,400] dot(reshape.2, broadcast.4), lhs_batch_dims={}, lhs_contracting_dims={1}, rhs_batch_dims={}, rhs_contracting_dims={0}
reshape.1 = f32[400,1,400] reshape(dot.7)
constant.5 = f32[] constant(15)
broadcast.6 = f32[400] broadcast(constant.5), dimensions={}
broadcast.9 = f32[400,1,400] broadcast(broadcast.6), dimensions={2}
add.10 = f32[400,1,400] add(reshape.1, broadcast.9)
tuple.12 = (f32[400,1,400]) tuple(add.10)
ROOT get-tuple-element.13 = f32[400,1,400] get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_binary_add_);
}
TEST_F(MatmulTest, SimpleTestF32WithBiasAsParameter1) {
const char* matmul_module_str = R"(
HloModule matmul.biasadd.test.f32
ENTRY matmul.biasadd.test.f32 {
arg0.1 = f32[32,32,40,30] parameter(0), parameter_replication={false}
arg0.2 = f32[32,32,30,40] parameter(1), parameter_replication={false}
arg0.3 = f32[32,32,40,40] parameter(2), parameter_replication={false}
dot.7 = f32[32,32,40,40] dot(arg0.1, arg0.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
add.10 = f32[32,32,40,40] add(dot.7, arg0.3)
reshape.11 = f32[32,32,40,40] reshape(add.10)
tuple.12 = (f32[32,32,40,40]) tuple(reshape.11)
ROOT get-tuple-element.13 = f32[32,32,40,40] get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_binary_add_);
}
TEST_F(MatmulTest, SimpleTestF32WithBiasAsParameter2) {
const char* matmul_module_str = R"(
HloModule matmul.biasadd.test.f32
ENTRY matmul.biasadd.test.f32 {
arg0.1 = f32[32,32,40,30] parameter(0), parameter_replication={false}
arg0.2 = f32[32,32,30,40] parameter(1), parameter_replication={false}
arg0.3 = f32[40]{0} parameter(2), parameter_replication={false}
dot.7 = f32[32,32,40,40] dot(arg0.1, arg0.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
broad.1 = f32[32,32,40,40] broadcast(arg0.3), dimensions={3}
add.10 = f32[32,32,40,40] add(dot.7, broad.1)
reshape.11 = f32[32,32,40,40] reshape(add.10)
tuple.12 = (f32[32,32,40,40]) tuple(reshape.11)
ROOT get-tuple-element.13 = f32[32,32,40,40] get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_);
}
TEST_F(MatmulTest, SimpleTestF32WithBiasAsParameter2D) {
const char* matmul_module_str = R"(
HloModule matmul.biasadd.test.f32
ENTRY matmul.biasadd.test.f32 {
arg0.1 = f32[2,2,400,30] parameter(0), parameter_replication={false}
arg0.2 = f32[2,2,30,400] parameter(1), parameter_replication={false}
arg0.3 = f32[2,400] parameter(2), parameter_replication={false}
dot.7 = f32[2,2,400,400] dot(arg0.1, arg0.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
broad.1 = f32[2,2,400,400] broadcast(arg0.3), dimensions={0,3}
add.10 = f32[2,2,400,400] add(dot.7, broad.1)
reshape.11 = f32[2,2,400,400] reshape(add.10)
tuple.12 = (f32[2,2,400,400]) tuple(reshape.11)
ROOT get-tuple-element.13 = f32[2,2,400,400] get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_binary_add_);
}
TEST_F(MatmulTest, SimpleTestF32WithBiasAsParameter2D1B) {
const char* matmul_module_str = R"(
HloModule matmul.biasadd.test.f32
ENTRY matmul.biasadd.test.f32 {
arg0.1 = f32[1,2,400,30] parameter(0), parameter_replication={false}
arg0.2 = f32[1,2,30,400] parameter(1), parameter_replication={false}
arg0.3 = f32[1,400] parameter(2), parameter_replication={false}
dot.7 = f32[1,2,400,400] dot(arg0.1, arg0.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
broad.1 = f32[1,2,400,400] broadcast(arg0.3), dimensions={0,3}
add.10 = f32[1,2,400,400] add(dot.7, broad.1)
reshape.11 = f32[1,2,400,400] reshape(add.10)
tuple.12 = (f32[1,2,400,400]) tuple(reshape.11)
ROOT get-tuple-element.13 = f32[1,2,400,400] get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_);
}
TEST_F(MatmulTest, SimpleTestF32WithBiasAsParameter3) {
const char* matmul_module_str = R"(
HloModule matmul.biasadd.test.f32
ENTRY matmul.biasadd.test.f32 {
arg0.1 = f32[16,128,768] parameter(0), sharding={replicated}
arg0.2 = f32[768,768] parameter(1), sharding={replicated}
dot.84 = f32[16,128,768] dot(arg0.1, arg0.2), lhs_contracting_dims={2}, rhs_contracting_dims={0}
arg0.3 = f32[768]{0} parameter(2), sharding={replicated}
reshape.85 = f32[1,1,768] reshape(arg0.3)
broadcast.86 = f32[1,1,768] broadcast(reshape.85), dimensions={0,1,2}
reshape.87 = f32[768]{0} reshape(broadcast.86)
broadcast.88 = f32[16,128,768] broadcast(reshape.87), dimensions={2}
ROOT add.89 = f32[16,128,768] add(dot.84, broadcast.88)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_);
}
TEST_F(MatmulTest, SimpleTestF32TransposeBWithBiasAddFusion) {
const char* matmul_module_str = R"(
HloModule matmul.test.1
ENTRY matmul.test.1 {
arg.0 = f32[32,8,4,16]{3,1,2,0} parameter(0), parameter_replication={false}
arg.1 = f32[32,8,16,16]{3,1,2,0} parameter(1), parameter_replication={false}
dot.7 = f32[32,8,4,16]{3,2,1,0} dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}
constant.5 = f32[] constant(15)
broadcast.6 = f32[16]{0} broadcast(constant.5), dimensions={}
broadcast.9 = f32[32,8,4,16]{3,2,1,0} broadcast(broadcast.6), dimensions={3}
add.10 = f32[32,8,4,16]{3,2,1,0} add(dot.7, broadcast.9)
reshape.11 = f32[32,8,4,16]{3,2,1,0} reshape(add.10)
tuple.12 = (f32[32,8,4,16]{3,2,1,0}) tuple(reshape.11)
ROOT get-tuple-element.13 = f32[32,8,4,16]{3,2,1,0} get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_binary_add_);
}
TEST_F(MatmulTest, F32BiasAddFusionNonCompatibleBias) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.1 {
arg.0 = f32[12288,2] parameter(0), parameter_replication={false}
arg.1 = f32[2,1024] parameter(1), parameter_replication={false}
dot.0 = f32[12288,1024] dot(arg.0, arg.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
reshape.0 = f32[32,384,1024] reshape(dot.0)
constant.0 = f32[1,384,1024] constant(15)
reshape.1 = f32[384,1024] reshape(constant.0)
broadcast.0 = f32[32,384,1024] broadcast(reshape.1), dimensions={1,2}
add.0 = f32[32,384,1024] add(reshape.0, broadcast.0)
tuple.0 = (f32[32,384,1024]) tuple(add.0)
ROOT get-tuple-element.0 = f32[32,384,1024] get-tuple-element(tuple.0), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, matmul_rewrite_str_);
}
TEST_F(MatmulTest, ApproxGELUTestF32) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[32,32,4,16] parameter(0), parameter_replication={false}
arg.1 = f32[32,32,16,32] parameter(1), parameter_replication={false}
onednn.matmul.0 = f32[32,32,4,32] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
mul.0 = f32[32,32,4,32] multiply(onednn.matmul.0, onednn.matmul.0)
mul.1 = f32[32,32,4,32] multiply(onednn.matmul.0, mul.0)
const.0 = f32[] constant(0.044715)
bcast.0 = f32[32,32,4,32] broadcast(const.0), dimensions={}
mul.2 = f32[32,32,4,32] multiply(mul.1, bcast.0)
add.0 = f32[32,32,4,32] add(onednn.matmul.0, mul.2)
const.1 = f32[] constant(0.797884583)
bcast.1 = f32[32,32,4,32] broadcast(const.1), dimensions={}
mul.3 = f32[32,32,4,32] multiply(add.0, bcast.1)
tanh = f32[32,32,4,32] tanh(mul.3)
const.2 = f32[] constant(1)
bcast.2 = f32[32,32,4,32] broadcast(const.2), dimensions={}
add.2 = f32[32,32,4,32] add(tanh, bcast.2)
const.3 = f32[] constant(0.5)
bcast.3 = f32[32,32,4,32] broadcast(const.3), dimensions={}
mul.4 = f32[32,32,4,32] multiply(add.2, bcast.3)
ROOT out = f32[32,32,4,32] multiply(onednn.matmul.0, mul.4)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str,
R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["GELU_TANH"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)");
}
TEST_F(MatmulTest, BiasAndApproxGELUTestF32) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
Arg_5.6 = f32[32,32,64] parameter(0), sharding={replicated}
Arg_7.8 = f32[64,256] parameter(1), sharding={replicated}
dot.232 = f32[32,32,256] dot(Arg_5.6, Arg_7.8), lhs_contracting_dims={2}, rhs_contracting_dims={0}
Arg_6.7 = f32[256] parameter(2), sharding={replicated}
reshape.233 = f32[1,1,256] reshape(Arg_6.7)
broadcast.234 = f32[1,1,256] broadcast(reshape.233), dimensions={0,1,2}
reshape.235 = f32[256] reshape(broadcast.234)
broadcast.236 = f32[32,32,256] broadcast(reshape.235), dimensions={2}
add.237 = f32[32,32,256] add(dot.232, broadcast.236)
multiply.238 = f32[32,32,256] multiply(add.237, add.237)
multiply.239 = f32[32,32,256] multiply(add.237, multiply.238)
constant.20 = f32[] constant(0.044715)
broadcast.21 = f32[32,32,256] broadcast(constant.20), dimensions={}
multiply.240 = f32[32,32,256] multiply(multiply.239, broadcast.21)
add.241 = f32[32,32,256] add(add.237, multiply.240)
constant.18 = f32[] constant(0.797884583)
broadcast.19 = f32[32,32,256] broadcast(constant.18), dimensions={}
multiply.242 = f32[32,32,256] multiply(add.241, broadcast.19)
tanh.243 = f32[32,32,256] tanh(multiply.242)
constant.16 = f32[] constant(1)
broadcast.17 = f32[32,32,256] broadcast(constant.16), dimensions={}
add.244 = f32[32,32,256] add(tanh.243, broadcast.17)
constant.14 = f32[] constant(0.5)
broadcast.15 = f32[32,32,256] broadcast(constant.14), dimensions={}
multiply.245 = f32[32,32,256] multiply(add.244, broadcast.15)
ROOT out = f32[32,32,256] multiply(add.237, multiply.245)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_tanh_);
}
TEST_F(MatmulTest, BiasAndApproxTFGELUTestF32) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg0.1 = f32[1024,512] parameter(0), parameter_replication={false}
arg1.2 = f32[256,512] parameter(1), parameter_replication={false}
dot.7 = f32[1024,256] dot(arg0.1, arg1.2), lhs_contracting_dims={1}, rhs_contracting_dims={1}, frontend_attributes={grad_x="false",grad_y="false"}
arg2.3 = f32[256] parameter(2), parameter_replication={false}
broadcast.9 = f32[1024,256] broadcast(arg2.3), dimensions={1}
add.10 = f32[1024,256] add(dot.7, broadcast.9)
constant.12 = f32[] constant(0.044715)
broadcast.13 = f32[1024,256] broadcast(constant.12), dimensions={}
multiply.14 = f32[1024,256] multiply(broadcast.13, add.10)
multiply.11 = f32[1024,256] multiply(add.10, add.10)
multiply.15 = f32[1024,256] multiply(multiply.14, multiply.11)
add.16 = f32[1024,256] add(add.10, multiply.15)
constant.17 = f32[] constant(0.797884583)
broadcast.18 = f32[1024,256] broadcast(constant.17), dimensions={}
multiply.19 = f32[1024,256] multiply(add.16, broadcast.18)
tanh.20 = f32[1024,256] tanh(multiply.19)
constant.21 = f32[] constant(1)
broadcast.22 = f32[1024,256] broadcast(constant.21), dimensions={}
add.23 = f32[1024,256] add(tanh.20, broadcast.22)
constant.24 = f32[] constant(0.5)
broadcast.25 = f32[1024,256] broadcast(constant.24), dimensions={}
multiply.26 = f32[1024,256] multiply(add.23, broadcast.25)
ROOT multiply.27 = f32[1024,256] multiply(add.10, multiply.26)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_tanh_);
}
TEST_F(MatmulTest, BiasAndApproxTFGELUTestBF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg0.1 = f32[1024,512] parameter(0), parameter_replication={false}
convert.8 = bf16[1024,512] convert(arg0.1)
arg1.2 = f32[256,512] parameter(1), parameter_replication={false}
convert.9 = bf16[256,512] convert(arg1.2)
dot.10 = bf16[1024,256] dot(convert.8, convert.9), lhs_contracting_dims={1}, rhs_contracting_dims={1}, frontend_attributes={grad_x="false",grad_y="false"}
convert = f32[1024,256] convert(dot.10)
arg2.3 = f32[256] parameter(2), parameter_replication={false}
broadcast = f32[1024,256] broadcast(arg2.3), dimensions={1}
add.13 = f32[1024,256] add(convert, broadcast)
constant.16 = f32[] constant(0.044715)
broadcast.17 = f32[1024,256] broadcast(constant.16), dimensions={}
multiply.18 = f32[1024,256] multiply(broadcast.17, add.13)
multiply.15 = f32[1024,256] multiply(add.13, add.13)
multiply.19 = f32[1024,256] multiply(multiply.18, multiply.15)
add.20 = f32[1024,256] add(add.13, multiply.19)
constant.21 = f32[] constant(0.797884583)
broadcast.22 = f32[1024,256] broadcast(constant.21), dimensions={}
multiply.23 = f32[1024,256] multiply(add.20, broadcast.22)
tanh.24 = f32[1024,256] tanh(multiply.23)
constant.25 = f32[] constant(1)
broadcast.26 = f32[1024,256] broadcast(constant.25), dimensions={}
add.27 = f32[1024,256] add(tanh.24, broadcast.26)
constant.1 = f32[] constant(0.5)
broadcast.2 = f32[1024,256] broadcast(constant.1), dimensions={}
multiply.30 = f32[1024,256] multiply(add.13, broadcast.2)
ROOT multiply.32 = f32[1024,256] multiply(add.27, multiply.30)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_tanh_);
}
TEST_F(MatmulTest, BiasAndApproxTFGELUTestF16) {
if (!IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg0.1 = f16[1024,512] parameter(0), parameter_replication={false}
reshape.4 = f16[1024,512] reshape(arg0.1)
arg1.2 = f16[256,512] parameter(1), parameter_replication={false}
reshape.5 = f16[256,512] reshape(arg1.2)
dot.7 = f16[1024,256] dot(reshape.4, reshape.5), lhs_contracting_dims={1}, rhs_contracting_dims={1}, frontend_attributes={grad_x="false",grad_y="false"}
transpose.8 = f16[1024,256] transpose(dot.7), dimensions={0,1}
arg2.3 = f16[256] parameter(2), parameter_replication={false}
reshape.6 = f16[256] reshape(arg2.3)
broadcast.9 = f16[1024,256] broadcast(reshape.6), dimensions={1}
add.10 = f16[1024,256] add(transpose.8, broadcast.9)
constant.12 = f16[] constant(0.044708)
broadcast.13 = f16[1024,256] broadcast(constant.12), dimensions={}
multiply.14 = f16[1024,256] multiply(broadcast.13, add.10)
multiply.11 = f16[1024,256] multiply(add.10, add.10)
multiply.15 = f16[1024,256] multiply(multiply.14, multiply.11)
add.16 = f16[1024,256] add(add.10, multiply.15)
constant.17 = f16[] constant(0.79785)
broadcast.18 = f16[1024,256] broadcast(constant.17), dimensions={}
multiply.19 = f16[1024,256] multiply(add.16, broadcast.18)
tanh.20 = f16[1024,256] tanh(multiply.19)
constant.21 = f16[] constant(1)
broadcast.22 = f16[1024,256] broadcast(constant.21), dimensions={}
add.23 = f16[1024,256] add(tanh.20, broadcast.22)
constant.24 = f16[] constant(0.5)
broadcast.25 = f16[1024,256] broadcast(constant.24), dimensions={}
multiply.26 = f16[1024,256] multiply(add.23, broadcast.25)
ROOT multiply.27 = f16[1024,256] multiply(add.10, multiply.26)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_tanh_);
}
TEST_F(MatmulTest, ExactGELUTestF32) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[32,32,4,16] parameter(0), parameter_replication={false}
arg.1 = f32[32,32,16,32] parameter(1), parameter_replication={false}
onednn.matmul.0 = f32[32,32,4,32] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
const.0 = f32[] constant(0.707106769)
bcast.0 = f32[32,32,4,32] broadcast(const.0), dimensions={}
mul.0 = f32[32,32,4,32] multiply(onednn.matmul.0, bcast.0)
erf.0 = f32[32,32,4,32] erf(mul.0)
const.1 = f32[] constant(1)
bcast.1 = f32[32,32,4,32] broadcast(const.1), dimensions={}
add.0 = f32[32,32,4,32] add(erf.0, bcast.1)
const.2 = f32[] constant(0.5)
bcast.2 = f32[32,32,4,32] broadcast(const.2), dimensions={}
mul.1 = f32[32,32,4,32] multiply(add.0, bcast.2)
ROOT out = f32[32,32,4,32] multiply(onednn.matmul.0, mul.1)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str,
R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["GELU_ERF"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)");
}
TEST_F(MatmulTest, BiasAndExactGELUTestF32) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[6304,768] parameter(0), parameter_replication={false}
arg.1 = f32[768,3072] parameter(1), parameter_replication={false}
dot.378 = f32[6304,3072] dot(arg.0, arg.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
reshape.11 = f32[32,197,3072] reshape(dot.378)
constant.381 = f32[3072] constant(0.3)
broadcast.382 = f32[32,197,3072] broadcast(constant.381), dimensions={2}
add.383 = f32[32,197,3072] add(reshape.11, broadcast.382)
constant.384 = f32[] constant(0.707106769)
broadcast.385 = f32[32,197,3072] broadcast(constant.384), dimensions={}
multiply.386 = f32[32,197,3072] multiply(broadcast.385, add.383)
erf.387 = f32[32,197,3072] erf(multiply.386)
constant.388 = f32[] constant(1)
broadcast.389 = f32[32,197,3072] broadcast(constant.388), dimensions={}
add.390 = f32[32,197,3072] add(erf.387, broadcast.389)
constant.391 = f32[] constant(0.5)
broadcast.392 = f32[32,197,3072] broadcast(constant.391)
multiply.393 = f32[32,197,3072] multiply(add.390, broadcast.392)
multiply.394 = f32[32,197,3072] multiply(multiply.393, add.383)
ROOT out = f32[6304,3072] reshape(multiply.394)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_erf_);
}
TEST_F(MatmulTest, BiasAndExactGELUTestBF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[6304,768] parameter(0), parameter_replication={false}
convert.0 = bf16[6304,768] convert(arg.0)
arg.1 = f32[768,3072] parameter(1), parameter_replication={false}
convert.1 = bf16[768,3072] convert(arg.1)
dot.378 = bf16[6304,3072] dot(convert.0, convert.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
convert.2 = f32[6304,3072] convert(dot.378)
constant.381 = f32[3072] constant(0.3)
broadcast.382 = f32[6304,3072] broadcast(constant.381), dimensions={1}
add.383 = f32[6304,3072] add(convert.2, broadcast.382)
constant.384 = f32[] constant(0.707106769)
broadcast.385 = f32[6304,3072] broadcast(constant.384), dimensions={}
multiply.386 = f32[6304,3072] multiply(broadcast.385, add.383)
erf.387 = f32[6304,3072] erf(multiply.386)
constant.388 = f32[] constant(1)
broadcast.389 = f32[6304,3072] broadcast(constant.388), dimensions={}
add.390 = f32[6304,3072] add(erf.387, broadcast.389)
constant.391 = f32[] constant(0.5)
broadcast.392 = f32[6304,3072] broadcast(constant.391)
multiply.393 = f32[6304,3072] multiply(add.390, broadcast.392)
ROOT out = f32[6304,3072] multiply(multiply.393, add.383)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_erf_);
}
TEST_F(MatmulTest, BiasAndExactJaxGELUTestBF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[6304,768] parameter(0), parameter_replication={false}
convert.0 = bf16[6304,768] convert(arg.0)
arg.1 = f32[768,3072] parameter(1), parameter_replication={false}
convert.1 = bf16[768,3072] convert(arg.1)
dot.378 = bf16[6304,3072] dot(convert.0, convert.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
convert.2 = f32[6304,3072] convert(dot.378)
reshape.0 = f32[32,197,3072] reshape(convert.2)
constant.381 = f32[3072] constant(0.3)
broadcast.382 = f32[32,197,3072] broadcast(constant.381), dimensions={2}
add.383 = f32[32,197,3072] add(reshape.0, broadcast.382)
constant.384 = f32[] constant(0.707182348)
broadcast.385 = f32[32,197,3072] broadcast(constant.384), dimensions={}
multiply.386 = f32[32,197,3072] multiply(broadcast.385, add.383)
erf.387 = f32[32,197,3072] erf(multiply.386)
constant.388 = f32[] constant(1)
broadcast.389 = f32[32,197,3072] broadcast(constant.388), dimensions={}
add.390 = f32[32,197,3072] add(erf.387, broadcast.389)
multiply.393 = f32[32,197,3072] multiply(add.390, add.383)
constant.391 = f32[] constant(0.5)
broadcast.392 = f32[32,197,3072] broadcast(constant.391)
ROOT multiply.394 = f32[32,197,3072] multiply(multiply.393, broadcast.392)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_erf_);
}
TEST_F(MatmulTest, BiasAndExactTFGELUTestBF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.bf16
ENTRY matmul.test.bf16 {
arg0.1 = f32[1024,512] parameter(0), parameter_replication={false}
convert.8 = bf16[1024,512] convert(arg0.1)
arg1.2 = f32[512,256] parameter(1), parameter_replication={false}
convert.9 = bf16[512,256] convert(arg1.2)
dot.10 = bf16[1024,256] dot(convert.8, convert.9), lhs_contracting_dims={1}, rhs_contracting_dims={0}, frontend_attributes={grad_x="false",grad_y="false"}
convert = f32[1024,256] convert(dot.10)
arg2.3 = f32 |
2,012 | cpp | tensorflow/tensorflow | ir_emitter2 | third_party/xla/xla/service/cpu/ir_emitter2.cc | third_party/xla/xla/service/cpu/ir_emitter2_test.cc | #ifndef XLA_SERVICE_CPU_IR_EMITTER2_H_
#define XLA_SERVICE_CPU_IR_EMITTER2_H_
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Value.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/cpu/ir_emitter.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/service/llvm_ir/loop_emitter.h"
#include "xla/shape.h"
#include "xla/stream_executor/launch_dim.h"
namespace xla::cpu {
class IrEmitter2 {
public:
IrEmitter2(const HloModule& hlo_module, llvm::Module* module,
IrEmitter* nested_ir_emitter);
struct KernelThreadDims {
llvm::Value* x;
llvm::Value* y;
llvm::Value* z;
};
struct KernelThread {
llvm::Value* x;
llvm::Value* y;
llvm::Value* z;
};
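// A kernel entry function emitted for the host kernel ABI, together with
// the values loaded from its call frame: thread dims, thread id, and
// IrArrays for the flattened arguments and results.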
struct KernelPrototype {
llvm::Function* function;
KernelThreadDims thread_dims;
KernelThread thread;
std::vector<llvm_ir::IrArray> arguments;
std::vector<llvm_ir::IrArray> results;
};
struct KernelInfo {
std::string name;
se::BlockDim block_dims;
se::ThreadDim thread_dims;
};
absl::Span<const KernelInfo> kernels() const { return kernels_; }
absl::StatusOr<KernelInfo> EmitElementalHostKernel(
const HloInstruction* instr);
absl::StatusOr<KernelInfo> EmitFusionHostKernel(
const HloFusionInstruction* fusion);
absl::StatusOr<KernelInfo> EmitReductionHostKernel(
const HloInstruction* instr);
absl::StatusOr<KernelInfo> EmitDotHostKernel(const HloInstruction* instr);
absl::StatusOr<KernelInfo> EmitDotFusionHostKernel(
const HloFusionInstruction* fusion);
absl::StatusOr<KernelInfo> EmitSelectAndScatterHostKernel(
const HloInstruction* instr);
KernelPrototype EmitKernelPrototype(std::string_view name,
absl::Span<const Shape> arguments,
absl::Span<const Shape> results);
KernelPrototype EmitKernelPrototype(const HloInstruction* instr);
private:
class ElementalIrEmitter;
using ParallelPartitionBounds =
std::vector<std::pair<llvm::Value*, llvm::Value*>>;
struct ParallelConfig {
std::vector<int64_t> outer_dimension_partitions;
};
KernelThreadDims EmitKernelThreadDims(llvm::IRBuilder<>& b,
llvm::Value* call_frame);
KernelThread EmitKernelThread(llvm::IRBuilder<>& b, llvm::Value* call_frame);
llvm_ir::IrArray EmitKernelArgument(llvm::IRBuilder<>& b,
llvm::Value* call_frame, int64_t index,
const Shape& shape);
std::optional<ParallelConfig> GetParallelConfig(const HloInstruction* instr);
ParallelPartitionBounds EmitParallelPartitionBounds(
llvm::IRBuilder<>& b, const KernelPrototype& kernel_prototype,
const ParallelConfig& parallel_config, const Shape& shape,
std::string_view name);
absl::StatusOr<se::ThreadDim> EmitElementalLoops(
llvm::IRBuilder<>& b, const HloInstruction* instr,
const KernelPrototype& kernel_prototype,
const llvm_ir::ElementGenerator& element_generator);
bool fast_min_max() const;
const HloModule& hlo_module_;
llvm::Module* module_;
IrEmitter* nested_ir_emitter_;
llvm::StructType* call_frame_ty_;
llvm::StructType* thread_dims_ty_;
llvm::StructType* thread_ty_;
llvm::StructType* arg_ty_;
std::vector<KernelInfo> kernels_;
};
}
#endif
#include "xla/service/cpu/ir_emitter2.h"
#include <array>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "xla/cpu_function_runtime.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/dot_op_emitter.h"
#include "xla/service/cpu/elemental_math_emitter.h"
#include "xla/service/cpu/ir_emitter.h"
#include "xla/service/cpu/parallel_loop_emitter.h"
#include "xla/service/cpu/shape_partition.h"
#include "xla/service/elemental_ir_emitter.h"
#include "xla/service/llvm_ir/dynamic_update_slice_util.h"
#include "xla/service/llvm_ir/fused_ir_emitter.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/service/llvm_ir/loop_emitter.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::cpu {
namespace {
static std::vector<Shape> FlattenedParameters(const HloInstruction* instr) {
std::vector<Shape> parameters;
for (auto* operand : instr->operands()) {
for (auto& indexed : ShapeUtil::GetLeafShapes(operand->shape())) {
parameters.push_back(indexed.shape);
}
}
return parameters;
}
static std::vector<Shape> FlattenedResults(const HloInstruction* instr) {
std::vector<Shape> results;
for (auto& indexed : ShapeUtil::GetLeafShapes(instr->shape())) {
results.push_back(indexed.shape);
}
return results;
}
static llvm::StructType* Dim3StructTy(llvm::LLVMContext& ctx,
std::string_view name) {
auto* i64 = llvm::IntegerType::getInt64Ty(ctx);
return llvm::StructType::create(name, i64, i64, i64);
}
static llvm::StructType* KernelThreadDimTy(llvm::LLVMContext& ctx) {
return Dim3StructTy(ctx, "SE_HOST_KernelThreadDim");
}
static llvm::StructType* KernelThreadTy(llvm::LLVMContext& ctx) {
return Dim3StructTy(ctx, "SE_HOST_KernelThread");
}
static llvm::StructType* KernelArgTy(llvm::LLVMContext& ctx) {
auto* ptr = llvm::PointerType::getUnqual(ctx);
auto* i64 = llvm::IntegerType::getInt64Ty(ctx);
return llvm::StructType::create("SE_HOST_KernelArg", ptr, i64);
}
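// Mirrors the host runtime's SE_HOST_KernelCallFrame layout:
// {thread_dims*, thread*, num_args, args*}.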
static llvm::StructType* KernelCallFrameTy(llvm::LLVMContext& ctx) {
auto* ptr = llvm::PointerType::getUnqual(ctx);
auto* i64 = llvm::IntegerType::getInt64Ty(ctx);
return llvm::StructType::create("SE_HOST_KernelCallFrame", ptr, ptr, i64,
ptr);
}
static llvm::FunctionType* KernelFunctionTy(llvm::LLVMContext& ctx) {
return llvm::FunctionType::get(llvm::PointerType::getUnqual(ctx),
llvm::PointerType::getUnqual(ctx),
false);
}
}
class IrEmitter2::ElementalIrEmitter : public xla::ElementalIrEmitter {
public:
ElementalIrEmitter(llvm::Module* module, llvm::IRBuilder<>* b,
const HloModule* hlo_module, IrEmitter* nested_ir_emitter,
bool fast_min_max)
: xla::ElementalIrEmitter(module, b),
hlo_module_(hlo_module),
nested_ir_emitter_(nested_ir_emitter),
fast_min_max_(fast_min_max) {}
protected:
absl::StatusOr<llvm::Value*> EmitAtan2(PrimitiveType prim_type,
llvm::Value* lhs, llvm::Value* rhs,
absl::string_view) override {
return xla::cpu::EmitAtan2(module(), *b(), prim_type, lhs, rhs);
}
absl::StatusOr<llvm::Value*> EmitTanh(PrimitiveType prim_type,
llvm::Value* value) override {
return xla::cpu::EmitTanh(module(), *b(), prim_type, value);
}
absl::StatusOr<llvm::Value*> EmitErf(PrimitiveType prim_type,
llvm::Value* value) override {
return xla::cpu::EmitErf(module(), *b(), prim_type, value);
}
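// A thread-local call requires the callee (and any embedded computations)
// to be emitted through the nested IrEmitter first; emission is lazy and
// happens at most once per (computation, is_reducer) pair.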
absl::StatusOr<std::vector<llvm::Value*>> EmitThreadLocalCall(
const HloComputation& callee, absl::Span<llvm::Value* const> parameters,
absl::string_view name, bool is_reducer) override {
if (!hlo_module_ || !hlo_module_->has_schedule()) {
return absl::InternalError(
"HLO module must be scheduled to emit thread local computation.");
}
auto emit_computation = [&](const HloComputation* computation) {
if (!nested_ir_emitter_->is_computation_emitted(*computation,
is_reducer)) {
VLOG(2) << "Emit nested computation: " << computation->name();
TF_RETURN_IF_ERROR(
nested_ir_emitter_
->EmitComputation(
const_cast<HloComputation*>(computation), name, false,
hlo_module_->schedule()
.sequence(computation)
.instructions(),
is_reducer,
{llvm::Attribute::AlwaysInline})
.status());
}
return absl::OkStatus();
};
for (HloComputation* embedded : callee.MakeEmbeddedComputationsList()) {
if (embedded->IsFusionComputation()) continue;
TF_RETURN_IF_ERROR(emit_computation(embedded));
}
TF_RETURN_IF_ERROR(emit_computation(&callee));
VLOG(2) << "Emit thread local call to: " << callee.name();
nested_ir_emitter_->b()->SetInsertPoint(b()->GetInsertPoint());
auto values = nested_ir_emitter_->EmitThreadLocalCall(
callee, parameters, name, is_reducer, false);
return values;
}
bool fast_min_max() override { return fast_min_max_; }
private:
const HloModule* hlo_module_;
IrEmitter* nested_ir_emitter_;
bool fast_min_max_;
};
IrEmitter2::IrEmitter2(const HloModule& hlo_module, llvm::Module* module,
IrEmitter* nested_ir_emitter)
: hlo_module_(hlo_module),
module_(module),
nested_ir_emitter_(nested_ir_emitter),
call_frame_ty_(KernelCallFrameTy(module_->getContext())),
thread_dims_ty_(KernelThreadDimTy(module_->getContext())),
thread_ty_(KernelThreadTy(module_->getContext())),
arg_ty_(KernelArgTy(module_->getContext())) {}
bool IrEmitter2::fast_min_max() const {
return hlo_module_.config().debug_options().xla_cpu_enable_fast_min_max();
}
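// Emits a host kernel that computes `instr` elementwise: operands are read
// through the kernel's argument arrays and the result is produced by a
// (possibly parallel) loop nest over the output shape.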
absl::StatusOr<IrEmitter2::KernelInfo> IrEmitter2::EmitElementalHostKernel(
const HloInstruction* instr) {
VLOG(2) << "Emit elemental host kernel: " << instr->name();
KernelPrototype kernel_prototype = EmitKernelPrototype(instr);
llvm::IRBuilder<> b(module_->getContext());
b.SetInsertPoint(kernel_prototype.function->getEntryBlock().getTerminator());
ElementalIrEmitter::HloToElementGeneratorMap operand_to_generator;
for (int64_t i = 0; i < instr->operand_count(); ++i) {
const HloInstruction* operand = instr->operand(i);
operand_to_generator[operand] = [&, i](const llvm_ir::IrArray::Index& idx) {
return kernel_prototype.arguments[i].EmitReadArrayElement(idx, &b);
};
}
ElementalIrEmitter elemental_emitter(module_, &b, &hlo_module_,
nested_ir_emitter_, fast_min_max());
llvm_ir::ElementGenerator element_generator =
elemental_emitter.MakeElementGenerator(instr, operand_to_generator);
TF_ASSIGN_OR_RETURN(
se::ThreadDim thread_dims,
EmitElementalLoops(b, instr, kernel_prototype, element_generator));
return kernels_.emplace_back(KernelInfo{
kernel_prototype.function->getName().str(), se::BlockDim(), thread_dims});
}
absl::StatusOr<IrEmitter2::KernelInfo> IrEmitter2::EmitFusionHostKernel(
const HloFusionInstruction* fusion) {
VLOG(2) << "Emit fusion host kernel: " << fusion->name();
if (fusion->fusion_kind() == HloInstruction::FusionKind::kOutput) {
return EmitDotFusionHostKernel(fusion);
}
if (fusion->fusion_kind() != HloInstruction::FusionKind::kLoop) {
return Internal("Unsupported loop fusion kind for instruction: %s",
fusion->ToString());
}
KernelPrototype kernel_prototype = EmitKernelPrototype(fusion);
llvm::IRBuilder<> b(module_->getContext());
b.SetInsertPoint(kernel_prototype.function->getEntryBlock().getTerminator());
ElementalIrEmitter elemental_emitter(module_, &b, &hlo_module_,
nested_ir_emitter_, fast_min_max());
FusedIrEmitter fused_emitter(elemental_emitter);
for (int i = 0; i < fusion->operand_count(); i++) {
fused_emitter.BindGenerator(
*fusion->fused_parameter(i), [&, i](llvm_ir::IrArray::Index idx) {
return kernel_prototype.arguments[i].EmitReadArrayElement(idx, &b);
});
}
if (llvm_ir::CanEmitFusedDynamicUpdateSliceInPlace(
const_cast<HloFusionInstruction*>(fusion),
nested_ir_emitter_->assignment())) {
TF_RETURN_IF_ERROR(llvm_ir::EmitFusedDynamicUpdateSliceInPlace(
const_cast<HloFusionInstruction*>(fusion), kernel_prototype.results[0],
&fused_emitter, &b));
return kernels_.emplace_back(
KernelInfo{kernel_prototype.function->getName().str(), se::BlockDim(),
se::ThreadDim()});
}
TF_ASSIGN_OR_RETURN(
auto element_generator,
fused_emitter.GetGenerator(*fusion->fused_expression_root()));
TF_ASSIGN_OR_RETURN(
se::ThreadDim thread_dims,
EmitElementalLoops(b, fusion, kernel_prototype, element_generator));
return kernels_.emplace_back(KernelInfo{
kernel_prototype.function->getName().str(), se::BlockDim(), thread_dims});
}
absl::StatusOr<IrEmitter2::KernelInfo> IrEmitter2::EmitReductionHostKernel(
const HloInstruction* instr) {
VLOG(2) << "Emit reduction host kernel: " << instr->name();
return EmitElementalHostKernel(instr);
}
static bool IsDotCodegenStrategy(DotImplementationStrategy strategy) {
static std::array<DotImplementationStrategy, 3> kDotCodegenStrategies = {
DotImplementationStrategy::kNaiveLlvmIr,
DotImplementationStrategy::kTiledLlvmIrGemm,
DotImplementationStrategy::kTiledLlvmIrGemv,
};
return absl::c_find(kDotCodegenStrategies, strategy) !=
kDotCodegenStrategies.end();
}
absl::StatusOr<IrEmitter2::KernelInfo> IrEmitter2::EmitDotHostKernel(
const HloInstruction* instr) {
VLOG(2) << "Emit dot host kernel: " << instr->name();
DotImplementationStrategy strategy = GetDotImplementationStrategy(
hlo_module_.config(), *instr,
nested_ir_emitter_->target_machine_features());
if (!IsDotCodegenStrategy(strategy)) {
return Internal("Unsupported dot implementation strategy");
}
KernelPrototype kernel_prototype = EmitKernelPrototype(instr);
llvm::IRBuilder<> b(module_->getContext());
b.SetInsertPoint(kernel_prototype.function->getEntryBlock().getTerminator());
llvm_ir::IrArray lhs_array = kernel_prototype.arguments[0];
llvm_ir::IrArray rhs_array = kernel_prototype.arguments[1];
llvm_ir::IrArray target_array = kernel_prototype.results[0];
TF_RETURN_IF_ERROR(EmitDotOperation(
*instr, target_array, lhs_array, rhs_array,
nullptr, nullptr, &b,
hlo_module_.config(), nested_ir_emitter_->target_machine_features(),
false));
return kernels_.emplace_back(
KernelInfo{kernel_prototype.function->getName().str(), se::BlockDim(),
se::ThreadDim()});
}
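// Handles output fusions of the form add(dot(lhs, rhs), addend) by feeding
// the addend into the dot emitter as a fused operand.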
absl::StatusOr<IrEmitter2::KernelInfo> IrEmitter2::EmitDotFusionHostKernel(
const HloFusionInstruction* fusion) {
VLOG(2) << "Emit dot fusion host kernel: " << fusion->name();
const HloInstruction* add = fusion->fused_expression_root();
if (add->opcode() != HloOpcode::kAdd) {
return Internal("Dot fusion supports only `add` root instruction");
}
bool is_dot_operand0 = add->operand(0)->opcode() == HloOpcode::kDot;
bool is_dot_operand1 = add->operand(1)->opcode() == HloOpcode::kDot;
if (is_dot_operand0 == is_dot_operand1) {
return Internal("Dot fusion root instruction must have single dot operand");
}
int64_t dot_op_index = is_dot_operand0 ? 0 : 1;
int64_t addend_op_index = 1 - dot_op_index;
const HloInstruction* dot = add->operand(dot_op_index);
DotImplementationStrategy strategy = GetDotImplementationStrategy(
hlo_module_.config(), *dot,
nested_ir_emitter_->target_machine_features());
if (!IsDotCodegenStrategy(strategy)) {
return Internal("Unsupported dot implementation strategy");
}
int64_t dot_lhs_pnum = dot->operand(0)->parameter_number();
int64_t dot_rhs_pnum = dot->operand(1)->parameter_number();
int64_t addend_pnum = add->operand(addend_op_index)->parameter_number();
KernelPrototype kernel_prototype = EmitKernelPrototype(fusion);
llvm::IRBuilder<> b(module_->getContext());
b.SetInsertPoint(kernel_prototype.function->getEntryBlock().getTerminator());
llvm_ir::IrArray lhs_array = kernel_prototype.arguments[dot_lhs_pnum];
llvm_ir::IrArray rhs_array = kernel_prototype.arguments[dot_rhs_pnum];
llvm_ir::IrArray addend_array = kernel_prototype.arguments[addend_pnum];
llvm_ir::IrArray target_array = kernel_prototype.results[0];
TF_RETURN_IF_ERROR(EmitDotOperation(
*dot, target_array, lhs_array, rhs_array, &addend_array,
nullptr, &b, hlo_module_.config(),
nested_ir_emitter_->target_machine_features(),
false));
return kernels_.emplace_back(
KernelInfo{kernel_prototype.function->getName().str(), se::BlockDim(),
se::ThreadDim()});
}
absl::StatusOr<IrEmitter2::KernelInfo>
IrEmitter2::EmitSelectAndScatterHostKernel(const HloInstruction* instr) {
KernelPrototype kernel_prototype = EmitKernelPrototype(instr);
llvm_ir::IrArray operand_array = kernel_prototype.arguments[0];
llvm_ir::IrArray source_array = kernel_prototype.arguments[1];
llvm_ir::IrArray output_array = kernel_prototype.results[0];
TF_RETURN_IF_ERROR(nested_ir_emitter_->HandleSelectAndScatter(
const_cast<HloInstruction*>(instr), operand_array, source_array,
output_array));
return kernels_.emplace_back(
KernelInfo{kernel_prototype.function->getName().str(), se::BlockDim(),
se::ThreadDim()});
}
IrEmitter2::KernelThreadDims IrEmitter2::EmitKernelThreadDims(
llvm::IRBuilder<>& b, llvm::Value* call_frame) {
auto* td_gep = b.CreateStructGEP(call_frame_ty_, call_frame, 0, "tdims_gep");
auto* tdims = b.CreateLoad(b.getPtrTy(), td_gep, "tdims");
auto* x_gep = b.CreateStructGEP(thread_dims_ty_, tdims, 0, "tdim_x_gep");
auto* y_gep = b.CreateStructGEP(thread_dims_ty_, tdims, 1, "tdim_y_gep");
auto* z_gep = b.CreateStructGEP(thread_dims_ty_, tdims, 2, "tdim_z_gep");
return {b.CreateLoad(b.getInt64Ty(), x_gep, "tdim_x"),
b.CreateLoad(b.getInt64Ty(), y_gep, "tdim_y"),
b.CreateLoad(b.getInt64Ty(), z_gep, "tdim_z")};
}
IrEmitter2::KernelThread IrEmitter2::EmitKernelThread(llvm::IRBuilder<>& b,
llvm::Value* call_frame) {
auto* t_gep = b.CreateStructGEP(call_frame_ty_, call_frame, 1, "tid_gep");
auto* tids = b.CreateLoad(b.getPtrTy(), t_gep, "tids");
auto* x_gep = b.CreateStructGEP(thread_ty_, tids, 0, "tid_x_gep");
auto* y_gep = b.CreateStructGEP(thread_ty_, tids, 1, "tid_y_gep");
auto* z_gep = b.CreateStructGEP(thread_ty_, tids, 2, "tid_z_gep");
return {b.CreateLoad(b.getInt64Ty(), x_gep, "tid_x"),
b.CreateLoad(b.getInt64Ty(), y_gep, "tid_y"),
b.CreateLoad(b.getInt64Ty(), z_gep, "tid_z")};
}
llvm_ir::IrArray IrEmitter2::EmitKernelArgument(llvm::IRBuilder<>& b,
llvm::Value* call_frame,
int64_t index,
const Shape& shape) {
llvm::Type* ptr = llvm::PointerType::get(b.getContext(), 0);
std::string name = absl::StrCat("arg", index);
auto* args_gep = b.CreateStructGEP(call_frame_ty_, call_frame, 3, "args_gep");
auto* args = b.CreateLoad(ptr, args_gep, "args");
auto* data_gep = b.CreateConstGEP2_32(arg_ty_, args, index, 0, name + "_gep");
auto* data = b.CreateLoad(ptr, data_gep, name);
llvm_ir::SetAlignmentMetadataForLoad(data, cpu_function_runtime::MinAlign());
return llvm_ir::IrArray(data, llvm_ir::ShapeToIrType(shape, module_), shape);
}
IrEmitter2::KernelPrototype IrEmitter2::EmitKernelPrototype(
std::string_view name, absl::Span<const Shape> arguments,
absl::Span<const Shape> results) {
VLOG(3) << "Emit kernel prototype: " << name
<< ", #arguments=" << arguments.size()
<< ", #results=" << results.size();
for (const Shape& argument : arguments) {
VLOG(3) << " argument: " << argument.ToString(true);
}
for (const Shape& result : results) {
VLOG(3) << " result: " << result.ToString(true);
}
llvm::LLVMContext& ctx = module_->getContext();
llvm::IRBuilder<> b(ctx);
llvm::Function* function = llvm::dyn_cast<llvm::Function>(
module_->getOrInsertFunction(name, KernelFunctionTy(ctx)).getCallee());
function->setCallingConv(llvm::CallingConv::C);
function->setDoesNotThrow();
const DebugOptions& debug_options = hlo_module_.config().debug_options();
function->addFnAttr(
"prefer-vector-width",
absl::StrCat(debug_options.xla_cpu_prefer_vector_width()));
function->addFnAttr("frame-pointer", "all");
b.SetInsertPoint(llvm::BasicBlock::Create(ctx, "", function));
llvm::Value* call_frame = function->getArg(0);
KernelThreadDims kernel_thread_dims = EmitKernelThreadDims(b, call_frame);
KernelThread kernel_thread = EmitKernelThread(b, call_frame);
int64_t idx = 0;
std::vector<llvm_ir::IrArray> ir_arguments;
for (const Shape& argument : arguments) {
ir_arguments.push_back(EmitKernelArgument(b, call_frame, idx++, argument));
}
std::vector<llvm_ir::IrArray> ir_results;
for (const Shape& result : results) {
ir_results.push_back(EmitKernelArgument(b, call_frame, idx++, result));
}
b.CreateRet(
llvm::ConstantPointerNull::get(llvm::PointerType::getUnqual(ctx)));
return KernelPrototype{function, kernel_thread_dims, kernel_thread,
std::move(ir_arguments), std::move(ir_results)};
}
IrEmitter2::KernelPrototype IrEmitter2::EmitKernelPrototype(
const HloInstruction* instr) {
return EmitKernelPrototype(instr->name(), FlattenedParameters(instr),
FlattenedResults(instr));
}
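// Parallel execution is requested through the instruction's backend
// config; an empty outer_dimension_partitions list means sequential.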
std::optional<IrEmitter2::ParallelConfig> IrEmitter2::GetParallelConfig(
const HloInstruction* instr) {
auto backend_config = instr->backend_config<BackendConfig>();
if (!backend_config.ok() ||
backend_config->outer_dimension_partitions().empty()) {
return std::nullopt;
}
ParallelConfig config;
config.outer_dimension_partitions.assign(
backend_config->outer_dimension_partitions().begin(),
backend_config->outer_dimension_partitions().end());
return config;
}
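// Materializes a private constant array holding the [lower, upper) bounds
// of every partition for every partitioned dimension; the kernel indexes
// into it with its x thread id.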
IrEmitter2::ParallelPartitionBounds IrEmitter2::EmitParallelPartitionBounds(
llvm::IRBuilder<>& b, const KernelPrototype& kernel_prototype,
const ParallelConfig& parallel_config, const Shape& shape,
std::string_view name) {
ShapePartitionIterator it(shape, parallel_config.outer_dimension_partitions);
size_t num_parallel_dimensions =
parallel_config.outer_dimension_partitions.size();
llvm::ArrayType* dim_bounds_ty = llvm::ArrayType::get(b.getInt64Ty(), 2);
llvm::ArrayType* partition_bounds_ty =
llvm::ArrayType::get(dim_bounds_ty, num_parallel_dimensions);
llvm::ArrayType* parallel_bounds_ty =
llvm::ArrayType::get(partition_bounds_ty, it.GetTotalPartitionCount());
std::vector<llvm::Constant*> partition_bounds;
for (int64_t i = 0; i < it.GetTotalPartitionCount(); ++i) {
std::vector<llvm::Constant*> dim_counts;
for (auto [lower, size] : it.GetPartition(i)) {
dim_counts.push_back(llvm::ConstantArray::get(
dim_bounds_ty, {b.getInt64(lower), b.getInt64(lower + size)}));
}
partition_bounds.push_back(
llvm::ConstantArray::get(partition_bounds_ty, dim_counts));
}
llvm::Constant* parallel_bounds =
llvm::Constant | #include "xla/service/cpu/ir_emitter2.h"
#include <memory>
#include <vector>
#include "absl/status/statusor.h"
#include "llvm/IR/LLVMContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
using IrEmitter2Test = HloTestBase;
TEST_F(IrEmitter2Test, BuildKernelPrototype) {
auto hlo = std::make_unique<HloModule>("test", HloModuleConfig());
llvm::LLVMContext context;
auto module = std::make_unique<llvm::Module>("test", context);
auto shape = ShapeUtil::MakeShape(PrimitiveType::F32, {4, 2});
std::vector<Shape> parameters = {shape};
std::vector<Shape> results = {shape};
IrEmitter2 ir_emitter(*hlo, module.get(), nullptr);
IrEmitter2::KernelPrototype prototype =
ir_emitter.EmitKernelPrototype("test", parameters, results);
ASSERT_TRUE(*RunFileCheck(llvm_ir::DumpToString(module.get()), R"(
CHECK: define ptr @test(ptr %0) #0 {
CHECK-NEXT: getelementptr inbounds %SE_HOST_KernelCallFrame, {{.*}} i32 0
CHECK: getelementptr inbounds %SE_HOST_KernelThreadDim, {{.*}} i32 0
CHECK: getelementptr inbounds %SE_HOST_KernelThreadDim, {{.*}} i32 1
CHECK: getelementptr inbounds %SE_HOST_KernelThreadDim, {{.*}} i32 2
CHECK: load i64
CHECK: load i64
CHECK: load i64
CHECK-NEXT: getelementptr inbounds %SE_HOST_KernelCallFrame, {{.*}} i32 1
CHECK: getelementptr inbounds %SE_HOST_KernelThread, {{.*}} i32 0
CHECK: getelementptr inbounds %SE_HOST_KernelThread, {{.*}} i32 1
CHECK: getelementptr inbounds %SE_HOST_KernelThread, {{.*}} i32 2
CHECK: load i64
CHECK: load i64
CHECK: load i64
CHECK-NEXT: getelementptr inbounds %SE_HOST_KernelCallFrame, {{.*}} i32 3
CHECK: load ptr
CHECK: getelementptr %SE_HOST_KernelArg, {{.*}} i32 0, i32 0
CHECK: load ptr, {{.*}} !align !0
CHECK-NEXT: getelementptr inbounds %SE_HOST_KernelCallFrame, {{.*}} i32 3
CHECK: load ptr
CHECK: getelementptr %SE_HOST_KernelArg, {{.*}} i32 1, i32 0
CHECK: load ptr, {{.*}} !align !0
CHECK: ret ptr null
CHECK: }
CHECK: !0 = !{i64 16}
)"));
}
TEST_F(IrEmitter2Test, EmitElementalKernel) {
llvm::LLVMContext context;
auto module = std::make_unique<llvm::Module>("test", context);
const char* hlo_text = R"(
HloModule m
ENTRY main {
p0 = f32[2,2] parameter(0)
ROOT convert = s32[2,2] convert(p0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo, ParseAndReturnUnverifiedModule(hlo_text));
HloInstruction* convert = FindInstruction(hlo.get(), "convert");
ASSERT_NE(convert, nullptr);
IrEmitter2 ir_emitter(*hlo, module.get(), nullptr);
TF_ASSERT_OK_AND_ASSIGN(IrEmitter2::KernelInfo kernel,
ir_emitter.EmitElementalHostKernel(convert));
ASSERT_TRUE(*RunFileCheck(llvm_ir::DumpToString(module.get()), R"(
CHECK: define ptr @convert(ptr %0) #0 {
CHECK: fptosi float {{.*}} to i32
CHECK: }
)"));
}
TEST_F(IrEmitter2Test, EmitParallelKernel) {
llvm::LLVMContext context;
auto module = std::make_unique<llvm::Module>("test", context);
const char* hlo_text = R"(
HloModule m
ENTRY main {
p0 = f32[1,2,1,16384,256] parameter(0)
ROOT convert = s32[1,2,1,16384,256] convert(p0),
backend_config={"outer_dimension_partitions":["1","2","1","4"]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo, ParseAndReturnUnverifiedModule(hlo_text));
HloInstruction* convert = FindInstruction(hlo.get(), "convert");
ASSERT_NE(convert, nullptr);
IrEmitter2 ir_emitter(*hlo, module.get(), nullptr);
TF_ASSERT_OK_AND_ASSIGN(IrEmitter2::KernelInfo kernel,
ir_emitter.EmitElementalHostKernel(convert));
ASSERT_TRUE(*RunFileCheck(llvm_ir::DumpToString(module.get()), R"(
CHECK: @convert_parallel_bounds = private constant [8 x [4 x [2 x i64]]]
CHECK: define ptr @convert(ptr %0) #0 {
CHECK: %lo_dim_0_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 0, i32 0
CHECK: %up_dim_0_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 0, i32 1
CHECK: %lo_dim_1_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 1, i32 0
CHECK: %up_dim_1_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 1, i32 1
CHECK: %lo_dim_2_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 2, i32 0
CHECK: %up_dim_2_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 2, i32 1
CHECK: %lo_dim_3_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 3, i32 0
CHECK: %up_dim_3_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 3, i32 1
CHECK: fptosi float {{.*}} to i32
CHECK: }
)"));
}
}
} |
2,013 | cpp | tensorflow/tensorflow | onednn_layer_norm | third_party/xla/xla/service/cpu/onednn_layer_norm.cc | third_party/xla/xla/service/cpu/tests/onednn_layer_norm_test.cc | #ifndef XLA_SERVICE_CPU_ONEDNN_LAYER_NORM_H_
#define XLA_SERVICE_CPU_ONEDNN_LAYER_NORM_H_
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
namespace xla {
namespace cpu {
extern "C" {
extern void __xla_cpu_runtime_OneDnnLayerNorm(void* result, void** args);
}
}
}
#endif
#endif
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_layer_norm.h"
#include <algorithm>
#include <cmath>
#include <initializer_list>
#include <unordered_map>
#include <vector>
#define EIGEN_USE_THREADS
#include "dnnl.hpp"
#include "absl/base/dynamic_annotations.h"
#include "xla/executable_run_options.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/onednn_memory_util.h"
#include "xla/service/cpu/runtime_lightweight_check.h"
#include "xla/tsl/util/onednn_threadpool.h"
#include "unsupported/Eigen/CXX11/Tensor"
namespace xla {
namespace cpu {
namespace {
using dnnl::engine;
using dnnl::layer_normalization_forward;
using dnnl::memory;
using dnnl::normalization_flags;
using dnnl::prop_kind;
using dnnl::stream;
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_OneDnnLayerNorm(
void* result, void** args) {
int arg_indx = 1;
const xla::ExecutableRunOptions* run_options =
static_cast<const xla::ExecutableRunOptions*>(args[arg_indx++]);
XLA_LIGHTWEIGHT_CHECK(run_options != nullptr);
XLA_LIGHTWEIGHT_CHECK(run_options->intra_op_thread_pool() != nullptr);
tsl::OneDnnThreadPool thread_pool(
run_options->intra_op_thread_pool()->getPool(), false);
engine cpu_engine(engine::kind::cpu, 0);
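// Without OpenMP, run oneDNN primitives on XLA's intra-op thread pool via
// a threadpool-interop stream; otherwise use oneDNN's default stream.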
#ifndef ENABLE_ONEDNN_OPENMP
auto onednn_stream =
stream(dnnl::threadpool_interop::make_stream(cpu_engine, &thread_pool));
#else
auto onednn_stream = stream(cpu_engine);
#endif
std::string config_str(static_cast<const char*>(args[arg_indx++]));
OneDnnNormConfig ln_config;
ln_config.ParseFromString(config_str);
MemrefInfo layer_minfo(args[arg_indx++]);
MemrefInfo gamma_minfo(args[arg_indx++]);
MemrefInfo beta_minfo(args[arg_indx++]);
MemrefInfo result_minfo(result);
auto src_md = layer_minfo.GetOneDnnMemDesc();
auto dst_md = result_minfo.GetOneDnnMemDesc();
auto scaleshift_md = beta_minfo.GetOneDnnMemDesc();
auto src_mem = memory(src_md, cpu_engine, layer_minfo.Data());
auto dst_mem = memory(dst_md, cpu_engine, result_minfo.Data());
auto scale_mem = memory(scaleshift_md, cpu_engine, gamma_minfo.Data());
auto shift_mem = memory(scaleshift_md, cpu_engine, beta_minfo.Data());
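// The config proto carries epsilon as its raw bit pattern in an int32
// field; reinterpret those bits back into a float.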
float epsilon;
*(reinterpret_cast<int32_t*>(&epsilon)) = ln_config.epsilon_typecast();
auto lnorm_pd = layer_normalization_forward::primitive_desc(
cpu_engine, prop_kind::forward_inference, src_md, dst_md, epsilon,
normalization_flags::use_scale | normalization_flags::use_shift);
auto lnorm_prim = layer_normalization_forward(lnorm_pd);
std::unordered_map<int, memory> ln_args;
ln_args.insert({DNNL_ARG_SRC, src_mem});
ln_args.insert({DNNL_ARG_SCALE, scale_mem});
ln_args.insert({DNNL_ARG_SHIFT, shift_mem});
ln_args.insert({DNNL_ARG_DST, dst_mem});
lnorm_prim.execute(onednn_stream, ln_args);
}
}
}
#endif | #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
class LayerNormTest : public HloTestBase {
protected:
const char* onednn_layer_norm_ =
R"(
; CHECK: custom_call_target="__onednn$layernorm",
; CHECK: backend_config={
; CHECK-DAG: "onednn_layer_norm_config":{
; CHECK-DAG: "rescale":"SCALE_AND_SHIFT"
; CHECK-DAG: }
; CHECK: }
)";
std::string common_hlo_region_ =
R"(
region_add {
Arg_0.7555 = f32[] parameter(0)
Arg_1.7556 = f32[] parameter(1)
ROOT add.7557 = f32[] add(Arg_0.7555, Arg_1.7556)
}
)";
std::string common_hlo_entry_computation_block_ =
R"(
Arg_0.2 = f32[768]{0} parameter(1), sharding={replicated}
Arg_0.3 = f32[768]{0} parameter(2), sharding={replicated}
convert.290 = f32[84,197,768]{2,1,0} convert(Arg_0.1)
constant.291 = f32[] constant(0)
convert.292 = f32[] convert(constant.291)
reduce.297 = f32[84,197]{1,0} reduce(convert.290, convert.292), dimensions={2}, to_apply=region_add
constant.298 = s32[] constant(768)
convert.299 = f32[] convert(constant.298)
broadcast.300 = f32[84,197]{1,0} broadcast(convert.299), dimensions={}
divide.301 = f32[84,197]{1,0} divide(reduce.297, broadcast.300)
convert.302 = f32[84,197]{1,0} convert(divide.301)
reshape.303 = f32[84,197,1]{2,1,0} reshape(convert.302)
reshape.304 = f32[84,197]{1,0} reshape(reshape.303)
broadcast.305 = f32[84,197,768]{2,1,0} broadcast(reshape.304), dimensions={0,1}
subtract.306 = f32[84,197,768]{2,1,0} subtract(Arg_0.1, broadcast.305)
multiply.307 = f32[84,197,768]{2,1,0} multiply(subtract.306, subtract.306)
convert.308 = f32[84,197,768]{2,1,0} convert(multiply.307)
constant.309 = f32[] constant(0)
convert.310 = f32[] convert(constant.309)
reduce.315 = f32[84,197]{1,0} reduce(convert.308, convert.310), dimensions={2}, to_apply=region_add
constant.316 = s32[] constant(768)
convert.317 = f32[] convert(constant.316)
broadcast.318 = f32[84,197]{1,0} broadcast(convert.317), dimensions={}
divide.319 = f32[84,197]{1,0} divide(reduce.315, broadcast.318)
convert.320 = f32[84,197]{1,0} convert(divide.319)
reshape.321 = f32[84,197,1]{2,1,0} reshape(convert.320)
constant.322 = f32[] constant(1e-12)
broadcast.323 = f32[84,197,1]{2,1,0} broadcast(constant.322), dimensions={}
add.324 = f32[84,197,1]{2,1,0} add(reshape.321, broadcast.323)
rsqrt.325 = f32[84,197,1]{2,1,0} rsqrt(add.324)
reshape.328 = f32[84,197]{1,0} reshape(rsqrt.325)
broadcast.329 = f32[84,197,768]{2,1,0} broadcast(reshape.328), dimensions={0,1}
broadcast.327 = f32[84,197,768]{2,1,0} broadcast(Arg_0.2), dimensions={2}
multiply.330 = f32[84,197,768]{2,1,0} multiply(broadcast.329, broadcast.327)
multiply.331 = f32[84,197,768]{2,1,0} multiply(Arg_0.1, multiply.330)
broadcast.336 = f32[84,197,768]{2,1,0} broadcast(Arg_0.3), dimensions={2}
reshape.332 = f32[84,197]{1,0} reshape(reshape.303)
broadcast.333 = f32[84,197,768]{2,1,0} broadcast(reshape.332), dimensions={0,1}
multiply.334 = f32[84,197,768]{2,1,0} multiply(multiply.330, broadcast.333)
subtract.337 = f32[84,197,768]{2,1,0} subtract(broadcast.336, multiply.334)
)";
};
TEST_F(LayerNormTest, LayerNormTest0_FP32) {
std::string layer_norm_module_str =
R"(HloModule layer_norm.test, entry_computation_layout={(f32[84,197,768]{2,1,0}, f32[768]{0}, f32[768]{0})->f32[84,197,768]{2,1,0}})" +
common_hlo_region_ + R"(
ENTRY main {
Arg_0.1 = f32[84,197,768]{2,1,0} parameter(0), sharding={replicated}
)" + common_hlo_entry_computation_block_ +
R"(
ROOT add.338 = f32[84,197,768]{2,1,0} add(multiply.331, subtract.337)
}
)";
EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
TEST_F(LayerNormTest, LayerNormTest0_BF16) {
if (!xla::cpu::IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
std::string layer_norm_module_str =
R"(HloModule layer_norm.test, entry_computation_layout={(bf16[84,197,768]{2,1,0}, f32[768]{0}, f32[768]{0})->bf16[84,197,768]{2,1,0}})" +
common_hlo_region_ + R"(
ENTRY main {
Arg_0.1.0 = bf16[84,197,768]{2,1,0} parameter(0), sharding={replicated}
Arg_0.1 = f32[84,197,768]{2,1,0} convert(Arg_0.1.0)
)" + common_hlo_entry_computation_block_ +
R"(
add.338 = f32[84,197,768]{2,1,0} add(multiply.331, subtract.337)
ROOT convert.339 = bf16[84,197,768]{2,1,0} convert(add.338)
}
)";
EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
TEST_F(LayerNormTest, LayerNormTest0_F16) {
if (!xla::cpu::IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
std::string layer_norm_module_str =
R"(HloModule layer_norm.test, entry_computation_layout={(f16[84,197,768]{2,1,0}, f32[768]{0}, f32[768]{0})->f16[84,197,768]{2,1,0}})" +
common_hlo_region_ + R"(
ENTRY main {
Arg_0.1.0 = f16[84,197,768]{2,1,0} parameter(0), sharding={replicated}
Arg_0.1 = f32[84,197,768]{2,1,0} convert(Arg_0.1.0)
)" + common_hlo_entry_computation_block_ +
R"(
add.338 = f32[84,197,768]{2,1,0} add(multiply.331, subtract.337)
ROOT convert.339 = f16[84,197,768]{2,1,0} convert(add.338)
}
)";
EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
TEST_F(LayerNormTest, LayerNormTest1_F16) {
if (!xla::cpu::IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* layer_norm_module_str = R"(
HloModule layer_norm.test
region_add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add_0 = f32[] add(Arg_0, Arg_1)
}
ENTRY main {
Arg_2 = f16[2,4,8] parameter(0), sharding={replicated}
convert_0 = f32[2,4,8] convert(Arg_2)
constant_0 = f32[] constant(0)
convert_1 = f32[] convert(constant_0)
reduce_0 = f32[2,4] reduce(convert_0, convert_1), dimensions={2}, to_apply=region_add
constant_1 = s32[] constant(8)
convert_2 = f32[] convert(constant_1)
broadcast_0 = f32[2,4] broadcast(convert_2), dimensions={}
divide_0 = f32[2,4] divide(reduce_0, broadcast_0)
convert_3 = f16[2,4] convert(divide_0)
reshape_0 = f16[2,4,1] reshape(convert_3)
reshape_1 = f16[2,4] reshape(reshape_0)
broadcast_1 = f16[2,4,8] broadcast(reshape_1), dimensions={0,1}
subtract_0 = f16[2,4,8] subtract(Arg_2, broadcast_1)
multiply_0 = f16[2,4,8] multiply(subtract_0, subtract_0)
convert_4 = f32[2,4,8] convert(multiply_0)
constant_2 = f32[] constant(0)
convert_5 = f32[] convert(constant_2)
reduce_2 = f32[2,4] reduce(convert_4, convert_5), dimensions={2}, to_apply=region_add
constant_3 = s32[] constant(8)
convert_6 = f32[] convert(constant_3)
broadcast_2 = f32[2,4] broadcast(convert_6), dimensions={}
divide_1 = f32[2,4] divide(reduce_2, broadcast_2)
convert_7 = f16[2,4] convert(divide_1)
reshape_2 = f16[2,4,1] reshape(convert_7)
rsqrt_0 = f16[2,4,1] rsqrt(reshape_2)
reshape_3 = f16[2,4] reshape(rsqrt_0)
broadcast_3 = f16[2,4,8] broadcast(reshape_3), dimensions={0,1}
constant_4 = f16[8]{0} constant({1,1,1,1,1,1,1,1})
broadcast_4 = f16[2,4,8] broadcast(constant_4), dimensions={2}
multiply_1 = f16[2,4,8] multiply(broadcast_3, broadcast_4)
multiply_2 = f16[2,4,8] multiply(Arg_2, multiply_1)
constant_5 = f16[8]{0} constant({1,1,1,1,1,1,1,1})
broadcast_5 = f16[2,4,8] broadcast(constant_5), dimensions={2}
reshape_4 = f16[2,4] reshape(reshape_0)
broadcast_6 = f16[2,4,8] broadcast(reshape_4), dimensions={0,1}
multiply_3 = f16[2,4,8] multiply(multiply_1, broadcast_6)
subtract_1 = f16[2,4,8] subtract(broadcast_5, multiply_3)
ROOT add_1 = f16[2,4,8] add(multiply_2, subtract_1)
}
)";
EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
TEST_F(LayerNormTest, LayerNormTest2_F16) {
if (!xla::cpu::IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* layer_norm_module_str = R"(
HloModule layer_norm.test
region_add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add_0 = f32[] add(Arg_0, Arg_1)
}
ENTRY main {
    Arg_2 = f16[2,4,8] parameter(0), sharding={replicated}
convert_0 = f32[2,4,8] convert(Arg_2)
constant_0 = f32[] constant(0)
convert_1 = f32[] convert(constant_0)
reduce_0 = f32[2,4] reduce(convert_0, convert_1), dimensions={2}, to_apply=region_add
constant_1 = s32[] constant(8)
convert_2 = f32[] convert(constant_1)
broadcast_0 = f32[2,4] broadcast(convert_2), dimensions={}
divide_0 = f32[2,4] divide(reduce_0, broadcast_0)
convert_3 = f16[2,4] convert(divide_0)
reshape_0 = f16[2,4,1] reshape(convert_3)
reshape_1 = f16[2,4] reshape(reshape_0)
broadcast_1 = f16[2,4,8] broadcast(reshape_1), dimensions={0,1}
subtract_0 = f16[2,4,8] subtract(broadcast_1, Arg_2)
multiply_0 = f16[2,4,8] multiply(subtract_0, subtract_0)
convert_4 = f32[2,4,8] convert(multiply_0)
constant_2 = f32[] constant(0)
convert_5 = f32[] convert(constant_2)
reduce_1 = f32[2,4] reduce(convert_4, convert_5), dimensions={2}, to_apply=region_add
constant_3 = s32[] constant(8)
convert_6 = f32[] convert(constant_3)
broadcast_2 = f32[2,4] broadcast(convert_6), dimensions={}
    divide_1 = f32[2,4] divide(reduce_1, broadcast_2)
convert_7 = f16[2,4] convert(divide_1)
reshape_2 = f16[2,4,1] reshape(convert_7)
rsqrt_0 = f16[2,4,1] rsqrt(reshape_2)
reshape_3 = f16[2,4] reshape(rsqrt_0)
broadcast_3 = f16[2,4,8] broadcast(reshape_3), dimensions={0,1}
constant_4 = f16[8] constant({1,1,1,1,1,1,1,1})
broadcast_4 = f16[2,4,8] broadcast(constant_4), dimensions={2}
    multiply_1 = f16[2,4,8] multiply(broadcast_3, broadcast_4)
multiply_2 = f16[2,4,8] multiply(multiply_1, Arg_2)
constant_5 = f16[8] constant({1,1,1,1,1,1,1,1})
broadcast_5 = f16[2,4,8] broadcast(constant_5), dimensions={2}
reshape_4 = f16[2,4] reshape(reshape_0)
    broadcast_6 = f16[2,4,8] broadcast(reshape_4), dimensions={0,1}
    multiply_3 = f16[2,4,8] multiply(multiply_1, broadcast_6)
subtract_1 = f16[2,4,8] subtract(broadcast_5, multiply_3)
ROOT add_1 = f16[2,4,8] add(multiply_2, subtract_1)
}
)";
EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
TEST_F(LayerNormTest, LayerNormTest1_BF16) {
if (!xla::cpu::IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* layer_norm_module_str = R"(
HloModule layer_norm.test
region_add {
Arg_0.7555 = f32[] parameter(0)
Arg_1.7556 = f32[] parameter(1)
ROOT add.7557 = f32[] add(Arg_0.7555, Arg_1.7556)
}
ENTRY main {
Arg_0.1 = bf16[160,197,768] parameter(0), sharding={replicated}
Arg_0.2 = bf16[768] parameter(1), sharding={replicated}
Arg_0.3 = bf16[768] parameter(2), sharding={replicated}
convert.80 = f32[160,197,768] convert(Arg_0.1)
constant.81 = f32[] constant(0)
convert.82 = f32[] convert(constant.81)
reduce.87 = f32[160,197] reduce(convert.80, convert.82), dimensions={2}, to_apply=region_add
constant.88 = s32[] constant(768)
convert.89 = f32[] convert(constant.88)
broadcast.90 = f32[160,197] broadcast(convert.89), dimensions={}
divide.91 = f32[160,197] divide(reduce.87, broadcast.90)
convert.92 = bf16[160,197] convert(divide.91)
reshape.93 = bf16[160,197,1] reshape(convert.92)
reshape.94 = bf16[160,197] reshape(reshape.93)
broadcast.95 = bf16[160,197,768] broadcast(reshape.94), dimensions={0,1}
subtract.96 = bf16[160,197,768] subtract(Arg_0.1, broadcast.95)
multiply.97 = bf16[160,197,768] multiply(subtract.96, subtract.96)
convert.98 = f32[160,197,768] convert(multiply.97)
constant.99 = f32[] constant(0)
convert.100 = f32[] convert(constant.99)
reduce.105 = f32[160,197] reduce(convert.98, convert.100), dimensions={2}, to_apply=region_add
constant.106 = s32[] constant(768)
convert.107 = f32[] convert(constant.106)
broadcast.108 = f32[160,197] broadcast(convert.107), dimensions={}
divide.109 = f32[160,197] divide(reduce.105, broadcast.108)
convert.110 = bf16[160,197] convert(divide.109)
reshape.111 = bf16[160,197,1] reshape(convert.110)
constant.112 = bf16[] constant(1.002e-12)
broadcast.113 = bf16[160,197,1] broadcast(constant.112), dimensions={}
add.114 = bf16[160,197,1] add(reshape.111, broadcast.113)
rsqrt.115 = bf16[160,197,1] rsqrt(add.114)
reshape.118 = bf16[160,197] reshape(rsqrt.115)
broadcast.119 = bf16[160,197,768] broadcast(reshape.118), dimensions={0,1}
broadcast.117 = bf16[160,197,768] broadcast(Arg_0.2), dimensions={2}
multiply.120 = bf16[160,197,768] multiply(broadcast.119, broadcast.117)
multiply.121 = bf16[160,197,768] multiply(Arg_0.1, multiply.120)
broadcast.126 = bf16[160,197,768] broadcast(Arg_0.3), dimensions={2}
reshape.122 = bf16[160,197] reshape(reshape.93)
broadcast.123 = bf16[160,197,768] broadcast(reshape.122), dimensions={0,1}
multiply.124 = bf16[160,197,768] multiply(multiply.120, broadcast.123)
subtract.127 = bf16[160,197,768] subtract(broadcast.126, multiply.124)
ROOT add.128 = bf16[160,197,768] add(multiply.121, subtract.127)
}
)";
EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
}
}
#endif |
2,014 | cpp | tensorflow/tensorflow | ir_emission_utils | third_party/xla/xla/service/gpu/ir_emission_utils.cc | third_party/xla/xla/service/gpu/ir_emission_utils_test.cc | #ifndef XLA_SERVICE_GPU_IR_EMISSION_UTILS_H_
#define XLA_SERVICE_GPU_IR_EMISSION_UTILS_H_
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Value.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
inline constexpr int64_t kMinDimensionToTransposeTiled = 16;
inline constexpr int64_t kMinDimensionToTransposeTiled2 = 8;
inline constexpr int64_t kMinTotalDimensionsToTransposeTiled = 64 * 128;
bool IsMatrixMultiplication(const HloInstruction& dot);
bool IsMatrixVectorMultiplication(const HloInstruction& dot);
inline constexpr int64_t WarpSize() { return 32; }
inline constexpr absl::string_view kCustomFusionKind = "__custom_fusion";
inline constexpr absl::string_view kTritonFusionKind = "__triton";
inline constexpr absl::string_view kTritonGemmFusionKind = "__triton_gemm";
inline constexpr absl::string_view kCuDnnFusionKind = "__cudnn$fusion";
inline constexpr absl::string_view kUncompilableFusion =
"__uncompilable_fusion";
inline constexpr absl::string_view kTopKCustomCallTarget = "__gpu$TopK";
bool IsCustomCallToCusolver(const HloInstruction& hlo);
bool IsCustomCallToTopK(const HloInstruction& hlo);
extern const char* const kCusolverCholeskyCallTarget;
bool IsSliceWithUnitStrides(const HloInstruction* instr);
bool IsContiguousSlice(const HloInstruction& instr);
bool IsContiguousSlice(const Shape& orig, const Shape& sliced);
llvm::Value* EmitFullWarpShuffleDown(
llvm::Value* value, llvm::Value* offset, llvm::IRBuilder<>* builder,
const se::DeviceDescription& gpu_device_info);
llvm::Value* IsBlock0Thread0(llvm::IRBuilder<>* b);
absl::StatusOr<BufferAllocation::Slice> GetAllocationSlice(
const BufferAssignment& buffer_assignment, const HloInstruction* instr,
const ShapeIndex& index);
absl::StatusOr<bool> CanEmitFusedDynamicUpdateSliceInPlaceForGpu(
const HloFusionInstruction* fusion,
std::function<absl::StatusOr<BufferAllocation::Slice>(
const HloInstruction* instr, const ShapeIndex& index)>
get_allocation_slice,
absl::Span<HloInstructionAdaptor const> roots);
std::vector<const HloInstruction*> GetOutputDefiningDynamicUpdateSlices(
absl::Span<HloInstructionAdaptor const> roots);
HloInstructionAdaptor FindNonTrivialHero(const HloInstructionAdaptor& instr);
const HloInstruction& FindNonTrivialHero(const HloInstruction& instr);
struct TransposeDescription {
const HloInstruction* instr;
Vector3 dimensions;
Vector3 permutation;
TransposeDescription(Vector3 dimensions, Vector3 permutation)
: TransposeDescription(nullptr, dimensions, permutation) {}
TransposeDescription(const HloInstruction* instr, Vector3 dimensions,
Vector3 permutation)
: instr(instr), dimensions(dimensions), permutation(permutation) {}
const Shape& input_shape() const { return instr->operand(0)->shape(); }
bool IsEquivalent(const TransposeDescription& other) const {
return dimensions == other.dimensions && permutation == other.permutation;
}
};
std::optional<TransposeDescription> GetDescriptionForTiledTransposeEmitter(
const HloInstruction& root, const HloInstruction& hero);
bool IsIntermediate(const HloInstruction* instr, int allowed_operand_count = 1);
void VLogModule(int level, const llvm::Module& module);
void VerifyModule(const llvm::Module& module);
llvm::Type* GetIndexTypeForKernel(const HloInstruction* hlo,
int64_t launch_size, llvm::IRBuilder<>* b);
bool IsAMDGPU(const llvm::Module* module);
bool IsSPIR(const llvm::Module* module);
class DenseDataIntermediate {
public:
static DenseDataIntermediate Own(std::vector<uint8_t> owned) {
DenseDataIntermediate di;
di.data_ = std::move(owned);
return di;
}
static DenseDataIntermediate Alias(absl::Span<const uint8_t> aliased) {
DenseDataIntermediate di;
di.data_ = aliased;
return di;
}
absl::Span<const uint8_t> span() const {
return data_.index() == 0 ? absl::Span<const uint8_t>(std::get<0>(data_))
: std::get<1>(data_);
}
private:
std::variant<std::vector<uint8_t>, absl::Span<const uint8_t>> data_;
};
absl::StatusOr<DenseDataIntermediate> LiteralToXlaFormat(
const Literal& literal);
}
}
#endif
#include "xla/service/gpu/ir_emission_utils.h"
#include <cstdint>
#include <functional>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FPEnv.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Triple.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/target_util.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/llvm_ir/buffer_assignment_util.h"
#include "xla/service/llvm_ir/llvm_type_conversion_util.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/translate/mhlo_to_hlo/location_exporter.h"
#include "xla/translate/mhlo_to_hlo/type_to_shape.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
bool IsRank2(const Shape& shape, int64_t batch_dimensions_size) {
return shape.rank() == batch_dimensions_size + 2;
}
bool IsRank1(const Shape& shape, int64_t batch_dimensions_size) {
return shape.rank() == batch_dimensions_size + 1;
}
}
bool IsMatrixMultiplication(const HloInstruction& dot) {
if (dot.opcode() != HloOpcode::kDot) {
return false;
}
const Shape& lhs_shape = dot.operand(0)->shape();
const Shape& rhs_shape = dot.operand(1)->shape();
const DotDimensionNumbers& dim_numbers = dot.dot_dimension_numbers();
PrimitiveType output_primitive_type = dot.shape().element_type();
bool type_is_allowed =
(output_primitive_type == F8E4M3FN || output_primitive_type == F8E5M2 ||
output_primitive_type == F8E4M3FNUZ ||
output_primitive_type == F8E5M2FNUZ || output_primitive_type == F16 ||
output_primitive_type == BF16 || output_primitive_type == F32 ||
output_primitive_type == F64 || output_primitive_type == C64 ||
output_primitive_type == C128) ||
(output_primitive_type == S32 && lhs_shape.element_type() == S8 &&
rhs_shape.element_type() == S8);
bool shapes_are_valid =
type_is_allowed &&
IsRank2(lhs_shape, dim_numbers.lhs_batch_dimensions_size()) &&
IsRank2(rhs_shape, dim_numbers.lhs_batch_dimensions_size()) &&
IsRank2(dot.shape(), dim_numbers.lhs_batch_dimensions_size()) &&
!ShapeUtil::IsZeroElementArray(lhs_shape) &&
!ShapeUtil::IsZeroElementArray(rhs_shape);
return shapes_are_valid;
}
bool IsMatrixVectorMultiplication(const HloInstruction& dot) {
if (dot.opcode() != HloOpcode::kDot) {
return false;
}
const Shape& lhs_shape = dot.operand(0)->shape();
const Shape& rhs_shape = dot.operand(1)->shape();
const DotDimensionNumbers& dim_numbers = dot.dot_dimension_numbers();
PrimitiveType output_primitive_type = dot.shape().element_type();
bool type_is_allowed =
(output_primitive_type == F8E4M3FN || output_primitive_type == F8E5M2 ||
output_primitive_type == F16 || output_primitive_type == BF16 ||
output_primitive_type == F32 || output_primitive_type == F64 ||
output_primitive_type == C64 || output_primitive_type == C128) ||
(output_primitive_type == S32 && lhs_shape.element_type() == S8 &&
rhs_shape.element_type() == S8);
bool shapes_are_valid =
type_is_allowed &&
((IsRank2(lhs_shape, dim_numbers.lhs_batch_dimensions_size()) &&
IsRank1(rhs_shape, dim_numbers.lhs_batch_dimensions_size())) ||
(IsRank1(lhs_shape, dim_numbers.lhs_batch_dimensions_size()) &&
IsRank2(rhs_shape, dim_numbers.lhs_batch_dimensions_size()))) &&
IsRank1(dot.shape(), dim_numbers.lhs_batch_dimensions_size()) &&
!ShapeUtil::IsZeroElementArray(lhs_shape) &&
!ShapeUtil::IsZeroElementArray(rhs_shape);
return shapes_are_valid;
}
const char* const kCusolverCholeskyCallTarget = "__cusolver$cholesky";
bool IsCustomCallToCusolver(const HloInstruction& hlo) {
if (hlo.opcode() != HloOpcode::kCustomCall) {
return false;
}
return hlo.custom_call_target() == kCusolverCholeskyCallTarget;
}
bool IsCustomCallToTopK(const HloInstruction& hlo) {
return hlo.opcode() == HloOpcode::kCustomCall &&
hlo.custom_call_target() == kTopKCustomCallTarget;
}
bool IsSliceWithUnitStrides(const HloInstruction* instr) {
auto slice = DynCast<HloSliceInstruction>(instr);
return slice && absl::c_all_of(slice->slice_strides(),
[](int64_t stride) { return stride == 1; });
}
bool IsContiguousSlice(const HloInstruction& instr) {
auto slice = DynCast<HloSliceInstruction>(&instr);
if (!slice) return false;
const Shape& src_shape = slice->operand(0)->shape();
const Shape& dst_shape = slice->shape();
return IsContiguousSlice(src_shape, dst_shape);
}
bool IsContiguousSlice(const Shape& orig, const Shape& sliced) {
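  // Walk dimensions from minor to major: once a dimension has actually been
  // sliced, every more-major dimension must have size 1 for the slice to
  // remain contiguous in memory.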
bool sliced_dim_found = false;
for (auto dim : orig.layout().minor_to_major()) {
if (!sliced_dim_found) {
sliced_dim_found = sliced.dimensions(dim) < orig.dimensions(dim);
continue;
}
if (sliced.dimensions(dim) != 1) return false;
}
return true;
}
llvm::Value* EmitAMDGPUShflDown(llvm::Value* value, llvm::Value* offset,
llvm::IRBuilder<>* b) {
llvm::Module* module = b->GetInsertBlock()->getModule();
CHECK_EQ(value->getType()->getPrimitiveSizeInBits(), 32);
auto* i32_ty = b->getInt32Ty();
llvm::FunctionCallee shfl_fn = module->getOrInsertFunction(
llvm_ir::AsStringRef("__ockl_readuplane_i32"),
llvm::FunctionType::get(i32_ty, {i32_ty, i32_ty},
false));
llvm::Value* result =
b->CreateCall(shfl_fn, {b->CreateBitCast(value, i32_ty), offset});
return b->CreateBitCast(result, value->getType());
}
llvm::Value* EmitAMDGPUShflDownSwizzle(llvm::Value* value, llvm::Value* offset,
llvm::IRBuilder<>* b) {
llvm::Module* module = b->GetInsertBlock()->getModule();
CHECK_EQ(value->getType()->getPrimitiveSizeInBits(), 32);
auto* i32_ty = b->getInt32Ty();
llvm::Function* intrinsic = llvm::cast<llvm::Function>(
module
->getOrInsertFunction(
"llvm.amdgcn.ds.swizzle",
llvm::FunctionType::get(i32_ty, {i32_ty, i32_ty},
false))
.getCallee());
llvm::Value* bitcast_value = b->CreateBitCast(value, i32_ty);
llvm::Value* control_value =
b->CreateAdd(b->CreateMul(offset, b->getInt32(0x20)), b->getInt32(0x1f));
llvm::Value* result =
b->CreateCall(intrinsic, {bitcast_value, control_value});
return b->CreateBitCast(result, value->getType());
}
llvm::Value* EmitNVPTXShflDown(llvm::Value* value, llvm::Value* offset,
llvm::IRBuilder<>* b) {
llvm::Module* module = b->GetInsertBlock()->getModule();
llvm::Intrinsic::ID llvm_intrinsic_id;
CHECK_EQ(value->getType()->getPrimitiveSizeInBits(), 32);
if (value->getType()->isFloatTy()) {
llvm_intrinsic_id = llvm::Intrinsic::nvvm_shfl_sync_down_f32;
} else {
llvm_intrinsic_id = llvm::Intrinsic::nvvm_shfl_sync_down_i32;
}
llvm::Function* intrinsic =
llvm::Intrinsic::getDeclaration(module, llvm_intrinsic_id, {});
return b->CreateCall(
intrinsic, {b->getInt32(-1), value, offset, b->getInt32(WarpSize() - 1)});
}
llvm::Value* EmitSPIRShflDown(llvm::Value* value, llvm::Value* offset,
llvm::IRBuilder<>* b) {
CHECK_EQ(value->getType()->getPrimitiveSizeInBits(), 32);
if (value->getType()->isFloatTy()) {
return EmitDeviceFunctionCall(
"_Z34__spirv_GroupNonUniformShuffleDownffj",
{b->getInt32(3), value, offset}, {U32, F32, U32}, F32,
llvm::AttrBuilder(b->getContext())
.addAttribute(llvm::Attribute::NoUnwind)
.addAttribute(llvm::Attribute::Convergent),
b);
} else {
return EmitDeviceFunctionCall(
"_Z34__spirv_GroupNonUniformShuffleDownjjj",
{b->getInt32(3), value, offset}, {U32, U32, U32}, U32,
llvm::AttrBuilder(b->getContext())
.addAttribute(llvm::Attribute::NoUnwind)
.addAttribute(llvm::Attribute::Convergent),
b);
}
}
llvm::Value* EmitFullWarpShuffleDown(
llvm::Value* value, llvm::Value* offset, llvm::IRBuilder<>* builder,
const se::DeviceDescription& gpu_device_info) {
int bit_width = value->getType()->getPrimitiveSizeInBits();
llvm::Module* module = builder->GetInsertBlock()->getModule();
llvm::Triple target_triple = llvm::Triple(module->getTargetTriple());
if (value->getType()->isFloatTy() && bit_width == 32) {
if (target_triple.isNVPTX()) {
return EmitNVPTXShflDown(value, offset, builder);
} else if (target_triple.getArch() == llvm::Triple::amdgcn) {
if (gpu_device_info.rocm_compute_capability().gfx9_mi100_or_later()) {
return EmitAMDGPUShflDownSwizzle(value, offset, builder);
}
return EmitAMDGPUShflDown(value, offset, builder);
} else if (target_triple.isSPIR()) {
return EmitSPIRShflDown(value, offset, builder);
} else {
LOG(FATAL) << "Invalid triple " << target_triple.str();
}
}
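  // Generic path: zero-extend the value to a multiple of 32 bits, bitcast it
  // to a vector of i32 segments, and shuffle each segment independently.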
int num_segments = CeilOfRatio(bit_width, 32);
llvm::Value* x = builder->CreateBitCast(
builder->CreateZExt(
builder->CreateBitCast(value, builder->getIntNTy(bit_width)),
builder->getIntNTy(32 * num_segments)),
llvm::VectorType::get(builder->getInt32Ty(), num_segments, false));
for (int i = 0; i < num_segments; ++i) {
llvm::Value* insert_val;
if (target_triple.isNVPTX()) {
insert_val = EmitNVPTXShflDown(builder->CreateExtractElement(x, i),
offset, builder);
} else if (target_triple.getArch() == llvm::Triple::amdgcn) {
if (gpu_device_info.rocm_compute_capability().gfx9_mi100_or_later()) {
insert_val = EmitAMDGPUShflDownSwizzle(
builder->CreateExtractElement(x, i), offset, builder);
} else {
insert_val = EmitAMDGPUShflDown(builder->CreateExtractElement(x, i),
offset, builder);
}
} else if (target_triple.isSPIR()) {
insert_val = EmitSPIRShflDown(builder->CreateExtractElement(x, i), offset,
builder);
} else {
LOG(FATAL) << "Invalid triple " << target_triple.str();
}
x = builder->CreateInsertElement(x, insert_val, i);
}
return builder->CreateBitCast(
builder->CreateTrunc(
builder->CreateBitCast(x, builder->getIntNTy(32 * num_segments)),
builder->getIntNTy(bit_width)),
value->getType());
}
llvm::Value* IsBlock0Thread0(llvm::IRBuilder<>* b) {
llvm::Value* is_thread0 = b->CreateICmpEQ(
b->getInt32(0),
EmitCallToTargetIntrinsic(TargetIntrinsicID::kThreadIdx, {}, {}, b));
llvm::Value* is_block0 = b->CreateICmpEQ(
b->getInt32(0),
EmitCallToTargetIntrinsic(TargetIntrinsicID::kBlockIdx, {}, {}, b));
return b->CreateAnd(is_thread0, is_block0);
}
absl::StatusOr<BufferAllocation::Slice> GetAllocationSlice(
const BufferAssignment& buffer_assignment, const HloInstruction* instr,
const ShapeIndex& index) {
return buffer_assignment.GetUniqueSlice(instr, index);
}
std::vector<const HloInstruction*> GetOutputDefiningDynamicUpdateSlices(
absl::Span<HloInstructionAdaptor const> roots) {
std::vector<const HloInstruction*> dus_ops;
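  // Look through bitcasts at each root and collect those defined by a
  // dynamic-update-slice.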
for (HloInstructionAdaptor root : roots) {
while (root.opcode() == HloOpcode::kBitcast) {
root = root.GetOperand(0);
}
if (root.opcode() == HloOpcode::kDynamicUpdateSlice) {
dus_ops.push_back(&root.instruction());
}
}
return dus_ops;
}
template <typename T>
absl::InlinedVector<const HloInstruction*, 4> GetStartIndices(T instr) {
absl::InlinedVector<const HloInstruction*, 4> result;
for (int i = instr->first_index_operand_number(); i < instr->operand_count();
i++) {
const HloInstruction* index = instr->operand(i);
result.push_back(index);
}
return result;
}
absl::StatusOr<bool> CanEmitFusedDynamicUpdateSliceInPlaceForGpu(
const HloFusionInstruction* fusion,
std::function<absl::StatusOr<BufferAllocation::Slice>(
const HloInstruction* instr, const ShapeIndex& index)>
get_allocation_slice,
absl::Span<HloInstructionAdaptor const> roots) {
std::vector<const HloInstruction*> dus_instrs =
GetOutputDefiningDynamicUpdateSlices(roots);
std::vector<BufferAllocation::Slice> output_buffers;
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
      fusion->shape(), [&](const Shape& shape, const ShapeIndex& index) {
if (shape.IsArray()) {
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice buffer,
get_allocation_slice(fusion, index));
output_buffers.push_back(buffer);
}
return absl::OkStatus();
}));
if (dus_instrs.size() != output_buffers.size()) {
return false;
}
if (output_buffers.empty()) {
return Internal("Output buffers should not be empty");
}
Shape update_shape = dus_instrs[0]->operand(1)->shape();
for (int i = 0; i < dus_instrs.size(); ++i) {
auto* dus = Cast<HloDynamicUpdateSliceInstruction>(dus_instrs[i]);
if (!dus->IsRoot() && dus->user_count() != 1) return false;
HloInstruction* dus_user = dus->IsRoot() ? nullptr : dus->users().front();
if (dus_user && dus_user->opcode() == HloOpcode::kBitcast) {
if (!dus_user->IsRoot() && dus_user->user_count() != 1) return false;
dus_user = dus_user->IsRoot() ? nullptr : dus_user->users().front();
}
if (dus_user && dus_user->opcode() == HloOpcode::kTuple) {
if (!dus_user->IsRoot()) return false;
dus_user = nullptr;
}
if (dus_user != nullptr) return false;
const HloInstruction* operand = dus->operand(0);
if (operand->opcode() == HloOpcode::kBitcast) {
operand = operand->operand(0);
}
auto* parameter = DynCast<HloParameterInstruction>(operand);
if (!parameter) return false;
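    // BFS through the users of the in-place parameter: any dynamic-slice must
    // read exactly the region the DUS overwrites (unless the update is a
    // single element), and every other user must be elementwise, a bitcast,
    // or the root tuple.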
std::queue<const HloInstruction*> q;
absl::flat_hash_set<const HloInstruction*> visited;
q.push(parameter);
visited.insert(parameter);
visited.insert(dus);
while (!q.empty()) {
const HloInstruction* instr = q.front();
q.pop();
for (const HloInstruction* user : instr->users()) {
if (user->opcode() == HloOpcode::kDynamicSlice &&
dus->operand(0) == user->operand(0) &&
update_shape == user->shape()) {
absl::InlinedVector<const HloInstruction*, 4> user_start_indices =
GetStartIndices(Cast<HloDynamicSliceInstruction>(user));
absl::InlinedVector<const HloInstruction*, 4> dus_start_indices =
GetStartIndices(dus);
if (ShapeUtil::ElementsIn(update_shape) != 1 &&
user_start_indices != dus_start_indices) {
return false;
}
} else if (user != dus && !user->IsElementwise() &&
user->opcode() != HloOpcode::kBitcast &&
user->opcode() != HloOpcode::kTuple) {
return false;
}
if (visited.insert(user).second) {
q.push(user);
}
}
}
if (dus->update()->shape() != update_shape) {
return false;
}
const HloInstruction* lhs = fusion->operand(parameter->parameter_number());
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice lhs_buffer,
get_allocation_slice(lhs, {}));
BufferAllocation::Slice rhs_buffer = output_buffers[i];
if (lhs_buffer != rhs_buffer) {
return false;
}
}
return true;
}
static std::optional<TransposeDescription> FindTiledTranspose(
const HloInstruction& instr) {
if (instr.opcode() != HloOpcode::kCopy) {
return std::nullopt;
}
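  // Check the two normalized permutations recognized here: {0,2,1} (swap the
  // two most-minor dimensions) and {2,1,0} (swap the most-minor and most-major
  // dimensions), each gated by minimum dimension thresholds for tiling.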
if (std::optional<Vector3> tr = ShapeUtil::GetNormalizedTransposeShape(
instr.operand(0)->shape(), instr.shape(), Vector3{0, 2, 1})) {
if ((tr->at(1) >= kMinDimensionToTransposeTiled &&
tr->at(2) >= kMinDimensionToTransposeTiled) ||
(tr->at(1) >= kMinDimensionToTransposeTiled2 &&
tr->at(2) >= kMinDimensionToTransposeTiled2 &&
tr->at(1) * tr->at(2) >= kMinTotalDimensionsToTransposeTiled)) {
return TransposeDescription{&instr, *tr,
Vector3{0, 2, 1}};
}
}
if (std::optional<Vector3> tr = ShapeUtil::GetNormalizedTransposeShape(
instr.operand(0)->shape(), instr.shape(), Vector3{2, 1, 0})) {
if ((tr->at(0) >= kMinDimensionToTransposeTiled &&
tr->at(2) >= kMinDimensionToTransposeTiled) ||
(tr->at(0) >= kMinDimensionToTransposeTiled2 &&
tr->at(2) >= kMinDimensionToTransposeTiled2 &&
tr->at(0) * tr->at(2) >= kMinTotalDimensionsToTransposeTiled)) {
return TransposeDescription{&instr, *tr,
Vector3{2, 1, 0}};
}
}
return std::nullopt;
}
static std::optional<TransposeDescription> FindTiledLogicalTranspose(
const HloInstruction& instr) {
if (instr.opcode() != HloOpcode::kTranspose) {
return std::nullopt;
}
if (std::optional<Vector3> tr = ShapeUtil::GetNormalizedLogicalTransposeShape(
instr.operand(0)->shape(), instr.shape(), instr.dimensions(),
Vector3{0, 2, 1})) {
if ((tr->at(1) >= kMinDimensionToTransposeTiled &&
tr->at(2) >= kMinDimensionToTransposeTiled) || | #include "xla/service/gpu/ir_emission_utils.h"
#include <cstdint>
#include <memory>
#include <vector>
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
using ::tsl::testing::IsOkAndHolds;
class IrEmissionUtilsTest : public HloTestBase {};
TEST_F(IrEmissionUtilsTest, FindTiledLogicalTranspose) {
const char* hlo = R"(
HloModule module
ENTRY entry {
p = f32[32,48,64]{2,1,0} parameter(0)
ROOT t = f32[64,32,48]{2,1,0} transpose(p), dimensions={2,0,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* tr = module->entry_computation()->root_instruction();
auto result = GetDescriptionForTiledTransposeEmitter(*tr, *tr);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result->instr, tr);
EXPECT_EQ(result->dimensions, Vector3({1, 64, 1536}));
EXPECT_EQ(result->permutation, Vector3({0, 2, 1}));
}
TEST_F(IrEmissionUtilsTest, FindAnyTiledTranspose) {
const char* hlo = R"(
HloModule module
ENTRY entry {
p = f32[32,48,64]{2,1,0} parameter(0)
ROOT t = f32[64,48,32]{2,1,0} transpose(p), dimensions={2,1,0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r = module->entry_computation()->root_instruction();
auto result = GetDescriptionForTiledTransposeEmitter(*r, *r);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result->instr, r);
EXPECT_EQ(result->dimensions, Vector3({64, 48, 32}));
EXPECT_EQ(result->permutation, Vector3({2, 1, 0}));
}
TEST_F(IrEmissionUtilsTest, FindAnyTiledTransposeWithIntermediateUnaryOp) {
const char* hlo = R"(
HloModule module
ENTRY entry {
p = f32[32,48,64]{2,1,0} parameter(0)
t = f32[64,48,32]{2,1,0} transpose(p), dimensions={2,1,0}
ROOT n = f32[64,48,32]{2,1,0} negate(t)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r = module->entry_computation()->root_instruction();
auto result = GetDescriptionForTiledTransposeEmitter(*r, *r->operand(0));
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result->instr, r->operand(0));
EXPECT_EQ(result->dimensions, Vector3({64, 48, 32}));
EXPECT_EQ(result->permutation, Vector3({2, 1, 0}));
}
TEST_F(IrEmissionUtilsTest, FindAnyTiledTransposeWithIntermediateUnaryOpS8) {
const char* hlo = R"(
HloModule module
fusion {
p = f32[32,48,64]{2,1,0} parameter(0)
t = f32[64,48,32]{2,1,0} transpose(p), dimensions={2,1,0}
ROOT c = s8[64,48,32]{2,1,0} convert(t)
}
ENTRY main {
p0 = f32[32,48,64]{2,1,0} parameter(0)
ROOT f = s8[64,48,32]{2,1,0} fusion(p0), kind=kInput, calls=fusion
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r =
module->entry_computation()->root_instruction()->fused_expression_root();
EXPECT_FALSE(
GetDescriptionForTiledTransposeEmitter(*r, *r->operand(0)).has_value());
EXPECT_EQ(FindNonTrivialHero(*r).name(), "t");
}
TEST_F(IrEmissionUtilsTest, FindReduceHeroEpilogueFusion) {
const char* hlo = R"(
HloModule module
%add {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
%fused_computation (param_0.4: f32[128,64], param_1.4: bf16[]) -> bf16[64] {
%param_0 = f32[128,64]{1,0} parameter(0)
%param_1 = bf16[] parameter(1)
%convert.0 = f32[] convert(bf16[] %param_1)
%reduce.0 = f32[64]{0} reduce(f32[128,64]{1,0} %param_0, f32[] %convert.0), dimensions={0}, to_apply=%add
ROOT %convert.1 = bf16[64]{0} convert(f32[64]{0} %reduce.0)
}
ENTRY %main {
%param_0 = f32[128,64]{1,0} parameter(0)
%param_1 = bf16[] parameter(1)
ROOT fusion = bf16[64]{0} fusion(%param_0, %param_1), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r = module->entry_computation()->root_instruction();
auto fusion = HloFusionAdaptor::ForInstruction(r);
const auto& result = FindNonTrivialHero(fusion->GetRoots()[0]);
EXPECT_EQ(result.name(), "reduce.0");
}
TEST_F(IrEmissionUtilsTest, FindReduceHeroEpilogueFusionTwoRootUsers) {
const char* hlo = R"(
HloModule module
Add {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
fused_computation {
param_0 = f32[4,2]{1,0} parameter(0)
neg = f32[4,2]{1,0} negate(param_0)
constant_0 = f32[] constant(0)
reduce.1 = f32[4]{0} reduce(param_0, constant_0), dimensions={1}, to_apply=Add
bitcast.1 = f32[1,1,4]{2,1,0} bitcast(reduce.1)
sign.1 = f32[1,1,4]{2,1,0} sign(bitcast.1)
ROOT tuple.12 = (f32[4,2]{1,0}, f32[1,1,4]{2,1,0}, f32[1,1,4]{2,1,0}) tuple(neg, bitcast.1, sign.1)
}
ENTRY main.7749 {
Arg_2.1 = f32[4,2]{1,0} parameter(0)
ROOT fusion = (f32[4,2]{1,0}, f32[1,1,4]{2,1,0}, f32[1,1,4]{2,1,0}) fusion(Arg_2.1), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r = module->entry_computation()->root_instruction();
auto fusion = HloFusionAdaptor::ForInstruction(r);
const auto& result = FindNonTrivialHero(fusion->GetRoots()[1]);
EXPECT_EQ(result.name(), "reduce.1");
const auto& result2 = FindNonTrivialHero(fusion->GetRoots()[2]);
EXPECT_EQ(result2.name(), "reduce.1");
}
TEST_F(IrEmissionUtilsTest, FindReduceHeroEpilogueFusionHeroAlsoUsedAsNonHero) {
const char* hlo = R"(
HloModule module
Add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
fused_computation {
p0 = f32[4]{0} parameter(0)
zero = f32[] constant(0.0)
reduce.0 = f32[] reduce(f32[4]{0} p0, f32[] zero), dimensions={0}, to_apply=Add
broadcast = f32[4]{0} broadcast(f32[] reduce.0), dimensions={}
reduce.1 = f32[] reduce(f32[4]{0} broadcast, f32[] zero), dimensions={0}, to_apply=Add
bitcast = f32[1]{0} bitcast(f32[] reduce.0)
ROOT tuple.1 = (f32[], f32[4]{0}, f32[1]{0}) tuple(f32[] reduce.1, f32[4]{0} broadcast, f32[1]{0} bitcast)
}
ENTRY main {
Arg0 = f32[4]{0} parameter(0)
ROOT fusion = (f32[], f32[4]{0}, f32[1]{0}) fusion(Arg0), kind=kInput, calls=fused_computation
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r = module->entry_computation()->root_instruction();
auto fusion = HloFusionAdaptor::ForInstruction(r);
const auto& result = FindNonTrivialHero(fusion->GetRoots()[1]);
EXPECT_EQ(result.name(), "broadcast");
const auto& result2 = FindNonTrivialHero(fusion->GetRoots()[2]);
EXPECT_EQ(result2.name(), "reduce.0");
}
TEST_F(IrEmissionUtilsTest, FindAnyTiledTransposeWithIntermediateBinaryOp) {
const char* hlo = R"(
HloModule module
ENTRY entry {
p = f32[32,48,64]{2,1,0} parameter(0)
p2 = f32[64,48,32]{2,1,0} parameter(1)
t = f32[64,48,32]{2,1,0} transpose(p), dimensions={2,1,0}
ROOT add = f32[64,48,32]{2,1,0} add(t, p2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r = module->entry_computation()->root_instruction();
auto result = GetDescriptionForTiledTransposeEmitter(*r, *r->operand(0));
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result->instr, r->operand(0));
EXPECT_EQ(result->dimensions, Vector3({64, 48, 32}));
EXPECT_EQ(result->permutation, Vector3({2, 1, 0}));
}
TEST_F(IrEmissionUtilsTest, FindAnyTiledTransposeWithTwoIntermediateBinaryOps) {
const char* hlo = R"(
HloModule module
fusion {
p = f32[32,48,64]{2,1,0} parameter(0)
p2 = f32[64,48,32]{2,1,0} parameter(1)
t = f32[64,48,32]{2,1,0} transpose(p), dimensions={2,1,0}
mul = f32[64,48,32]{2,1,0} multiply(t, p2)
ROOT add = f32[64,48,32]{2,1,0} add(mul, p2)
}
ENTRY main {
param0 = f32[32,48,64]{2,1,0} parameter(0)
param1 = f32[64,48,32]{2,1,0} parameter(1)
ROOT fusion = f32[64,48,32]{2,1,0} fusion(param0, param1), kind=kInput, calls=fusion
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r =
module->entry_computation()->root_instruction()->fused_expression_root();
auto result =
GetDescriptionForTiledTransposeEmitter(*r, FindNonTrivialHero(*r));
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result->instr, r->operand(0)->operand(0));
EXPECT_EQ(result->dimensions, Vector3({64, 48, 32}));
EXPECT_EQ(result->permutation, Vector3({2, 1, 0}));
}
TEST_F(IrEmissionUtilsTest,
FindAnyTiledTransposeWithIntermediateBinaryOpTwoTransposes) {
const char* hlo = R"(
HloModule module
fusion {
p = f32[32,48,64]{2,1,0} parameter(0)
p2 = f32[48,32,64]{2,1,0} parameter(1)
t = f32[64,48,32]{2,1,0} transpose(p), dimensions={2,1,0}
t2 = f32[64,48,32]{2,1,0} transpose(p2), dimensions={2,0,1}
ROOT add = f32[64,48,32]{2,1,0} add(t, t2)
}
ENTRY main {
param0 = f32[32,48,64]{2,1,0} parameter(0)
param1 = f32[48,32,64]{2,1,0} parameter(1)
ROOT fusion = f32[64,48,32]{2,1,0} fusion(param0, param1), kind=kInput, calls=fusion
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r =
module->entry_computation()->root_instruction()->fused_expression_root();
EXPECT_FALSE(
GetDescriptionForTiledTransposeEmitter(*r, FindNonTrivialHero(*r))
.has_value());
EXPECT_EQ(&FindNonTrivialHero(*r), r);
}
TEST_F(IrEmissionUtilsTest, FindNonTrivialHeroOutsideFusion) {
const char* hlo = R"(
HloModule module
f {
p0 = f32[100,200,300]{2,1,0} parameter(0)
ROOT add = f32[100,200,300]{2,1,0} add(p0, p0)
}
ENTRY entry {
p0 = f32[300,200,100]{2,1,0} parameter(0)
t = f32[100,200,300]{2,1,0} transpose(p0), dimensions={2,1,0}
fusion = f32[100,200,300]{2,1,0} fusion(t), kind=kLoop, calls=f
ROOT add = f32[100,200,300]{2,1,0} add(t, fusion)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* transpose =
module->entry_computation()->GetInstructionWithName("t");
HloInstruction* fusion =
module->entry_computation()->GetInstructionWithName("fusion");
auto fusion_adaptor =
HloFusionAdaptor::ForProducerConsumer(transpose, fusion);
HloInstructionAdaptor r(
*module->GetComputationWithName("f")->root_instruction(),
fusion_adaptor.get());
EXPECT_EQ(&FindNonTrivialHero(r).instruction(), transpose);
}
TEST_F(IrEmissionUtilsTest, FindNonTrivialTransposeHeroInsideFusion) {
const char* hlo = R"(
HloModule module
f {
p0 = f32[300,200,100]{2,1,0} parameter(0)
t = f32[100,200,300]{2,1,0} transpose(p0), dimensions={2,1,0}
ROOT add = f32[100,200,300]{2,1,0} add(t, t)
}
ENTRY entry {
p0 = f32[300,200,100]{2,1,0} parameter(0)
p1 = f32[100,200,300]{2,1,0} parameter(1)
fusion = f32[100,200,300]{2,1,0} fusion(p0), kind=kLoop, calls=f
ROOT add = f32[100,200,300]{2,1,0} add(p1, fusion)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r = module->entry_computation()->root_instruction();
HloInstruction* transpose = module->GetComputationWithName("f")
->parameter_instruction(0)
->users()
.front();
HloInstruction* fusion =
module->entry_computation()->GetInstructionWithName("fusion");
auto fusion_adaptor = HloFusionAdaptor::ForProducerConsumer(fusion, r);
EXPECT_EQ(&FindNonTrivialHero(HloInstructionAdaptor(*r, fusion_adaptor.get()))
.instruction(),
transpose);
}
TEST_F(IrEmissionUtilsTest, FindNonTrivialCopyHeroInsideFusion) {
const char* hlo = R"(
HloModule module
f {
p0 = f32[100,200,300]{2,1,0} parameter(0)
t = f32[100,200,300]{0,1,2} copy(p0)
ROOT add = f32[100,200,300]{0,1,2} add(t, t)
}
ENTRY entry {
p0 = f32[100,200,300]{2,1,0} parameter(0)
p1 = f32[100,200,300]{0,1,2} parameter(1)
fusion = f32[100,200,300]{0,1,2} fusion(p0), kind=kLoop, calls=f
ROOT add = f32[100,200,300]{0,1,2} add(p1, fusion)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r = module->entry_computation()->root_instruction();
HloInstruction* copy = module->GetComputationWithName("f")
->parameter_instruction(0)
->users()
.front();
HloInstruction* fusion =
module->entry_computation()->GetInstructionWithName("fusion");
auto fusion_adaptor = HloFusionAdaptor::ForProducerConsumer(fusion, r);
EXPECT_EQ(&FindNonTrivialHero(HloInstructionAdaptor(*r, fusion_adaptor.get()))
.instruction(),
copy);
}
TEST_F(IrEmissionUtilsTest, TransposeReachableViaTrivialAndNontrivialOps) {
const char* hlo = R"(
HloModule module
fusion {
p = f64[16,16]{1,0} parameter(0)
trans = f64[16,16]{1,0} transpose(p), dimensions={1,0}
rev = f64[16,16]{1,0} reverse(trans), dimensions={0,1}
sub = f64[16,16]{1,0} subtract(trans, trans)
ROOT add = f64[16,16]{1,0} add(rev, sub)
}
ENTRY main {
param = f64[16,16]{1,0} parameter(0)
ROOT fusion = f64[16,16]{1,0} fusion(param), kind=kLoop, calls=fusion
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r =
module->entry_computation()->root_instruction()->fused_expression_root();
EXPECT_FALSE(
GetDescriptionForTiledTransposeEmitter(*r, FindNonTrivialHero(*r))
.has_value());
EXPECT_EQ(&FindNonTrivialHero(*r), r);
}
TEST_F(IrEmissionUtilsTest, FindTiledTransposeOneSwapDimIsSmall) {
const char* hlo = R"(
HloModule module
fusion {
p = f32[100,11,12,8]{3,2,1,0} parameter(0)
ROOT c = f32[100,11,12,8]{1,0,2,3} copy(p)
}
ENTRY main {
param = f32[100,11,12,8]{3,2,1,0} parameter(0)
ROOT fusion = f32[100,11,12,8]{1,0,2,3} fusion(param), kind=kInput, calls=fusion
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* copy =
module->entry_computation()->root_instruction()->fused_expression_root();
auto result =
GetDescriptionForTiledTransposeEmitter(*copy, FindNonTrivialHero(*copy));
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result->instr, copy);
EXPECT_EQ(result->dimensions, Vector3({8, 12, 1100}));
EXPECT_EQ(result->permutation, Vector3({2, 1, 0}));
}
TEST_F(IrEmissionUtilsTest, FindTiledLogicalTransposeOneSwapDimIsSmall) {
const char* hlo = R"(
HloModule module
fusion {
p = f32[100,11,12,8]{3,2,1,0} parameter(0)
ROOT t = f32[8,12,100,11]{3,2,1,0} transpose(p), dimensions={3,2,0,1}
}
ENTRY main {
param = f32[100,11,12,8]{3,2,1,0} parameter(0)
ROOT fusion = f32[8,12,100,11]{3,2,1,0} fusion(param), kind=kInput, calls=fusion
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* tr =
module->entry_computation()->root_instruction()->fused_expression_root();
auto result =
GetDescriptionForTiledTransposeEmitter(*tr, FindNonTrivialHero(*tr));
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result->instr, tr);
EXPECT_EQ(result->dimensions, Vector3({8, 12, 1100}));
EXPECT_EQ(result->permutation, Vector3({2, 1, 0}));
}
TEST_F(IrEmissionUtilsTest, FindTiledTransposeOtherSwapDimIsSmall) {
const char* hlo = R"(
HloModule module
fusion {
p = f32[8,12,100,11]{3,2,1,0} parameter(0)
ROOT c = f32[8,12,100,11]{0,1,3,2} copy(p)
}
ENTRY main {
param = f32[8,12,100,11]{3,2,1,0} parameter(0)
ROOT fusion = f32[8,12,100,11]{0,1,3,2} fusion(param), kind=kInput, calls=fusion
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* copy =
module->entry_computation()->root_instruction()->fused_expression_root();
auto result =
GetDescriptionForTiledTransposeEmitter(*copy, FindNonTrivialHero(*copy));
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result->instr, copy);
EXPECT_EQ(result->dimensions, Vector3({1100, 12, 8}));
EXPECT_EQ(result->permutation, Vector3({2, 1, 0}));
}
TEST_F(IrEmissionUtilsTest, FindTiledLogicalTransposeOtherSwapDimIsSmall) {
const char* hlo = R"(
HloModule module
fusion {
p = f32[8,12,100,11]{3,2,1,0} parameter(0)
ROOT t = f32[100,11,12,8]{3,2,1,0} transpose(p), dimensions={2,3,1,0}
}
ENTRY main {
param = f32[8,12,100,11]{3,2,1,0} parameter(0)
ROOT fusion = f32[100,11,12,8]{3,2,1,0} fusion(param), kind=kInput, calls=fusion
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* tr =
module->entry_computation()->root_instruction()->fused_expression_root();
auto result =
GetDescriptionForTiledTransposeEmitter(*tr, FindNonTrivialHero(*tr));
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result->instr, tr);
EXPECT_EQ(result->dimensions, Vector3({1100, 12, 8}));
EXPECT_EQ(result->permutation, Vector3({2, 1, 0}));
}
TEST_F(IrEmissionUtilsTest, IsContiguousSlice) {
const char* hlo = R"(
HloModule module
ENTRY entry {
p = f32[8,12,100,11]{3,2,1,0} parameter(0)
slice.1 = f32[2,12,100,11]{3,2,1,0} slice(p), slice={[1:3], [0:12], [0:100], [0:11]}
slice.2 = f32[1,1,1,11]{3,2,1,0} slice(p), slice={[1:2], [0:1], [0:1], [0:11]}
slice.3 = f32[1,1,10,11]{3,2,1,0} slice(p), slice={[1:2], [0:1], [0:10], [0:11]}
slice.4 = f32[1,2,10,11]{3,2,1,0} slice(p), slice={[1:2], [0:2], [0:10], [0:11]}
slice.5 = f32[8,2,100,11]{3,2,1,0} slice(p), slice={[0:8], [10:12], [0:100], [0:11]}
c = f32[8,12,100,11]{0,1,3,2} copy(p)
slice.6 = f32[8,12,40,11]{0,1,3,2} slice(c), slice={[0:8], [0:12], [10:50], [0:11]}
slice.7 = f32[8,12,1,2]{0,1,3,2} slice(c), slice={[0:8], [0:12], [0:1], [0:2]}
slice.8 = f32[8,2,100,11]{0,1,3,2} slice(c), slice={[0:8], [0:2], [0:100], [0:11]}
slice.9 = f32[8,2,40,11]{0,1,3,2} slice(c), slice={[0:8], [10:12], [10:50], [0:11]}
slice.10 = f32[8,2,50,11]{3,2,1,0} slice(p), slice={[0:8:1], [10:12:1], [0:100:2], [0:11:1]}
ROOT t = (f32[2,12,100,11]{3,2,1,0},
f32[1,1,1,11]{3,2,1,0},
f32[1,1,10,11]{3,2,1,0},
f32[1,2,10,11]{3,2,1,0},
f32[8,2,100,11]{3,2,1,0},
f32[8,12,40,11]{0,1,3,2},
f32[8,12,1,2]{0,1,3,2},
f32[8,2,100,11]{0,1,3,2},
f32[8,2,40,11]{0,1,3,2},
f32[8,2,50,11]{3,2,1,0}) tuple(slice.1, slice.2, slice.3, slice.4, slice.5, slice.6, slice.7, slice.8, slice.9, slice.10)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* slice1 =
module->entry_computation()->GetInstructionWithName("slice.1");
HloInstruction* slice2 =
module->entry_computation()->GetInstructionWithName("slice.2");
HloInstruction* slice3 =
module->entry_computation()->GetInstructionWithName("slice.3");
HloInstruction* slice4 =
module->entry_computation()->GetInstructionWithName("slice.4");
HloInstruction* slice5 =
module->entry_computation()->GetInstructionWithName("slice.5");
HloInstruction* slice6 =
module->entry_computation()->GetInstructionWithName("slice.6");
HloInstruction* slice7 =
module->entry_computation()->GetInstructionWithName("slice.7");
HloInstruction* slice8 =
module->entry_computation()->GetInstructionWithName("slice.8");
HloInstruction* slice9 =
module->entry_computation()->GetInstructionWithName("slice.9");
HloInstruction* slice10 =
module->entry_computation()->GetInstructionWithName("slice.10");
EXPECT_TRUE(IsContiguousSlice(*slice1));
EXPECT_TRUE(IsContiguousSlice(*slice2));
EXPECT_TRUE(IsContiguousSlice(*slice3));
  EXPECT_FALSE(IsContiguousSlice(*slice4));
  EXPECT_FALSE(IsContiguousSlice(*slice5));
EXPECT_TRUE(IsContiguousSlice(*slice6));
EXPECT_TRUE(IsContiguousSlice(*slice7));
  EXPECT_FALSE(IsContiguousSlice(*slice8));
  EXPECT_FALSE(IsContiguousSlice(*slice9));
  EXPECT_FALSE(IsContiguousSlice(*slice10));
}
TEST_F(IrEmissionUtilsTest, LiteralToAttrToXlaFormat) {
{
Literal literal = LiteralUtil::CreateR2<int16_t>({{0, 1, 2}, {3, 4, 5}});
TF_ASSERT_OK_AND_ASSIGN(DenseDataIntermediate data,
LiteralToXlaFormat(literal));
EXPECT_EQ(data.span().size(), literal.size_bytes());
EXPECT_EQ(reinterpret_cast<const char*>(data.span().data()),
literal.untyped_data());
}
{
Literal literal = LiteralUtil::CreateR2<s4>(
{{s4(0), s4(1), s4(2)}, {s4(3), s4(4), s4(5)}});
TF_ASSERT_OK_AND_ASSIGN(DenseDataIntermediate data,
LiteralToXlaFormat(literal));
EXPECT_EQ(data.span(), std::vector<uint8_t>({0x01, 0x23, 0x45}));
EXPECT_NE(reinterpret_cast<const void*>(data.span().data()),
literal.untyped_data());
}
{
Literal literal = LiteralUtil::CreateR2<u4>(
{{u4(0), u4(1), u4(2)}, {u4(3), u4(4), u4(5)}, {u4(6), u4(7), u4(8)}});
TF_ASSERT_OK_AND_ASSIGN(DenseDataIntermediate data,
LiteralToXlaFormat(literal));
EXPECT_EQ(data.span(),
std::vector<uint8_t>({0x01, 0x23, 0x45, 0x67, 0x80}));
EXPECT_NE(reinterpret_cast<const void*>(data.span().data()),
literal.untyped_data());
}
}
TEST_F(IrEmissionUtilsTest,
CanEmitFusedDynamicUpdateSliceInPlaceForGpu_HandlesBitcasts) {
const char* hlo = R"(
HloModule fusion, is_scheduled=true
fused_computation {
param_0.1 = s32[6]{0} parameter(0)
bitcast = s32[2,3]{1,0} bitcast(param_0.1)
zero = s32[] constant(0)
param_1.1 = s32[] parameter(1)
dynamic-slice = s32[1,1]{1,0} dynamic-slice(bitcast, param_1.1, zero), dynamic_slice_sizes={1,1}
one = s32[] constant(1)
bitcasted_one = s32[1,1]{1,0} bitcast(one)
add = s32[1,1] add(dynamic-slice, bitcasted_one)
dynamic-update-slice = s32[2,3]{1,0} dynamic-update-slice(bitcast, add, param_1.1, zero)
ROOT bitcast.1 = s32[6]{0} bitcast(dynamic-update-slice)
}
ENTRY main {
param_0 = s32[6]{0} parameter(0)
param_1 = s32[] parameter(1)
ROOT fusion = s32[6]{0} fusion(param_0, param_1), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
auto fusion = module->entry_computation()->root_instruction();
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice slice0(&alloc, 0, 10);
EXPECT_THAT(CanEmitFusedDynamicUpdateSliceInPlaceForGpu(
Cast<HloFusionInstruction>(fusion),
[&slice0](const HloInstruction*, const ShapeIndex&) {
return slice0;
},
HloFusionAdaptor::ForInstruction(fusion)->GetRoots()),
IsOkAndHolds(true));
}
TEST_F(
IrEmissionUtilsTest,
CanEmitFusedDynamicUpdateSliceInPlaceForGpu_ElementwiseOnPathToParameter) {
const char* hlo = R"(
HloModule fusion, is_scheduled=true
fused_computation {
param_0.1 = s32[2,3]{1,0} parameter(0)
bitcast = s32[2,3]{1,0} negate(param_0.1)
zero = s32[] constant(0)
param_1.1 = s32[] parameter(1)
dynamic-slice = s32[1,1]{1,0} dynamic-slice(bitcast, param_1.1, zero), dynamic_slice_sizes={1,1}
one = s32[] constant(1)
bitcasted_one = s32[1,1]{1,0} bitcast(one)
add = s32[1,1] add(dynamic-slice, bitcasted_one)
dynamic-update-slice = s32[2,3]{1,0} dynamic-update-slice(bitcast, add, param_1.1, zero)
ROOT bitcast.1 = s32[6]{0} bitcast(dynamic-update-slice)
}
ENTRY main {
param_0 = s32[2,3]{1,0} parameter(0)
param_1 = s32[] parameter(1)
ROOT fusion = s32[6]{0} fusion(param_0, param_1), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
auto fusion = module->entry_computation()->root_instruction();
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice slice0(&alloc, 0, 10);
EXPECT_THAT(CanEmitFusedDynamicUpdateSliceInPlaceForGpu(
Cast<HloFusionInstruction>(fusion),
[&slice0](const HloInstruction*, const ShapeIndex&) {
return slice0;
},
HloFusionAdaptor::ForInstruction(fusion)->GetRoots()),
IsOkAndHolds(false));
}
TEST_F(IrEmissionUtilsTest,
CanEmitFusedDynamicUpdateSliceInPlaceForGpu_SlicesDifferent) {
const char* hlo = R"(
HloModule fusion, is_scheduled=true
fused_computation {
param_0.1 = s32[6]{0} parameter(0)
bitcast = s32[2,3]{1,0} bitcast(param_0.1)
zero = s32[] constant(0)
param_1.1 = s32[] parameter(1)
dynamic-slice = s32[1,1]{1,0} dynamic-slice(bitcast, param_1.1, zero), dynamic_slice_sizes={1,1}
one = s32[] constant(1)
bitcasted_one = s32[1,1]{1,0} bitcast(one)
add = s32[1,1] add(dynamic-slice, bitcasted_one)
dynamic-update-slice = s32[2,3]{1,0} dynamic-update-slice(bitcast, add, param_1.1, zero)
ROOT bitcast.1 = s32[6]{0} bitcast(dynamic-update-slice)
}
ENTRY main {
param_0 = s32[6]{0} parameter(0)
param_1 = s32[] parameter(1)
ROOT fusion = s32[6]{0} fusion(param_0, param_1), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
auto fusion = module->entry_computation()->root_instruction();
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice slice0(&alloc, 0, 10);
BufferAllocation::Slice slice1(&alloc, 10, 20);
EXPECT_THAT(CanEmitFusedDynamicUpdateSliceInPlaceForGpu(
Cast<HloFusionInstruction>(fusion),
[fusion, &slice0, &slice1](const HloInstruction* instr,
const ShapeIndex&) {
if (instr == fusion) {
return slice0;
}
return slice1;
},
HloFusionAdaptor::ForInstruction(fusion)->GetRoots()),
IsOkAndHolds(false));
}
TEST_F(
IrEmissionUtilsTest,
CanEmitFusedDynamicUpdateSliceInPlaceForGpu_DynamicUpdateSliceWithDifferentDynamicSliceAccess) {
const char* hlo = R"(
HloModule fusion, input_output_alias={ {}: (0, {}) }
fused_computation {
param_0.1 = s32[6]{0} parameter(0)
bitcast = s32[2,3]{1,0} bitcast(param_0.1)
zero = s32[] constant(0)
one = s32[] constant(1)
param_1.1 = s32[] parameter(1)
dynamic-slice = s32[2,2]{1,0} dynamic-slice(bitcast, param_1.1, one), dynamic_slice_sizes={2,2}
broadcasted_one = s32[2,2]{1,0} broadcast(one), dimensions={}
add = s32[2,2] add(dynamic-slice, broadcasted_one)
dynamic-update-slice = s32[2,3]{1,0} dynamic-update-slice(bitcast, add, param_1.1, zero)
ROOT bitcast.1 = s32[6]{0} bitcast(dynamic-update-slice)
}
ENTRY main {
param_0 = s32[6]{0} parameter(0)
param_1 = s32[] parameter(1)
ROOT fusion = s32[6]{0} fusion(param_0, param_1), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
auto fusion = module->entry_computation()->root_instruction();
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice slice0(&alloc, 0, 10);
EXPECT_THAT(CanEmitFusedDynamicUpdateSliceInPlaceForGpu(
Cast<HloFusionInstruction>(fusion),
[&slice0](const HloInstruction*, const ShapeIndex&) {
return slice0;
},
HloFusionAdaptor::ForInstruction(fusion)->GetRoots()),
IsOkAndHolds(false));
}
TEST_F(IrEmissionUtilsTest,
CanEmitFusedDynamicUpdateSliceInPlaceForGpu_HandlesMultiOutputFusion) {
const char* hlo = R"(
HloModule MultipleInplaceDus, is_scheduled=true, input_output_alias={ {0}: (0, {}), {1}: (2, {}) }
fused_computation {
p0 = bf16[10,11,12] parameter(0)
p1 = bf16[1,11,12] parameter(1)
p2 = bf16[8,11,12] parameter(2)
p3 = bf16[1,11,12] parameter(3)
p4 = s32[] parameter(4)
c0 = s32[] constant(0)
cmp = pred[] compare(p4, c0), direction=EQ
broadcast = pred[1,11,12] broadcast(cmp), dimensions={}
select = bf16[1,11,12] select(broadcast, p1, p3)
dus0 = bf16[10,11,12] dynamic-update-slice(p0, select, c0, c0, c0)
dus1 = bf16[8,11,12] dynamic-update-slice(p2, select, c0, c0, c0)
ROOT tuple = (bf16[10,11,12], bf16[8,11,12]) tuple(dus0, dus1)
}
ENTRY main {
p0 = bf16[10,11,12] parameter(0)
p1 = bf16[1,11,12] parameter(1)
p2 = bf16[8,11,12] parameter(2)
p3 = bf16[1,11,12] parameter(3)
p4 = s32[] parameter(4)
ROOT fusion_root_multiple = (bf16[10,11,12], bf16[8,11,12]) fusion(p0, p1, p2, p3, p4), kind=kLoop, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
auto fusion = module->entry_computation()->root_instruction();
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice slice0(&alloc, 0, 10);
EXPECT_THAT(CanEmitFusedDynamicUpdateSliceInPlaceForGpu(
Cast<HloFusionInstruction>(fusion),
[&slice0](const HloInstruction*, const ShapeIndex&) {
return slice0;
},
HloFusionAdaptor::ForInstruction(fusion)->GetRoots()),
IsOkAndHolds(true));
}
TEST_F(
IrEmissionUtilsTest,
CanEmitFusedDynamicUpdateSliceInPlaceForGpu_HandlesMultiOutputFusionWithTransposeBitcasts) {
const char* hlo = R"(
HloModule MultipleInplaceDusWithTransposeBitcastToTheRoot, is_scheduled=true, input_output_alias={ {0}: (0, {}), {1}: (2, {}) }
fused_computation {
p0 = bf16[10,11,12] parameter(0)
p1 = bf16[1,11,12] parameter(1)
p2 = bf16[8,11,12] parameter(2)
p3 = bf16[1,11,12] parameter(3)
p4 = s32[] parameter(4)
c0 = s32[] constant(0)
cmp = pred[] compare(p4, c0), direction=EQ
broadcast = pred[1,11,12] broadcast(cmp), dimensions={}
select = bf16[1,11,12] select(broadcast, p1, p3)
dus0 = bf16[10,11,12] dynamic-update-slice(p0, select, c0, c0, c0)
bitcasted_dus0 = bf16[11,10,12] bitcast(dus0)
dus1 = bf16[8,11,12] dynamic-update-slice(p2, select, c0, c0, c0)
ROOT tuple = (bf16[11,10,12], bf16[8,11,12]) tuple(bitcasted_dus0, dus1)
}
ENTRY main {
p0 = bf16[10,11,12] parameter(0)
p1 = bf16[1,11,12] parameter(1)
p2 = bf16[8,11,12] parameter(2)
p3 = bf16[1,11,12] parameter(3)
p4 = s32[] parameter(4)
ROOT fusi |
2,015 | cpp | tensorflow/tensorflow | onednn_softmax | third_party/xla/xla/service/cpu/onednn_softmax.cc | third_party/xla/xla/service/cpu/tests/onednn_softmax_test.cc | #ifndef XLA_SERVICE_CPU_ONEDNN_SOFTMAX_H_
#define XLA_SERVICE_CPU_ONEDNN_SOFTMAX_H_
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
namespace xla {
namespace cpu {
extern "C" {
extern void __xla_cpu_runtime_OneDnnSoftmax(const void* run_options_ptr,
void* input, void* result,
void* softmax_config_ptr);
}
}
}
#endif
#endif
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_softmax.h"
#include <algorithm>
#include <cmath>
#include <initializer_list>
#include <string>
#include <unordered_map>
#include <vector>
#include "dnnl.hpp"
#include "absl/base/dynamic_annotations.h"
#include "xla/executable_run_options.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/onednn_memory_util.h"
#include "xla/service/cpu/runtime_lightweight_check.h"
#include "xla/tsl/util/onednn_threadpool.h"
#include "unsupported/Eigen/CXX11/Tensor"
namespace xla {
namespace cpu {
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_OneDnnSoftmax(
const void* run_options_ptr, void* input, void* result,
void* softmax_config_ptr) {
const xla::ExecutableRunOptions* run_options =
static_cast<const xla::ExecutableRunOptions*>(run_options_ptr);
XLA_LIGHTWEIGHT_CHECK(run_options != nullptr);
XLA_LIGHTWEIGHT_CHECK(run_options->intra_op_thread_pool() != nullptr);
tsl::OneDnnThreadPool thread_pool(
run_options->intra_op_thread_pool()->getPool(), false);
dnnl::engine cpu_engine(dnnl::engine::kind::cpu, 0);
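// Without OpenMP, bind the oneDNN stream to XLA's intra-op thread pool;
// under OpenMP, oneDNN manages its own threads through the default stream.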
#ifndef ENABLE_ONEDNN_OPENMP
auto onednn_stream = dnnl::stream(
dnnl::threadpool_interop::make_stream(cpu_engine, &thread_pool));
#else
auto onednn_stream = dnnl::stream(cpu_engine);
#endif
std::string config_str(static_cast<const char*>(softmax_config_ptr));
OneDnnSoftmaxConfig softmax_config;
softmax_config.ParseFromString(config_str);
MemrefInfo input_minfo(input);
MemrefInfo result_minfo(result);
auto src_md = input_minfo.GetOneDnnMemDesc();
auto dst_md = result_minfo.GetOneDnnMemDesc();
auto src_mem = dnnl::memory(src_md, cpu_engine, input_minfo.Data());
auto dst_mem = dnnl::memory(dst_md, cpu_engine, result_minfo.Data());
int axis = softmax_config.softmax_axis();
auto softmax_pd = dnnl::softmax_forward::primitive_desc(
cpu_engine, dnnl::prop_kind::forward_inference,
dnnl::algorithm::softmax_accurate, src_md, dst_md, axis);
auto softmax_prim = dnnl::softmax_forward(softmax_pd);
std::unordered_map<int, dnnl::memory> softmax_args;
softmax_args.insert({DNNL_ARG_SRC, src_mem});
softmax_args.insert({DNNL_ARG_DST, dst_mem});
softmax_prim.execute(onednn_stream, softmax_args);
}
}
}
#endif | #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include <utility>
#include "absl/strings/str_replace.h"
#include "absl/strings/substitute.h"
#include "xla/literal.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/onednn_ops_rewriter.h"
#include "xla/service/cpu/onednn_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
namespace xla {
namespace cpu {
std::string TestParamsToString(
const ::testing::TestParamInfo<std::tuple<PrimitiveType, int>>& data) {
PrimitiveType data_type;
int batch_size;
std::tie(data_type, batch_size) = data.param;
return absl::StrCat(primitive_util::LowercasePrimitiveTypeName(data_type),
"_BatchSize", std::to_string(batch_size));
}
class OneDnnSoftmaxTest
: public HloTestBase,
public ::testing::WithParamInterface<std::tuple<PrimitiveType, int>> {
protected:
const char* onednn_softmax_ =
R"(
; CHECK: custom_call_target="__onednn$softmax"
)";
void TestSoftmax(std::string input_hlo_string, int expected_softmax_axis) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(input_hlo_string));
OneDnnOpsRewriter softmax_rewrite_pass;
HloInstruction* onednn_softmax;
OneDnnSoftmaxConfig softmax_config;
TF_ASSERT_OK_AND_ASSIGN(
bool changed, this->RunHloPass(&softmax_rewrite_pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(::xla::match::CustomCall(&onednn_softmax,
{"__onednn$softmax"})));
auto backend_config = onednn_softmax->backend_config<BackendConfig>();
softmax_config.CopyFrom(backend_config->onednn_softmax_config());
int axis_after_rewrite = softmax_config.softmax_axis();
EXPECT_EQ(expected_softmax_axis, axis_after_rewrite);
}
};
TEST_P(OneDnnSoftmaxTest, SoftmaxGenericTest) {
PrimitiveType data_type;
int batch_size;
std::tie(data_type, batch_size) = GetParam();
if (!IsSupportedType(data_type)) {
GTEST_SKIP() << "CPU does not support "
<< primitive_util::LowercasePrimitiveTypeName(data_type);
}
const std::string softmax_hlo_template_string = R"(
HloModule softmax_module
region_max {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(Arg_0, Arg_1)
}
region_add {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT add = $0[] add(Arg_0, Arg_1)
}
ENTRY main {
Arg_0 = $0[$1,128,30522]{2,1,0} parameter(0)
neg_inf = $0[] constant(-inf)
reduce_max = $0[$1,128]{1,0} reduce(Arg_0, neg_inf), dimensions={2}, to_apply=region_max
reshape.0 = $0[$1,128,1]{2,1,0} reshape(reduce_max)
broadcast.0 = $0[$1,128,1]{2,1,0} broadcast(reshape.0), dimensions={0,1,2}
reshape.1 = $0[$1,128]{1,0} reshape(broadcast.0)
broadcast.1 = $0[$1,128,30522]{2,1,0} broadcast(reshape.1), dimensions={0,1}
subtract.0 = $0[$1,128,30522]{2,1,0} subtract(Arg_0, broadcast.1)
exponential = $0[$1,128,30522]{2,1,0} exponential(subtract.0)
const_zero = $0[] constant(0)
reduce_add = $0[$1,128]{1,0} reduce(exponential, const_zero), dimensions={2}, to_apply=region_add
reshape.2 = $0[$1,128,1]{2,1,0} reshape(reduce_add)
broadcast.2 = $0[$1,128,1]{2,1,0} broadcast(reshape.2), dimensions={0,1,2}
reshape.3 = $0[$1,128]{1,0} reshape(broadcast.2)
broadcast.3 = $0[$1,128,30522]{2,1,0} broadcast(reshape.3), dimensions={0,1}
ROOT divide = $0[$1,128,30522]{2,1,0} divide(exponential, broadcast.3)
}
)";
const std::string softmax_hlo_string = absl::Substitute(
softmax_hlo_template_string,
primitive_util::LowercasePrimitiveTypeName(data_type), batch_size);
TestSoftmax(softmax_hlo_string, 2);
}
INSTANTIATE_TEST_SUITE_P(OneDnnSoftmaxTestSuite, OneDnnSoftmaxTest,
::testing::Combine(::testing::ValuesIn({F32, BF16,
F16}),
::testing::Values(1, 16)),
TestParamsToString);
TEST_F(OneDnnSoftmaxTest, SoftmaxFP32OnAxisZero) {
const std::string softmax_hlo_string = R"(
HloModule softmax_module
region_max {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT maximum = f32[] maximum(Arg_0, Arg_1)
}
region_add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
ENTRY main {
Arg_0 = f32[3,1,1]{2,1,0} parameter(0)
neg_inf = f32[] constant(-inf)
reduce_max = f32[1,1]{1,0} reduce(Arg_0, neg_inf), dimensions={0}, to_apply=region_max
neg_inf.1 = f32[1,1]{1,0} constant({ {-inf} })
maximum = f32[1,1]{1,0} maximum(reduce_max, neg_inf.1)
reshape.0 = f32[1,1,1]{2,1,0} reshape(maximum)
broadcast.0 = f32[1,1,1]{2,1,0} broadcast(reshape.0), dimensions={0,1,2}
reshape.1 = f32[1,1]{1,0} reshape(broadcast.0)
broadcast.1 = f32[3,1,1]{2,1,0} broadcast(reshape.1), dimensions={1,2}
subtract = f32[3,1,1]{2,1,0} subtract(Arg_0, broadcast.1)
exponential = f32[3,1,1]{2,1,0} exponential(subtract)
const_zero = f32[] constant(0)
reduce_add = f32[1,1]{1,0} reduce(exponential, const_zero), dimensions={0}, to_apply=region_add
reshape.2 = f32[1,1,1]{2,1,0} reshape(reduce_add)
broadcast.2 = f32[1,1,1]{2,1,0} broadcast(reshape.2), dimensions={0,1,2}
reshape.3 = f32[1,1]{1,0} reshape(broadcast.2)
broadcast.3 = f32[3,1,1]{2,1,0} broadcast(reshape.3), dimensions={1,2}
ROOT divide = f32[3,1,1]{2,1,0} divide(exponential, broadcast.3)
}
)";
TestSoftmax(softmax_hlo_string, 0);
}
TEST_F(OneDnnSoftmaxTest, SoftmaxWithBF16ConvertOutputFP32Pattern) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const std::string softmax_hlo_string = R"(
HloModule softmax_module
region_max {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT maximum = f32[] maximum(Arg_0, Arg_1)
}
region_add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
ENTRY main {
Arg_0 = f32[16,128,30522]{2,1,0} parameter(0)
neg_inf = f32[] constant(-inf)
reduce_max = f32[16,128]{1,0} reduce(Arg_0, neg_inf), dimensions={2}, to_apply=region_max
reshape.0 = f32[16,128,1]{2,1,0} reshape(reduce_max)
broadcast.0 = f32[16,128,1]{2,1,0} broadcast(reshape.0), dimensions={0,1,2}
reshape.1 = f32[16,128]{1,0} reshape(broadcast.0)
broadcast.1 = f32[16,128,30522]{2,1,0} broadcast(reshape.1), dimensions={0,1}
subtract = f32[16,128,30522]{2,1,0} subtract(Arg_0, broadcast.1)
exponential = f32[16,128,30522]{2,1,0} exponential(subtract)
const_zero = f32[] constant(0)
reduce_add = f32[16,128]{1,0} reduce(exponential, const_zero), dimensions={2}, to_apply=region_add
reshape.2 = f32[16,128,1]{2,1,0} reshape(reduce_add)
broadcast.2 = f32[16,128,1]{2,1,0} broadcast(reshape.2), dimensions={0,1,2}
reshape.3 = f32[16,128]{1,0} reshape(broadcast.2)
broadcast.3 = f32[16,128,30522]{2,1,0} broadcast(reshape.3), dimensions={0,1}
divide = f32[16,128,30522]{2,1,0} divide(exponential, broadcast.3)
ROOT convert = bf16[16,128,30522]{2,1,0} convert(divide)
}
)";
TestSoftmax(softmax_hlo_string, 2);
}
}
}
#endif |
2,016 | cpp | tensorflow/tensorflow | parallel_task_assignment | third_party/xla/xla/service/cpu/parallel_task_assignment.cc | third_party/xla/xla/service/cpu/parallel_task_assignment_test.cc | #ifndef XLA_SERVICE_CPU_PARALLEL_TASK_ASSIGNMENT_H_
#define XLA_SERVICE_CPU_PARALLEL_TASK_ASSIGNMENT_H_
#include <cstdint>
#include <memory>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/cpu/target_machine_features.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/util.h"
namespace xla {
namespace cpu {
class ParallelCostModel {
public:
virtual ~ParallelCostModel() = default;
virtual int64_t GetParallelTaskCount(HloInstruction* instruction) = 0;
};
class ParallelTaskAssignment {
public:
ParallelTaskAssignment(int64_t max_parallelism,
const HloCostAnalysis::ShapeSizeFunction& shape_size,
HloModule* module,
const TargetMachineFeatures* target_machine_features);
~ParallelTaskAssignment() {}
int64_t GetTargetParallelTaskCount(HloInstruction* instruction);
private:
std::unique_ptr<ParallelCostModel> cost_model_;
const TargetMachineFeatures& target_machine_features_;
};
class ParallelTaskAssigner : public HloModulePass {
public:
ParallelTaskAssigner(const int64_t max_parallelism,
const HloCostAnalysis::ShapeSizeFunction& shape_size,
const TargetMachineFeatures* target_machine_features)
: max_parallelism_(max_parallelism),
shape_size_function_(shape_size),
target_machine_features_(*target_machine_features) {}
~ParallelTaskAssigner() override {}
absl::string_view name() const override {
return "cpu-parallel-task-assigner";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
using HloToParallelTasks =
absl::flat_hash_map<const HloInstruction*, int64_t>;
bool AssignParallelTasks(HloModule* module,
const HloToParallelTasks& hlo_to_parallel_tasks);
bool AssignParallelTasksHelper(
HloModule* module, HloComputation* computation,
const HloToParallelTasks& hlo_to_parallel_tasks);
void ComputeTargetParallelTasks(HloModule* module,
HloToParallelTasks* hlo_to_parallel_tasks);
int64_t max_parallelism_;
HloCostAnalysis::ShapeSizeFunction shape_size_function_;
const TargetMachineFeatures& target_machine_features_;
};
}
}
#endif
#include "xla/service/cpu/parallel_task_assignment.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/ir_emission_utils.h"
#include "xla/service/cpu/shape_partition.h"
#include "xla/service/cpu/target_machine_features.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/llvm_ir/dynamic_update_slice_util.h"
#include "xla/util.h"
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
namespace cpu {
class SimpleCostModel : public ParallelCostModel {
public:
SimpleCostModel(const int64_t max_parallelism,
const HloCostAnalysis::ShapeSizeFunction& shape_size)
: max_parallelism_(max_parallelism), shape_size_(shape_size) {}
~SimpleCostModel() override {}
int64_t GetParallelTaskCount(HloInstruction* instruction) override {
const int64_t instruction_cost = shape_size_(instruction->shape());
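// Budget roughly 256 KiB of result data per task, never exceeding
// max_parallelism_.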
const int64_t min_cost_per_thread = 256LL << 10;
return std::min(
max_parallelism_,
std::max(int64_t{1}, instruction_cost / min_cost_per_thread));
}
private:
const int64_t max_parallelism_;
const HloCostAnalysis::ShapeSizeFunction shape_size_;
};
class DefaultCostModel : public ParallelCostModel {
public:
DefaultCostModel(const int64_t max_parallelism,
const HloCostAnalysis::ShapeSizeFunction& shape_size,
std::unique_ptr<HloCostAnalysis> cost_analysis)
: max_parallelism_(max_parallelism),
shape_size_(shape_size),
cost_analysis_(std::move(cost_analysis)) {}
~DefaultCostModel() override {}
int64_t GetParallelTaskCount(HloInstruction* instruction) override {
int64_t instruction_cost;
int64_t min_cost_per_thread;
int64_t max_parallelism;
const int64_t bytes_accessed =
std::max(int64_t{1}, cost_analysis_->bytes_accessed(*instruction));
const float flops_to_bytes_ratio =
cost_analysis_->flop_count(*instruction) /
static_cast<float>(bytes_accessed);
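// Memory-bound case (at most one flop per byte accessed): cost by result
// size and cap the task count near the square root of the host's core count.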
if (flops_to_bytes_ratio <= 1.0) {
max_parallelism = std::min<int64_t>(
max_parallelism_, std::ceil(std::sqrt(tsl::port::MaxParallelism())));
instruction_cost = shape_size_(instruction->shape());
min_cost_per_thread = 256LL << 10;
} else {
max_parallelism = max_parallelism_;
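// Compute-bound case: weight transcendentals at twice a flop and each byte
// accessed at ten times a flop.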
instruction_cost =
1 * cost_analysis_->flop_count(*instruction) +
2 * cost_analysis_->transcendental_count(*instruction) +
10 * cost_analysis_->bytes_accessed(*instruction);
min_cost_per_thread = 100000;
}
return std::min(
max_parallelism,
std::max(int64_t{1}, instruction_cost / min_cost_per_thread));
}
private:
const int64_t max_parallelism_;
const HloCostAnalysis::ShapeSizeFunction shape_size_;
const std::unique_ptr<HloCostAnalysis> cost_analysis_;
};
ParallelTaskAssignment::ParallelTaskAssignment(
const int64_t max_parallelism,
const HloCostAnalysis::ShapeSizeFunction& shape_size, HloModule* module,
const TargetMachineFeatures* target_machine_features)
: target_machine_features_(*target_machine_features) {
VLOG(1) << "ParallelTaskAssignment max_parallelism: " << max_parallelism;
auto cost_analysis = std::make_unique<HloCostAnalysis>(shape_size);
HloComputation* computation = module->entry_computation();
absl::Status status =
computation->root_instruction()->Accept(cost_analysis.get());
if (status.ok()) {
cost_model_ = std::make_unique<DefaultCostModel>(
max_parallelism, shape_size, std::move(cost_analysis));
} else {
cost_model_ =
std::make_unique<SimpleCostModel>(max_parallelism, shape_size);
}
}
int64_t ParallelTaskAssignment::GetTargetParallelTaskCount(
HloInstruction* instruction) {
auto opcode = instruction->opcode();
if (llvm_ir::MayBeImplementedAsInPlaceDynamicUpdateSlice(instruction) ||
instruction->shape().IsTuple() || opcode == HloOpcode::kRng ||
opcode == HloOpcode::kConstant) {
return 1;
}
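// Parallelize elementwise ops, loop fusions, and common data-movement and
// reduction ops; convolutions only when they will not be lowered to Eigen,
// which handles its own threading.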
if (instruction->IsElementwise() || instruction->IsLoopFusion() ||
opcode == HloOpcode::kBroadcast || opcode == HloOpcode::kConcatenate ||
opcode == HloOpcode::kDynamicSlice ||
opcode == HloOpcode::kDynamicUpdateSlice ||
opcode == HloOpcode::kGather || opcode == HloOpcode::kIota ||
opcode == HloOpcode::kPad || opcode == HloOpcode::kReduce ||
opcode == HloOpcode::kReduceWindow || opcode == HloOpcode::kReshape ||
opcode == HloOpcode::kReverse || opcode == HloOpcode::kSlice ||
opcode == HloOpcode::kTranspose ||
(opcode == HloOpcode::kConvolution &&
!PotentiallyImplementedAsEigenConvolution(*instruction,
target_machine_features_))) {
return cost_model_->GetParallelTaskCount(instruction);
}
return 1;
}
absl::StatusOr<bool> ParallelTaskAssigner::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(2, "ParallelTaskAssigner ENTRY");
XLA_VLOG_LINES(3, module->ToString());
HloToParallelTasks hlo_to_parallel_tasks;
ComputeTargetParallelTasks(module, &hlo_to_parallel_tasks);
bool changed = AssignParallelTasks(module, hlo_to_parallel_tasks);
XLA_VLOG_LINES(2, "ParallelTaskAssigner EXIT");
XLA_VLOG_LINES(3, module->ToString());
return changed;
}
bool ParallelTaskAssigner::AssignParallelTasks(
HloModule* module, const HloToParallelTasks& hlo_to_parallel_tasks) {
return AssignParallelTasksHelper(module, module->entry_computation(),
hlo_to_parallel_tasks);
}
bool ParallelTaskAssigner::AssignParallelTasksHelper(
HloModule* module, HloComputation* computation,
const HloToParallelTasks& hlo_to_parallel_tasks) {
bool changed = false;
std::vector<HloInstruction*> instructions(computation->instructions().begin(),
computation->instructions().end());
for (auto* instruction : instructions) {
if (instruction->opcode() == HloOpcode::kWhile) {
changed |= AssignParallelTasksHelper(module, instruction->while_body(),
hlo_to_parallel_tasks);
continue;
} else if (instruction->opcode() == HloOpcode::kCall) {
changed |= AssignParallelTasksHelper(module, instruction->to_apply(),
hlo_to_parallel_tasks);
continue;
}
auto it = hlo_to_parallel_tasks.find(instruction);
if (it == hlo_to_parallel_tasks.end()) {
continue;
}
const int64_t target_parallel_task_count = (*it).second;
auto dim_partition_counts = ShapePartitionAssigner(instruction->shape())
.Run(target_parallel_task_count);
const int64_t total_partition_count =
ShapePartitionAssigner::GetTotalPartitionCount(dim_partition_counts);
if (total_partition_count <= 1) {
continue;
}
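// Outline the instruction into its own computation so the chosen partition
// counts can be attached to the new root as backend config.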
auto* call = module->OutlineExpressionFromComputation(
{instruction}, absl::StrCat("parallel_", instruction->name()),
computation);
auto* new_root = call->to_apply()->root_instruction();
BackendConfig backend_config;
absl::c_copy(dim_partition_counts,
tsl::protobuf::RepeatedFieldBackInserter(
backend_config.mutable_outer_dimension_partitions()));
TF_CHECK_OK(new_root->set_backend_config(backend_config));
VLOG(2) << "Assigned parallel task count: " << total_partition_count
<< " to instruction: " << new_root->name()
<< " parent: " << new_root->parent()->name();
changed = true;
}
return changed;
}
void ParallelTaskAssigner::ComputeTargetParallelTasks(
HloModule* module, HloToParallelTasks* hlo_to_parallel_tasks) {
ParallelTaskAssignment parallel_task_assignment(max_parallelism_,
shape_size_function_, module,
&target_machine_features_);
for (auto* computation : module->MakeNonfusionComputations()) {
for (auto* instruction : computation->instructions()) {
const int64_t target_parallel_task_count =
parallel_task_assignment.GetTargetParallelTaskCount(instruction);
if (target_parallel_task_count > 1) {
hlo_to_parallel_tasks->insert(
{instruction, target_parallel_task_count});
}
}
}
}
}
} | #include "xla/service/cpu/parallel_task_assignment.h"
#include "xla/service/cpu/cpu_executable.h"
#include "xla/service/cpu/target_machine_features_fake.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
class ParallelTaskAssignmentTest : public HloTestBase {
protected:
const HloCostAnalysis::ShapeSizeFunction shape_size_func_ =
cpu::CpuExecutable::ShapeSizeBytes;
const int max_parallelism_ = 10;
cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features_;
ParallelTaskAssignmentTest()
: HloTestBase(), target_machine_features_([](int64_t shape_size) {
return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;
}) {}
absl::StatusOr<bool> RunParallelTaskAssigner(HloModule* module) {
return cpu::ParallelTaskAssigner(max_parallelism_, shape_size_func_,
&target_machine_features_)
.Run(module);
}
};
TEST_F(ParallelTaskAssignmentTest, DotOperationNotParallelized) {
const std::string hlo_string = R"(
HloModule TestTaskParallel_Dot
ENTRY Dot {
dot_lhs = f32[196614,2]{1,0} parameter(0)
dot_rhs = f32[2,1]{1,0} parameter(1)
ROOT dot = f32[196614,1]{1,0} dot(dot_lhs, dot_rhs),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest,
FusedComputationWithDotOperationNotParallelized) {
const std::string hlo_string = R"(
HloModule TestTaskParallel_DotNestedInFusedComp
fused_computation.0 {
parameter.0 = f32[196614,2]{1,0} parameter(0)
parameter.0.1 = f32[2,1]{1,0} parameter(1)
parameter.0.2 = f32[196614,1]{1,0} parameter(2)
dot.0 = f32[196614,1]{1,0} dot(parameter.0, parameter.0.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT add.0 = f32[196614,1]{1,0} add(dot.0, parameter.0.2)
}
ENTRY DotNestedInFusedComp {
parameter = f32[196614,2]{1,0} parameter(0)
parameter.1 = f32[2,1]{1,0} parameter(1)
parameter.2 = f32[196614,1]{1,0} parameter(2)
ROOT fusion = f32[196614,1]{1,0} fusion(parameter, parameter.1,
parameter.2), kind=kOutput, calls=fused_computation.0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest, RngOperationNotParallelized) {
const std::string hlo_string = R"(
HloModule TestTaskParallel_rng
ENTRY Rng {
src0 = f32[] parameter(0)
src1 = f32[] parameter(1)
ROOT rng0 = f32[1234567,2]{1,0} rng(f32[] src0, f32[] src1),
distribution=rng_uniform
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest, InfeedOutfeedOperationNotParallelized) {
const std::string hlo_string = R"(
HloModule TestTaskParallel_infeed_outfeed
ENTRY InfeedOutfeed {
token0 = token[] after-all()
infeed0 = (u32[12345678,2]{1,0}, token[]) infeed(token0)
infeed0.data = u32[12345678,2]{1,0} get-tuple-element((u32[12345678,2]{1,0}, token[]) infeed0), index=0
ROOT outfeed0 = token[] outfeed(infeed0.data, token0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest, InPlaceDynamicUpdateSliceNotParallelized) {
const std::string hlo_string = R"(
HloModule test
body {
zero = s32[] constant(0)
one = s32[] constant(1)
ten = s32[] constant(10)
loop_carry = (s32[], u32[1,100], u32[10000,100]) parameter(0)
i = s32[] get-tuple-element(loop_carry), index=0
i_plus_ten = s32[] add(i, ten)
update = u32[1,100] get-tuple-element(loop_carry), index=1
data = u32[10000,100] get-tuple-element(loop_carry), index=2
new_data = u32[10000,100] dynamic-update-slice(data, update, i_plus_ten, zero)
new_i = s32[] add(i, one)
ROOT tuple = (s32[], u32[1,100], u32[10000,100]) tuple(new_i, update, new_data)
}
cond {
loop_carry = (s32[], u32[1,100], u32[10000,100]) parameter(0)
two = s32[] constant(2)
i = s32[] get-tuple-element(loop_carry), index=0
ROOT less-than = pred[] compare(i, two), direction=LT
}
ENTRY test {
zero = s32[] constant(0)
initial_i = s32[] parameter(0)
update = u32[1,100] parameter(1)
data = u32[10000,100] parameter(2)
tuple = (s32[], u32[1,100], u32[10000,100]) tuple(initial_i, update, data)
ROOT while = (s32[], u32[1,100], u32[10000,100]) while(tuple), condition=cond, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest, AllReduceNotParallelized) {
constexpr char hlo_string[] = R"(
HloModule TestTaskParallel_allreduce
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[1234567] parameter(0)
ROOT crs = f32[1234567] all-reduce(input), replica_groups={}, to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest, ConstantNotParallelized) {
constexpr char hlo_string[] = R"(
HloModule TestTaskParallel_constant
ENTRY const {
ROOT constant = f32[1234567] constant({...})
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
}
} |
2,017 | cpp | tensorflow/tensorflow | xfeed_manager | third_party/xla/xla/service/cpu/xfeed_manager.cc | third_party/xla/xla/service/cpu/xfeed_manager_test.cc | #ifndef XLA_SERVICE_CPU_XFEED_MANAGER_H_
#define XLA_SERVICE_CPU_XFEED_MANAGER_H_
#include <deque>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/shape.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace cpu {
namespace runtime {
class XfeedBuffer {
public:
virtual ~XfeedBuffer() = default;
virtual int32_t length() = 0;
virtual void* data() = 0;
virtual void Done(absl::StatusOr<Shape> shape) = 0;
};
class XfeedQueueManager {
public:
XfeedQueueManager(std::string queue_name) : queue_name_(queue_name) {}
void Reset();
void EnqueueBuffersAtomically(absl::Span<XfeedBuffer* const> buffers);
XfeedBuffer* BlockingDequeueBuffer();
void ReleaseCurrentBuffer(int32_t length, void* data,
absl::StatusOr<Shape> shape);
private:
const std::string queue_name_;
absl::Mutex mu_;
absl::CondVar cv_;
std::deque<XfeedBuffer*> enqueued_buffers_;
XfeedBuffer* current_buffer_ = nullptr;
};
class XfeedManager {
public:
XfeedManager() = default;
void Reset();
XfeedQueueManager* infeed() { return &infeed_; }
XfeedQueueManager* outfeed() { return &outfeed_; }
private:
XfeedQueueManager infeed_ = {"infeed"};
XfeedQueueManager outfeed_ = {"outfeed"};
};
int64_t GetByteSizeRequirement(const Shape& shape, int64_t pointer_size);
}
}
}
#endif
#include "xla/service/cpu/xfeed_manager.h"
#include "xla/shape_util.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace cpu {
namespace runtime {
void XfeedManager::Reset() {
infeed()->Reset();
outfeed()->Reset();
}
void XfeedQueueManager::Reset() {
absl::MutexLock l(&mu_);
CHECK(current_buffer_ == nullptr);
for (auto buffer : enqueued_buffers_) {
buffer->Done(ShapeUtil::MakeNil());
}
enqueued_buffers_.clear();
}
void XfeedQueueManager::EnqueueBuffersAtomically(
absl::Span<XfeedBuffer* const> buffers) {
absl::MutexLock l(&mu_);
bool was_empty = enqueued_buffers_.empty();
for (XfeedBuffer* b : buffers) {
VLOG(3) << "Enqueueing " << queue_name_ << " buffer (of " << buffers.size()
<< " buffers) with length: " << b->length();
enqueued_buffers_.push_back(b);
}
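// Only an empty-to-non-empty transition needs to wake a waiting consumer.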
if (was_empty && !buffers.empty()) {
cv_.Signal();
}
}
XfeedBuffer* XfeedQueueManager::BlockingDequeueBuffer() {
absl::MutexLock l(&mu_);
VLOG(3) << "Waiting for an available buffer.";
while (enqueued_buffers_.empty()) {
cv_.Wait(&mu_);
}
VLOG(3) << "A buffer is available!";
CHECK(current_buffer_ == nullptr);
current_buffer_ = enqueued_buffers_.front();
enqueued_buffers_.pop_front();
return current_buffer_;
}
void XfeedQueueManager::ReleaseCurrentBuffer(int32_t length, void* data,
absl::StatusOr<Shape> shape) {
VLOG(3) << "Releasing buffer with shape: "
<< (shape.ok() ? ShapeUtil::HumanString(shape.value())
: "<error status>");
absl::MutexLock l(&mu_);
CHECK(current_buffer_ != nullptr);
CHECK_EQ(length, current_buffer_->length());
CHECK_EQ(data, current_buffer_->data());
current_buffer_->Done(std::move(shape));
current_buffer_ = nullptr;
}
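// Non-tuple dynamic shapes reserve one extra int32 per dimension to carry the
// runtime sizes alongside the data.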
int64_t GetByteSizeRequirement(const Shape& shape, int64_t pointer_size) {
if (shape.IsTuple() || shape.is_static()) {
return ShapeUtil::ByteSizeOf(shape, pointer_size);
}
int64_t metadata_size = sizeof(int32_t) * shape.dimensions_size();
return ShapeUtil::ByteSizeOf(shape, pointer_size) + metadata_size;
}
}
}
} | #include "xla/service/cpu/xfeed_manager.h"
#include <memory>
#include "xla/service/cpu/cpu_runtime.h"
#include "xla/shape_util.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace {
class InfeedManagerTest : public ::testing::Test {};
class TestInfeedBuffer : public cpu::runtime::XfeedBuffer {
public:
explicit TestInfeedBuffer(int32_t length, bool expect_shape_match = true)
: shape_(ShapeUtil::MakeShape(U8, {length})),
done_called_(false),
length_(length),
expect_shape_match_(expect_shape_match) {}
~TestInfeedBuffer() override { EXPECT_TRUE(done_called_); }
int32_t length() override { return length_; }
void* data() override { return nullptr; }
void Done(absl::StatusOr<Shape> shape) override {
CHECK(!done_called_);
done_called_ = true;
TF_ASSERT_OK(shape.status());
EXPECT_EQ(expect_shape_match_, ShapeUtil::Equal(shape_, shape.value()))
<< "want " << ShapeUtil::HumanString(shape_) << " "
<< (expect_shape_match_ ? "==" : "!=") << " "
<< ShapeUtil::HumanString(shape.value());
delete this;
}
const Shape& shape() const { return shape_; }
private:
Shape shape_;
bool done_called_;
int32_t length_;
bool expect_shape_match_;
};
void ProcessNextBuffer(int32_t length) {
auto shape = ShapeUtil::MakeShape(U8, {length});
std::string bytes = shape.SerializeAsString();
void* buffer = __xla_cpu_runtime_AcquireInfeedBufferForDequeue(
nullptr, length, bytes.data(), bytes.size());
__xla_cpu_runtime_ReleaseInfeedBufferAfterDequeue(
nullptr, length, buffer, bytes.data(), bytes.size());
}
void ProcessNextOutfeedBuffer(int32_t length, const Shape& shape) {
std::string bytes = shape.SerializeAsString();
void* buffer = __xla_cpu_runtime_AcquireOutfeedBufferForPopulation(
nullptr, length, bytes.data(), bytes.size());
__xla_cpu_runtime_ReleaseOutfeedBufferAfterPopulation(
nullptr, length, buffer, bytes.data(), bytes.size());
}
TEST_F(InfeedManagerTest, SingleThreadedSequential) {
TestInfeedBuffer* a = new TestInfeedBuffer(64);
TestInfeedBuffer* b = new TestInfeedBuffer(32);
cpu::runtime::XfeedManager* xfeed = cpu::runtime::GetXfeedManager(0);
xfeed->infeed()->EnqueueBuffersAtomically({a});
xfeed->infeed()->EnqueueBuffersAtomically({b});
ProcessNextBuffer(a->length());
ProcessNextBuffer(b->length());
}
TEST_F(InfeedManagerTest, SingleThreadedInterleaved) {
TestInfeedBuffer* a = new TestInfeedBuffer(64);
TestInfeedBuffer* b = new TestInfeedBuffer(32);
cpu::runtime::XfeedManager* xfeed = cpu::runtime::GetXfeedManager(0);
xfeed->infeed()->EnqueueBuffersAtomically({a});
ProcessNextBuffer(a->length());
xfeed->infeed()->EnqueueBuffersAtomically({b});
ProcessNextBuffer(b->length());
}
TEST_F(InfeedManagerTest, MultiThreaded) {
tsl::thread::ThreadPool pool(tsl::Env::Default(), "test", 2);
cpu::runtime::XfeedManager* xfeed = cpu::runtime::GetXfeedManager(0);
const int32_t length = 64;
pool.Schedule([length, &xfeed]() {
int64_t start_micros = tsl::Env::Default()->NowMicros();
while (true) {
int64_t end_micros = tsl::Env::Default()->NowMicros();
if ((end_micros - start_micros) >= 100000) {
break;
}
}
TestInfeedBuffer* a = new TestInfeedBuffer(length);
xfeed->infeed()->EnqueueBuffersAtomically({a});
});
ProcessNextBuffer(length);
}
TEST_F(InfeedManagerTest, OutfeedBasic) {
TestInfeedBuffer* b = new TestInfeedBuffer(32, true);
cpu::runtime::XfeedManager* xfeed = cpu::runtime::GetXfeedManager(0);
xfeed->outfeed()->EnqueueBuffersAtomically({b});
ProcessNextOutfeedBuffer(32, ShapeUtil::MakeShape(U8, {32}));
}
TEST_F(InfeedManagerTest, OutfeedEmpty) {
TestInfeedBuffer* b = new TestInfeedBuffer(0, true);
cpu::runtime::XfeedManager* xfeed = cpu::runtime::GetXfeedManager(0);
xfeed->outfeed()->EnqueueBuffersAtomically({b});
ProcessNextOutfeedBuffer(0, ShapeUtil::MakeShape(U8, {0}));
}
TEST_F(InfeedManagerTest, OutfeedWrongShape) {
TestInfeedBuffer* b = new TestInfeedBuffer(32, false);
cpu::runtime::XfeedManager* xfeed = cpu::runtime::GetXfeedManager(0);
xfeed->outfeed()->EnqueueBuffersAtomically({b});
ProcessNextOutfeedBuffer(32, ShapeUtil::MakeShape(U8, {33}));
}
}
} |
2,018 | cpp | tensorflow/tensorflow | onednn_convolution | third_party/xla/xla/service/cpu/onednn_convolution.cc | third_party/xla/xla/service/cpu/tests/onednn_convolution_test.cc | #ifndef XLA_SERVICE_CPU_ONEDNN_CONVOLUTION_H_
#define XLA_SERVICE_CPU_ONEDNN_CONVOLUTION_H_
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
namespace xla {
namespace cpu {
extern "C" {
extern void __xla_cpu_runtime_OneDnnConvolution(void* result, void** args);
}
}
}
#endif
#endif
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_convolution.h"
#include <algorithm>
#include <cmath>
#include <cstring>
#include <initializer_list>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#define EIGEN_USE_THREADS
#include "dnnl.hpp"
#include "absl/base/dynamic_annotations.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "xla/executable_run_options.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/onednn_memory_util.h"
#include "xla/service/cpu/runtime_lightweight_check.h"
#include "xla/tsl/util/onednn_threadpool.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace cpu {
namespace {
using dnnl::algorithm;
using dnnl::convolution_forward;
using dnnl::memory;
using dnnl::prop_kind;
using dnnl::stream;
}
dnnl::memory ReorderMemory(const dnnl::engine& engine,
const dnnl::memory::desc& dest_md,
dnnl::memory& src_mem,
const dnnl::stream& onednn_stream) {
auto dest_mem = memory(dest_md, engine);
dnnl::reorder(src_mem, dest_mem).execute(onednn_stream, src_mem, dest_mem);
return dest_mem;
}
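// Pick the channels-last format tag matching the rank (nwc/nhwc/ndhwc);
// 'any' lets oneDNN choose for other ranks.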
dnnl::memory::format_tag GetFormatTag(const int dims) {
return (dims == 3) ? dnnl::memory::format_tag::nwc
: (dims == 4) ? dnnl::memory::format_tag::nhwc
: (dims == 5) ? dnnl::memory::format_tag::ndhwc
: dnnl::memory::format_tag::any;
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_OneDnnConvolution(
void* result, void** args) {
int arg_indx = 0;
const int64_t num_args = *(static_cast<int64_t*>(args[arg_indx++]));
const xla::ExecutableRunOptions* run_options =
static_cast<const xla::ExecutableRunOptions*>(args[arg_indx++]);
XLA_LIGHTWEIGHT_CHECK(run_options != nullptr);
XLA_LIGHTWEIGHT_CHECK(run_options->intra_op_thread_pool() != nullptr);
tsl::OneDnnThreadPool thread_pool(
run_options->intra_op_thread_pool()->getPool(), false);
dnnl::engine cpu_engine(dnnl::engine::kind::cpu, 0);
#ifndef ENABLE_ONEDNN_OPENMP
auto onednn_stream =
stream(dnnl::threadpool_interop::make_stream(cpu_engine, &thread_pool));
#else
auto onednn_stream = stream(cpu_engine);
#endif
std::string config_str(static_cast<const char*>(args[arg_indx++]));
OneDnnConvolutionConfig conv_config;
conv_config.ParseFromString(config_str);
std::vector<int64_t> inp_perm_axes(conv_config.dims());
std::vector<int64_t> ker_perm_axes(conv_config.dims());
std::vector<int64_t> out_perm_axes(conv_config.dims());
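// Build permutations from the XLA dimension numbering to oneDNN's canonical
// order: N, C, <spatial> for data and O, I, <spatial> for the kernel.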
int index_i = 0;
int index_o = 0;
int index_k = 0;
inp_perm_axes[conv_config.input().data().batch_dim()] = index_i++;
out_perm_axes[conv_config.output().data().batch_dim()] = index_o++;
ker_perm_axes[conv_config.kernel().filter().output_feature_dim()] = index_k++;
inp_perm_axes[conv_config.input().data().feature_dim()] = index_i++;
out_perm_axes[conv_config.output().data().feature_dim()] = index_o++;
ker_perm_axes[conv_config.kernel().filter().input_feature_dim()] = index_k++;
std::vector<int64_t> inp_dim_axes(
conv_config.input().data().spatial_dims().begin(),
conv_config.input().data().spatial_dims().end());
std::vector<int64_t> ker_dim_axes(
conv_config.kernel().filter().spatial_dims().begin(),
conv_config.kernel().filter().spatial_dims().end());
std::vector<int64_t> out_dim_axes(
conv_config.output().data().spatial_dims().begin(),
conv_config.output().data().spatial_dims().end());
std::for_each(inp_dim_axes.begin(), inp_dim_axes.end(),
[&inp_perm_axes, &index_i](int64_t& n) {
n -= 1;
inp_perm_axes[n] = index_i++;
});
std::for_each(ker_dim_axes.begin(), ker_dim_axes.end(),
[&ker_perm_axes, &index_k](int64_t& n) {
n -= 1;
ker_perm_axes[n] = index_k++;
});
std::for_each(out_dim_axes.begin(), out_dim_axes.end(),
[&out_perm_axes, &index_o](int64_t& n) {
n -= 1;
out_perm_axes[n] = index_o++;
});
memory::dims strides(conv_config.window().strides().begin(),
conv_config.window().strides().end());
memory::dims pad_left(conv_config.window().pad_left().begin(),
conv_config.window().pad_left().end());
memory::dims pad_right(conv_config.window().pad_right().begin(),
conv_config.window().pad_right().end());
memory::dims rhs_dilations(conv_config.window().window_dilations().begin(),
conv_config.window().window_dilations().end());
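// The window parameters are stored offset in the config (+1, dilations +2),
// likely so zero-valued proto fields stay distinguishable from real values;
// undo the offset here (oneDNN's dilation baseline is 0, not 1).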
std::for_each(strides.begin(), strides.end(), [](int64_t& n) { n -= 1; });
std::for_each(pad_left.begin(), pad_left.end(), [](int64_t& n) { n -= 1; });
std::for_each(pad_right.begin(), pad_right.end(), [](int64_t& n) { n -= 1; });
std::for_each(rhs_dilations.begin(), rhs_dilations.end(),
[](int64_t& n) { n -= 2; });
auto groups = conv_config.feature_groups();
MemrefInfo inp_minfo(args[arg_indx++]);
MemrefInfo ker_minfo(args[arg_indx++]);
MemrefInfo res_minfo(result);
auto inp_md = inp_minfo.GetOneDnnMemDesc();
auto ker_md = ker_minfo.GetOneDnnMemDesc();
auto res_md = res_minfo.GetOneDnnMemDesc();
std::vector<int> inp_axes(inp_perm_axes.begin(), inp_perm_axes.end());
std::vector<int> ker_axes(ker_perm_axes.begin(), ker_perm_axes.end());
std::vector<int> out_axes(out_perm_axes.begin(), out_perm_axes.end());
auto new_inp_md = inp_md.permute_axes(inp_axes);
auto new_ker_md = ker_md.permute_axes(ker_axes);
auto new_res_md = res_md.permute_axes(out_axes);
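// Grouped convolution: reshape the kernel to oneDNN's grouped layout
// (G, O/G, I_per_group, spatial); XLA kernels already store per-group input
// channels.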
if (groups > 1) {
auto corr_dims = new_ker_md.get_dims();
corr_dims.insert(corr_dims.begin(), 1, groups);
corr_dims[1] = corr_dims[1] / groups;
new_ker_md = new_ker_md.reshape(corr_dims);
}
auto any_ker_md =
memory::desc(new_ker_md.get_dims(), new_ker_md.get_data_type(),
dnnl::memory::format_tag::any);
auto any_inp_md =
memory::desc(new_inp_md.get_dims(), new_inp_md.get_data_type(),
GetFormatTag(new_inp_md.get_ndims()));
auto any_res_md =
memory::desc(new_res_md.get_dims(), new_res_md.get_data_type(),
GetFormatTag(new_res_md.get_ndims()));
XLA_LIGHTWEIGHT_CHECK(num_args == arg_indx);
dnnl::primitive_attr attrs;
auto inp_mem = memory(new_inp_md, cpu_engine, inp_minfo.Data());
auto ker_mem = memory(new_ker_md, cpu_engine, ker_minfo.Data());
auto res_mem = memory(new_res_md, cpu_engine, res_minfo.Data());
auto conv_pd = convolution_forward::primitive_desc(
cpu_engine, prop_kind::forward_inference, algorithm::convolution_direct,
any_inp_md, any_ker_md, any_res_md, strides, rhs_dilations, pad_left,
pad_right, attrs);
auto new_inp_mem = (conv_pd.src_desc() == inp_mem.get_desc())
? inp_mem
: ReorderMemory(cpu_engine, conv_pd.src_desc(),
inp_mem, onednn_stream);
auto new_ker_mem = (conv_pd.weights_desc() == ker_mem.get_desc())
? ker_mem
: ReorderMemory(cpu_engine, conv_pd.weights_desc(),
ker_mem, onednn_stream);
auto new_res_mem = (conv_pd.dst_desc() == res_mem.get_desc())
? res_mem
: memory(conv_pd.dst_desc(), cpu_engine);
auto conv_prim = convolution_forward(conv_pd);
std::unordered_map<int, memory> conv_args{{DNNL_ARG_SRC, new_inp_mem},
{DNNL_ARG_WEIGHTS, new_ker_mem},
{DNNL_ARG_DST, new_res_mem}};
conv_prim.execute(onednn_stream, conv_args);
if (conv_pd.dst_desc() == res_mem.get_desc()) {
res_mem = new_res_mem;
} else {
dnnl::reorder(new_res_mem, res_mem)
.execute(onednn_stream, new_res_mem, res_mem);
}
}
}
}
#endif | #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include <utility>
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/service/cpu/onednn_matmul_rewriter.h"
#include "xla/service/cpu/onednn_util.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
#include "tsl/platform/cpu_info.h"
namespace xla {
namespace cpu {
class ConvolutionTest : public HloTestBase {
protected:
const char* conv_rewrite_str_ = R"(
; CHECK: custom_call_target="__onednn$convolution",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_conv_config":{
; CHECK-DAG: }
; CHECK: }
)";
};
TEST_F(ConvolutionTest, Simple2DTestF32) {
const char* convolution_module_str = R"(
HloModule convolution.test.f32
ENTRY convolution.test.f32 {
arg.0 = f32[1,22,22,1] parameter(0), parameter_replication={false}
reshape.0 = f32[1,22,22,1] reshape(arg.0)
arg.1 = f32[8,8,1,1] parameter(1), parameter_replication={false}
reshape.1 = f32[8,8,1,1] reshape(arg.1)
convolution.0 = f32[1,11,11,1] convolution(reshape.0, reshape.1), window={size=8x8 stride=2x2 pad=3_3x3_3}, dim_labels=b01f_01io->b01f
reshape.2 = f32[1,11,11,1] reshape(convolution.0)
tuple.0 = (f32[1,11,11,1]) tuple(reshape.2)
ROOT get-tuple-element.0 = f32[1,11,11,1] get-tuple-element(tuple.0), index=0
})";
EXPECT_TRUE(RunAndCompare(convolution_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(convolution_module_str, conv_rewrite_str_);
}
TEST_F(ConvolutionTest, Simple3DTestBF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* convolution_module_str = R"(
HloModule convolution.test.bf16
ENTRY convolution.test.bf16 {
p0 = bf16[8,4,5,5,1] parameter(0)
p1 = bf16[3,3,3,1,32] parameter(1)
ROOT conv = bf16[8,4,5,5,32] convolution(p0, p1), window={size=3x3x3 pad=1_1x1_1x1_1}, dim_labels=b012f_012io->b012f
})";
EXPECT_TRUE(RunAndCompare(convolution_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(convolution_module_str, conv_rewrite_str_);
}
}
}
#endif |
2,019 | cpp | tensorflow/tensorflow | cpu_layout_assignment | third_party/xla/xla/service/cpu/cpu_layout_assignment.cc | third_party/xla/xla/service/cpu/cpu_layout_assignment_test.cc | #ifndef XLA_SERVICE_CPU_CPU_LAYOUT_ASSIGNMENT_H_
#define XLA_SERVICE_CPU_CPU_LAYOUT_ASSIGNMENT_H_
#include "xla/service/computation_layout.h"
#include "xla/service/cpu/target_machine_features.h"
#include "xla/service/layout_assignment.h"
#include "tsl/platform/status.h"
namespace xla {
namespace cpu {
class CpuLayoutAssignment : public LayoutAssignment {
public:
explicit CpuLayoutAssignment(
ComputationLayout* entry_computation_layout,
const TargetMachineFeatures* target_machine_features,
ChannelLayoutConstraints* channel_constraints = nullptr)
: LayoutAssignment(entry_computation_layout, channel_constraints),
target_machine_features_(*target_machine_features) {}
~CpuLayoutAssignment() override {}
protected:
absl::Status AddBackendConstraints(LayoutConstraints* constraints) override;
const TargetMachineFeatures& target_machine_features_;
};
}
}
#endif
#include "xla/service/cpu/cpu_layout_assignment.h"
#include <cstdint>
#include <numeric>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/map_util.h"
#include "xla/service/cpu/dot_op_emitter.h"
#include "xla/service/cpu/ir_emission_utils.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace cpu {
namespace {
using std::nullopt;
using std::optional;
using ShouldMakeOperandColMajorCache =
absl::flat_hash_map<const HloInstruction*, bool>;
}
static bool ShouldMakeAllUsersColMajor(const HloInstruction* instruction) {
for (auto* user : instruction->users()) {
optional<int64_t> operand_idx =
ProfitableToMakeDotOperandColumnMajor(*user);
if (!operand_idx || user->operand(*operand_idx) != instruction ||
absl::c_count(user->operands(), instruction) != 1) {
return false;
}
}
return true;
}
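// Returns the operand index worth forcing column-major, but only when that
// operand is a constant whose every user also profits; the per-constant
// answer is cached.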
static optional<int64_t> ShouldMakeOperandColumnMajor(
ShouldMakeOperandColMajorCache* cache, const HloInstruction& instruction) {
optional<int64_t> operand_idx =
ProfitableToMakeDotOperandColumnMajor(instruction);
if (!operand_idx) {
return nullopt;
}
const HloInstruction* operand = instruction.operand(*operand_idx);
if (operand->opcode() != HloOpcode::kConstant) {
return nullopt;
}
auto it = cache->find(operand);
if (it == cache->end()) {
auto insert_result =
cache->insert({operand, ShouldMakeAllUsersColMajor(operand)});
CHECK(insert_result.second);
it = insert_result.first;
}
return it->second ? operand_idx : nullopt;
}
static Shape RowMajorShape(Shape shape) {
ShapeUtil::ForEachMutableSubshape(
&shape, [](Shape* subshape, const ShapeIndex& index) {
if (!subshape->IsArray()) {
return;
}
std::vector<int64_t> dimension_order(subshape->dimensions_size());
std::iota(dimension_order.rbegin(), dimension_order.rend(), 0);
*subshape->mutable_layout() = LayoutUtil::MakeLayout(dimension_order);
});
return shape;
}
static Shape ColMajorShape(const Shape& old_shape) {
Shape new_shape(old_shape);
std::vector<int64_t> dimension_order(new_shape.dimensions_size());
std::iota(dimension_order.begin(), dimension_order.end(), 0);
*new_shape.mutable_layout() = LayoutUtil::MakeLayout(dimension_order);
return new_shape;
}
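// Eigen-eligible convolutions, dots that must stay row-major, and the TopK
// custom call all require row-major operands and results.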
static bool OperandsAndResultMustHaveRowMajorLayout(
const HloInstruction& instr,
const TargetMachineFeatures& target_machine_features) {
if (instr.opcode() == HloOpcode::kConvolution) {
return PotentiallyImplementedAsEigenConvolution(instr,
target_machine_features);
} else if (instr.opcode() == HloOpcode::kDot) {
return DotOperandsAndResultMustHaveRowMajorLayout(instr,
target_machine_features);
} else if (instr.opcode() == HloOpcode::kCustomCall) {
return instr.custom_call_target() == "TopK";
}
return false;
}
absl::Status CpuLayoutAssignment::AddBackendConstraints(
LayoutConstraints* constraints) {
ShouldMakeOperandColMajorCache cache;
const HloComputation* computation = constraints->computation();
for (auto* instruction : computation->instructions()) {
if (OperandsAndResultMustHaveRowMajorLayout(*instruction,
target_machine_features_)) {
TF_RETURN_IF_ERROR(SetInstructionLayout(
RowMajorShape(instruction->shape()), instruction));
for (int i = 0; i < instruction->operand_count(); i++) {
TF_RETURN_IF_ERROR(SetOperandLayout(
RowMajorShape(instruction->operand(i)->shape()), instruction, i));
}
} else if (optional<int64_t> op_idx =
ShouldMakeOperandColumnMajor(&cache, *instruction)) {
const HloInstruction* op = instruction->operand(*op_idx);
TF_RETURN_IF_ERROR(
SetOperandLayout(ColMajorShape(op->shape()), instruction, *op_idx));
} else if (instruction->opcode() == HloOpcode::kReduceScatter) {
auto ars = Cast<HloReduceScatterInstruction>(instruction);
TF_RETURN_IF_ERROR(SetInstructionLayout(
ShapeUtil::MoveDimToMajor(ars->shape(), ars->scatter_dimension()),
ars));
} else if (instruction->opcode() == HloOpcode::kAllGather) {
auto ag = Cast<HloAllGatherInstruction>(instruction);
TF_RETURN_IF_ERROR(SetInstructionLayout(
ShapeUtil::MoveDimToMajor(ag->shape(), ag->all_gather_dimension()),
ag));
} else {
for (int64_t operand_no = 0; operand_no < instruction->operand_count();
++operand_no) {
if (constraints->OperandLayout(instruction, operand_no) != nullptr) {
continue;
}
if (AnyOperandBufferForwarded(instruction, operand_no)) {
continue;
}
if (!instruction->operand(operand_no)->shape().IsArray()) {
continue;
}
Shape operand_shape(
RowMajorShape(instruction->operand(operand_no)->shape()));
TF_RETURN_IF_ERROR(
SetOperandLayout(operand_shape, instruction, operand_no));
}
if (computation->parent()->entry_computation() == computation &&
computation->root_instruction() == instruction) {
continue;
}
if (!instruction->shape().IsArray()) {
continue;
}
}
}
return absl::OkStatus();
}
}
} | #include "xla/service/cpu/cpu_layout_assignment.h"
#include <initializer_list>
#include <memory>
#include <utility>
#include <vector>
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/computation_layout.h"
#include "xla/service/cpu/target_machine_features_fake.h"
#include "xla/shape_layout.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
class CpuLayoutAssignmentTest : public HloTestBase {
protected:
void AssignLayouts(HloModule* module,
ComputationLayout* entry_computation_layout) {
cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features(
[](int64_t shape_size) {
return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;
});
cpu::CpuLayoutAssignment layout_assignment(entry_computation_layout,
&target_machine_features);
EXPECT_IS_OK(layout_assignment.Run(module).status());
}
};
TEST_F(CpuLayoutAssignmentTest, DotWithConstantRhsTensor) {
auto builder = HloComputation::Builder(TestName());
Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12}, {0});
Shape rhs_shape = ShapeUtil::MakeShape(F32, {12, 24});
Shape result_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {24}, {0});
auto dot_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "param0"));
auto dot_rhs = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(rhs_shape)));
auto result = builder.AddInstruction(
CreateCanonicalDot(result_shape, dot_lhs, dot_rhs));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
AssignLayouts(module.get(), &computation_layout);
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0}),
dot_lhs->shape().layout()));
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0, 1}),
dot_rhs->shape().layout()));
EXPECT_TRUE(
LayoutUtil::Equal(LayoutUtil::MakeLayout({0}), result->shape().layout()));
for (const auto& instruction : computation->instructions()) {
EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
}
}
TEST_F(CpuLayoutAssignmentTest, MultipleDotsWithSameConstantRhsTensor0) {
auto builder = HloComputation::Builder(TestName());
Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12}, {0});
Shape rhs_shape = ShapeUtil::MakeShape(F32, {12, 24});
Shape result_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {24}, {0});
auto dot_a_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "param0"));
auto dot_b_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(1, lhs_shape, "param1"));
auto dot_rhs = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(rhs_shape)));
auto dot_a_result = builder.AddInstruction(
CreateCanonicalDot(result_shape, dot_a_lhs, dot_rhs));
auto dot_b_result = builder.AddInstruction(
CreateCanonicalDot(result_shape, dot_b_lhs, dot_rhs));
builder.AddInstruction(HloInstruction::CreateBinary(
result_shape, HloOpcode::kAdd, dot_a_result, dot_b_result));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
AssignLayouts(module.get(), &computation_layout);
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0, 1}),
dot_rhs->shape().layout()));
for (HloInstruction* instruction :
{dot_a_lhs, dot_b_lhs, dot_a_result, dot_b_result}) {
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0}),
instruction->shape().layout()));
}
for (const auto& instruction : computation->instructions()) {
EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
}
}
TEST_F(CpuLayoutAssignmentTest, MultipleDotsWithSameConstantRhsTensor1) {
auto builder = HloComputation::Builder(TestName());
Shape lhs_a_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 12}, {0, 1});
Shape lhs_b_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 12}, {0, 1});
Shape rhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12, 24}, {0, 1});
Shape result_a_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 24}, {0, 1});
Shape result_b_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 24}, {0, 1});
auto dot_a_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_a_shape, "param0"));
auto dot_b_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(1, lhs_b_shape, "param1"));
auto dot_rhs = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(rhs_shape)));
auto dot_a_result = builder.AddInstruction(
CreateCanonicalDot(result_a_shape, dot_a_lhs, dot_rhs));
auto dot_b_result = builder.AddInstruction(
CreateCanonicalDot(result_b_shape, dot_b_lhs, dot_rhs));
auto tuple_result = builder.AddInstruction(
HloInstruction::CreateTuple({dot_a_result, dot_b_result}));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_a_shape));
*computation_layout.mutable_parameter_layout(1) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_b_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(tuple_result->shape()));
AssignLayouts(module.get(), &computation_layout);
for (HloInstruction* instruction :
{dot_rhs, dot_a_lhs, dot_b_lhs, dot_a_result, dot_b_result}) {
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({1, 0}),
instruction->shape().layout()));
}
for (const auto& instruction : computation->instructions()) {
EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
}
}
TEST_F(CpuLayoutAssignmentTest, DotWithConstantLhsTensor) {
auto builder = HloComputation::Builder(TestName());
Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 12}, {0, 1});
Shape rhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12, 24}, {0, 1});
Shape result_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 24}, {0, 1});
auto dot_lhs = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(lhs_shape)));
auto dot_rhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, rhs_shape, "param0"));
auto dot_result = builder.AddInstruction(
CreateCanonicalDot(result_shape, dot_lhs, dot_rhs));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(rhs_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
AssignLayouts(module.get(), &computation_layout);
for (HloInstruction* instruction : {dot_lhs, dot_rhs, dot_result}) {
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({1, 0}),
instruction->shape().layout()));
}
for (const auto& instruction : computation->instructions()) {
EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
}
}
TEST_F(CpuLayoutAssignmentTest, DotWithConstantRhsTensorThroughGTE) {
auto builder = HloComputation::Builder(TestName());
Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 12}, {0, 1});
Shape rhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12, 24}, {0, 1});
Shape other_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {100, 24}, {0, 1});
auto constant_shape = ShapeUtil::MakeTupleShape({other_shape, rhs_shape});
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(constant_shape)));
Shape result_shape = ShapeUtil::MakeShape(F32, {1, 24});
auto dot_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "param0"));
auto dot_rhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(rhs_shape, constant, 1));
auto dot_result = builder.AddInstruction(
CreateCanonicalDot(result_shape, dot_lhs, dot_rhs));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
AssignLayouts(module.get(), &computation_layout);
for (HloInstruction* instruction : {dot_lhs, dot_rhs, dot_result}) {
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({1, 0}),
instruction->shape().layout()));
}
for (const auto& instruction : computation->instructions()) {
EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
}
}
struct DotOutputFusionLayoutAssignmentResult {
bool layout_assignment_changed_something;
const HloInstruction* dot_lhs_fusion_param;
const HloInstruction* dot_rhs_fusion_param;
const HloInstruction* addend_fusion_param;
};
static absl::StatusOr<DotOutputFusionLayoutAssignmentResult> RunDotOutputFusion(
HloModule* module, const std::string& test_name, int m, int k, int n,
const int64_t dot_operand_idx_in_add) {
DotOutputFusionLayoutAssignmentResult result;
CHECK(dot_operand_idx_in_add == 0 || dot_operand_idx_in_add == 1);
auto builder = HloComputation::Builder(test_name);
Shape dot_lhs_shape = ShapeUtil::MakeShape(F32, {m, k});
Shape dot_rhs_shape = ShapeUtil::MakeShape(F32, {k, n});
Shape dot_shape = ShapeUtil::MakeShape(F32, {m, n});
if (m == 1) {
dot_lhs_shape = ShapeUtil::MakeShape(F32, {k});
dot_shape = ShapeUtil::MakeShape(F32, {n});
} else if (n == 1) {
dot_rhs_shape = ShapeUtil::MakeShape(F32, {k});
dot_shape = ShapeUtil::MakeShape(F32, {m});
}
HloInstruction* dot_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, dot_lhs_shape, "param0"));
HloInstruction* addend = builder.AddInstruction(
HloInstruction::CreateParameter(1, dot_shape, "param1"));
HloInstruction* dot_rhs = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(dot_rhs_shape)));
HloInstruction* dot_result =
builder.AddInstruction(CreateCanonicalDot(dot_shape, dot_lhs, dot_rhs));
HloInstruction* add_result;
if (dot_operand_idx_in_add == 0) {
add_result = builder.AddInstruction(HloInstruction::CreateBinary(
dot_shape, HloOpcode::kAdd, dot_result, addend));
} else {
add_result = builder.AddInstruction(HloInstruction::CreateBinary(
dot_shape, HloOpcode::kAdd, addend, dot_result));
}
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloInstruction* fusion_instruction =
module->entry_computation()->AddInstruction(HloInstruction::CreateFusion(
dot_shape, HloInstruction::FusionKind::kOutput, add_result));
TF_RETURN_IF_ERROR(
computation->ReplaceInstruction(add_result, fusion_instruction));
HloInstruction* fused_add =
fusion_instruction->fused_instructions_computation()->root_instruction();
HloInstruction* fused_dot = fusion_instruction->FuseInstruction(dot_result);
TF_RETURN_IF_ERROR(
computation->RemoveInstructionAndUnusedOperands(dot_result));
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(dot_lhs_shape));
*computation_layout.mutable_parameter_layout(1) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(dot_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(dot_shape));
result.dot_lhs_fusion_param =
fusion_instruction->operand(fused_dot->operand(0)->parameter_number());
result.dot_rhs_fusion_param =
fusion_instruction->operand(fused_dot->operand(1)->parameter_number());
result.addend_fusion_param = fusion_instruction->operand(
fused_add->operand(1 - dot_operand_idx_in_add)->parameter_number());
cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features(
[](int64_t shape_size) {
return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;
});
cpu::CpuLayoutAssignment layout_assignment(&computation_layout,
&target_machine_features);
TF_ASSIGN_OR_RETURN(result.layout_assignment_changed_something,
layout_assignment.Run(module));
return result;
}
static void AssertCorrectLayoutForDotOutputFusion(
const HloComputation* computation,
const DotOutputFusionLayoutAssignmentResult& layout_assignment_result,
bool expect_col_major_dot_rhs) {
Layout expected_dot_rhs_layout = expect_col_major_dot_rhs
? LayoutUtil::MakeLayout({0, 1})
: LayoutUtil::MakeLayout({1, 0});
if (layout_assignment_result.dot_rhs_fusion_param->shape().rank() == 1) {
expected_dot_rhs_layout = LayoutUtil::MakeLayout({0});
}
EXPECT_TRUE(LayoutUtil::Equal(
expected_dot_rhs_layout,
layout_assignment_result.dot_rhs_fusion_param->shape().layout()));
EXPECT_TRUE(LayoutUtil::Equal(
LayoutUtil::MakeDescendingLayout(
layout_assignment_result.dot_lhs_fusion_param->shape().rank()),
layout_assignment_result.dot_lhs_fusion_param->shape().layout()));
EXPECT_TRUE(LayoutUtil::Equal(
LayoutUtil::MakeDescendingLayout(
layout_assignment_result.addend_fusion_param->shape().rank()),
layout_assignment_result.addend_fusion_param->shape().layout()));
EXPECT_THAT(computation->instructions(), Each(Not(op::Copy())));
}
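// The six tests below sweep (m, k, n) and the dot operand's index in the
// add. Only the m == 1 (vector-matrix) cases expect a column-major dot RHS;
// for n == 1 and the full matrix-matrix case the RHS stays row-major.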
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_1x50x19_dot_idx_0) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/1, /*k=*/50,
                         /*n=*/19, /*dot_operand_idx_in_add=*/0));
  ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
  AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
                                        layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/true);
}
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_1x50x19_dot_idx_1) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/1, /*k=*/50,
                         /*n=*/19, /*dot_operand_idx_in_add=*/1));
  ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
  AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
                                        layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/true);
}
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_19x50x1_dot_idx_0) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/19, /*k=*/50,
                         /*n=*/1, /*dot_operand_idx_in_add=*/0));
  ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
  AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
                                        layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/false);
}
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_19x50x1_dot_idx_1) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/19, /*k=*/50,
                         /*n=*/1, /*dot_operand_idx_in_add=*/1));
  ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
  AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
                                        layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/false);
}
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_19x50x19_dot_idx_0) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/19, /*k=*/50,
                         /*n=*/19, /*dot_operand_idx_in_add=*/0));
  ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
  AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
                                        layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/false);
}
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_19x50x19_dot_idx_1) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/19, /*k=*/50,
                         /*n=*/19, /*dot_operand_idx_in_add=*/1));
  ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
  AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
                                        layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/false);
}
TEST_F(CpuLayoutAssignmentTest, BatchDotLayoutMustBeRowMajor) {
const char* hlo_string = R"(
HloModule BatchDotLayoutMustBeRowMajor
ENTRY BatchDotLayoutMustBeRowMajor {
p0 = f32[10,1,10] parameter(0)
p1 = f32[10,10,1] parameter(1)
ROOT dot = f32[10,1,1] dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={2},
rhs_batch_dims={0},
rhs_contracting_dims={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* computation = module->entry_computation();
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) = ShapeLayout(
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 1, 10}, {2, 1, 0}));
*computation_layout.mutable_parameter_layout(1) = ShapeLayout(
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 10, 1}, {2, 1, 0}));
*computation_layout.mutable_result_layout() = ShapeLayout(
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 1, 1}, {1, 2, 0}));
AssignLayouts(module.get(), &computation_layout);
Shape expected_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 1, 1}, {2, 1, 0});
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Copy(op::ShapeWithLayout(expected_shape)));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Copy(op::Dot(
op::ShapeWithLayout(computation_layout.parameter_layout(0).shape()),
op::ShapeWithLayout(
computation_layout.parameter_layout(1).shape()))));
}
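// Note: the requested result layout {1, 2, 0} is deliberately non-default,
// so layout assignment keeps the batch dot itself row-major ({2, 1, 0}) and
// satisfies the entry layout with a trailing copy, as asserted above.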
}
} |
2,020 | cpp | tensorflow/tensorflow | conv_canonicalization | third_party/xla/xla/service/cpu/conv_canonicalization.cc | third_party/xla/xla/service/cpu/conv_canonicalization_test.cc | #ifndef XLA_SERVICE_CPU_CONV_CANONICALIZATION_H_
#define XLA_SERVICE_CPU_CONV_CANONICALIZATION_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/cpu/target_machine_features.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace cpu {
class ConvCanonicalization : public HloModulePass {
public:
explicit ConvCanonicalization(
const TargetMachineFeatures* target_machine_features)
: target_machine_features_(*target_machine_features) {}
~ConvCanonicalization() override {}
absl::string_view name() const override {
return "convolution-canonicalization";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const TargetMachineFeatures& target_machine_features_;
};
}
}
#endif
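// Hedged usage sketch (illustrative, not part of the original file): running
// the pass standalone over a module. `features` is a hypothetical
// TargetMachineFeatures instance; real pipelines obtain one from the CPU
// compiler.
//
//   cpu::ConvCanonicalization pass(&features);
//   TF_ASSIGN_OR_RETURN(bool changed, pass.Run(module));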
#include "xla/service/cpu/conv_canonicalization.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/permutation_util.h"
#include "xla/service/cpu/cpu_runtime.h"
#include "xla/service/cpu/ir_emission_utils.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace cpu {
absl::StatusOr<bool> ConvCanonicalization::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloInstruction* hlo :
module->entry_computation()->MakeInstructionPostOrder()) {
if (hlo->opcode() == HloOpcode::kConvolution &&
!PotentiallyImplementedAsEigenConvolution(*hlo,
target_machine_features_)) {
const ConvolutionDimensionNumbers& dnums =
hlo->convolution_dimension_numbers();
auto input_batch_dim = dnums.input_batch_dimension();
auto input_feature_dim = dnums.input_feature_dimension();
auto kernel_input_feature_dim = dnums.kernel_input_feature_dimension();
auto kernel_output_feature_dim = dnums.kernel_output_feature_dimension();
const int64_t num_spatial_dims = dnums.output_spatial_dimensions_size();
const int64_t num_dims = num_spatial_dims + 2;
HloInstruction* input = hlo->mutable_operand(0);
std::vector<int64_t> new_input_dim_order(num_dims);
std::vector<int64_t> new_input_dims(num_dims);
new_input_dim_order[0] = input_batch_dim;
new_input_dims[0] = input->shape().dimensions(input_batch_dim);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
new_input_dim_order[i + 1] = dnums.input_spatial_dimensions(i);
new_input_dims[i + 1] =
input->shape().dimensions(dnums.input_spatial_dimensions(i));
}
new_input_dim_order[num_dims - 1] = input_feature_dim;
new_input_dims[num_dims - 1] =
input->shape().dimensions(input_feature_dim);
Shape new_input_shape =
ShapeUtil::MakeShape(input->shape().element_type(), new_input_dims);
HloInstruction* new_input = module->entry_computation()->AddInstruction(
HloInstruction::CreateTranspose(new_input_shape, input,
new_input_dim_order));
HloInstruction* kernel = hlo->mutable_operand(1);
std::vector<int64_t> new_kernel_dim_order(num_dims);
std::vector<int64_t> new_kernel_dims(num_dims);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
new_kernel_dim_order[i] = dnums.kernel_spatial_dimensions(i);
new_kernel_dims[i] =
kernel->shape().dimensions(dnums.kernel_spatial_dimensions(i));
}
new_kernel_dim_order[num_dims - 2] = kernel_input_feature_dim;
new_kernel_dims[num_dims - 2] =
kernel->shape().dimensions(kernel_input_feature_dim);
new_kernel_dim_order[num_dims - 1] = kernel_output_feature_dim;
new_kernel_dims[num_dims - 1] =
kernel->shape().dimensions(kernel_output_feature_dim);
Shape new_kernel_shape =
ShapeUtil::MakeShape(kernel->shape().element_type(), new_kernel_dims);
HloInstruction* new_kernel = module->entry_computation()->AddInstruction(
HloInstruction::CreateTranspose(new_kernel_shape, kernel,
new_kernel_dim_order));
std::vector<int64_t> new_output_dim_order(num_dims);
std::vector<int64_t> new_conv_dims(num_dims);
auto output_batch_dim = dnums.output_batch_dimension();
auto output_feature_dim = dnums.output_feature_dimension();
new_output_dim_order[0] = output_batch_dim;
new_conv_dims[0] = hlo->shape().dimensions(output_batch_dim);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
new_output_dim_order[i + 1] = dnums.output_spatial_dimensions(i);
new_conv_dims[i + 1] =
hlo->shape().dimensions(dnums.output_spatial_dimensions(i));
}
new_output_dim_order[num_dims - 1] = output_feature_dim;
new_conv_dims[num_dims - 1] = hlo->shape().dimensions(output_feature_dim);
Shape new_conv_shape =
ShapeUtil::MakeShape(hlo->shape().element_type(), new_conv_dims);
ConvolutionDimensionNumbers new_dnums;
new_dnums.set_input_batch_dimension(0);
new_dnums.set_output_batch_dimension(0);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
new_dnums.add_input_spatial_dimensions(i + 1);
new_dnums.add_kernel_spatial_dimensions(i);
new_dnums.add_output_spatial_dimensions(i + 1);
}
new_dnums.set_input_feature_dimension(num_dims - 1);
new_dnums.set_output_feature_dimension(num_dims - 1);
new_dnums.set_kernel_input_feature_dimension(num_dims - 2);
new_dnums.set_kernel_output_feature_dimension(num_dims - 1);
HloInstruction* new_conv = module->entry_computation()->AddInstruction(
HloInstruction::CreateConvolve(
new_conv_shape, new_input, new_kernel, hlo->feature_group_count(),
hlo->batch_group_count(), hlo->window(), new_dnums,
hlo->precision_config()));
TF_RETURN_IF_ERROR(module->entry_computation()->ReplaceWithNewInstruction(
hlo, HloInstruction::CreateTranspose(
hlo->shape(), new_conv,
InversePermutation(new_output_dim_order))));
changed = true;
}
}
return changed;
}
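// The canonical form produced above: inputs become {batch, spatial...,
// feature}, kernels become {spatial..., input_feature, output_feature}, and
// outputs mirror the input order. For a 2D convolution:
//
//   input:  {batch, h, w, in_features}
//   kernel: {h, w, in_features, out_features}
//   output: {batch, h, w, out_features}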
}
} | #include "xla/service/cpu/conv_canonicalization.h"
#include <vector>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/cpu/target_machine_features_fake.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
namespace xla {
namespace cpu {
using ::testing::ElementsAre;
class ConvCanonicalizationTest : public HloTestBase {
public:
ConvCanonicalizationTest() {
for (int i = 0; i < 2; ++i) {
auto dim = conv_window_.add_dimensions();
dim->set_size(kWindowSize);
dim->set_stride(1);
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_window_dilation(1);
dim->set_base_dilation(1);
}
}
protected:
Window conv_window_;
static constexpr int kBatchSize = 50;
static constexpr int kInputSize = 28;
static constexpr int kWindowSize = 5;
static constexpr int kInputFeatureCount = 32;
static constexpr int kOutputFeatureCount = 64;
};
TEST_F(ConvCanonicalizationTest, NonCanonicalToCanonical) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR4FromArray4D(Array4D<float>(
kInputFeatureCount, kBatchSize, kInputSize, kInputSize))));
auto kernel = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR4FromArray4D(Array4D<float>(
kOutputFeatureCount, kInputFeatureCount, kWindowSize, kWindowSize))));
ConvolutionDimensionNumbers dnums;
dnums.set_input_batch_dimension(1);
dnums.set_output_batch_dimension(1);
dnums.add_input_spatial_dimensions(2);
dnums.add_output_spatial_dimensions(2);
dnums.add_input_spatial_dimensions(3);
dnums.add_output_spatial_dimensions(3);
dnums.set_input_feature_dimension(0);
dnums.set_output_feature_dimension(0);
dnums.add_kernel_spatial_dimensions(2);
dnums.add_kernel_spatial_dimensions(3);
dnums.set_kernel_input_feature_dimension(1);
dnums.set_kernel_output_feature_dimension(0);
auto output_size = kInputSize - kWindowSize + 1;
builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeUtil::MakeShape(
F32, {kOutputFeatureCount, kBatchSize, output_size, output_size}),
      input, kernel, /*feature_group_count=*/1, /*batch_group_count=*/1,
conv_window_, dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features(
[](int64_t shape_size) {
return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;
});
ConvCanonicalization conv_canonicalization(&target_machine_features);
EXPECT_TRUE(conv_canonicalization.Run(module.get()).value());
const HloInstruction* output_reshape = entry_computation->root_instruction();
EXPECT_EQ(HloOpcode::kTranspose, output_reshape->opcode());
const HloInstruction* canonical_conv = output_reshape->operand(0);
EXPECT_EQ(HloOpcode::kConvolution, canonical_conv->opcode());
const HloInstruction* input_reshape = canonical_conv->operand(0);
EXPECT_EQ(HloOpcode::kTranspose, input_reshape->opcode());
const HloInstruction* kernel_reshape = canonical_conv->operand(1);
EXPECT_EQ(HloOpcode::kTranspose, kernel_reshape->opcode());
EXPECT_THAT(input_reshape->dimensions(), ElementsAre(1, 2, 3, 0));
EXPECT_THAT(kernel_reshape->dimensions(), ElementsAre(2, 3, 1, 0));
EXPECT_THAT(output_reshape->dimensions(), ElementsAre(3, 0, 1, 2));
}
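// Derivation of the expected permutations: the original dnums place the
// feature dimension at 0, batch at 1, and the spatial dims at {2, 3}. The
// canonical input order {batch, s0, s1, feature} therefore reads dims
// {1, 2, 3, 0}; the kernel order {s0, s1, in, out} reads {2, 3, 1, 0}; and
// the output transpose is the inverse permutation, {3, 0, 1, 2}.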
TEST_F(ConvCanonicalizationTest, CanonicalStaysTheSame) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR4FromArray4D(Array4D<float>(
kBatchSize, kInputSize, kInputSize, kInputFeatureCount))));
auto kernel = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR4FromArray4D(Array4D<float>(
kWindowSize, kWindowSize, kInputFeatureCount, kOutputFeatureCount))));
ConvolutionDimensionNumbers dnums;
dnums.set_input_batch_dimension(0);
dnums.set_output_batch_dimension(0);
dnums.add_input_spatial_dimensions(1);
dnums.add_output_spatial_dimensions(1);
dnums.add_input_spatial_dimensions(2);
dnums.add_output_spatial_dimensions(2);
dnums.set_input_feature_dimension(3);
dnums.set_output_feature_dimension(3);
dnums.add_kernel_spatial_dimensions(0);
dnums.add_kernel_spatial_dimensions(1);
dnums.set_kernel_input_feature_dimension(2);
dnums.set_kernel_output_feature_dimension(3);
auto output_size = kInputSize - kWindowSize + 1;
builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeUtil::MakeShape(
F32, {kBatchSize, output_size, output_size, kOutputFeatureCount}),
      input, kernel, /*feature_group_count=*/1, /*batch_group_count=*/1,
conv_window_, dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features(
[](int64_t shape_size) {
return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;
});
ConvCanonicalization conv_canonicalization(&target_machine_features);
EXPECT_FALSE(conv_canonicalization.Run(module.get()).value());
}
}
} |
2,021 | cpp | tensorflow/tensorflow | infeed_thunk | third_party/xla/xla/backends/cpu/runtime/infeed_thunk.cc | third_party/xla/xla/backends/cpu/runtime/infeed_thunk_test.cc | #ifndef XLA_SERVICE_GPU_RUNTIME_INFEED_THUNK_H_
#define XLA_SERVICE_GPU_RUNTIME_INFEED_THUNK_H_
#include <vector>
#include "absl/status/status.h"
#include "xla/service/gpu/runtime/thunk.h"
namespace xla {
namespace gpu {
class InfeedThunk : public Thunk {
public:
InfeedThunk(ThunkInfo thunk_info, std::vector<ShapedSlice> dest_slices);
InfeedThunk(const InfeedThunk&) = delete;
InfeedThunk& operator=(const InfeedThunk&) = delete;
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
private:
const std::vector<ShapedSlice> dest_slices_;
};
}
}
#endif
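// Hedged construction sketch (added for illustration): each ShapedSlice
// pairs a destination buffer slice with the shape the infeed delivers into
// it. `info`, `slice`, and `shape` are hypothetical values.
//
//   std::vector<ShapedSlice> dest = {{slice, shape}};
//   InfeedThunk thunk(std::move(info), std::move(dest));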
#include "xla/service/gpu/runtime/infeed_thunk.h"
#include <cstddef>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/infeed_manager.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_handle.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace gpu {
InfeedThunk::InfeedThunk(ThunkInfo thunk_info,
std::vector<ShapedSlice> dest_slices)
: Thunk(Kind::kInfeed, thunk_info), dest_slices_(std::move(dest_slices)) {}
absl::Status InfeedThunk::ExecuteOnStream(const ExecuteParams& params) {
se::Stream& stream = *params.stream;
const BufferAllocations& buffer_allocations = *params.buffer_allocations;
VLOG(2) << "Infeeding to GPU";
ShapeTree<se::DeviceMemoryHandle> source_buffers =
GetOrCreateInfeedManager(stream.parent())->BlockingGetNextDestination();
size_t index = 0;
for (auto& source : source_buffers.leaves()) {
const ShapeIndex& shape_index = source.first;
se::DeviceMemoryHandle& buffer = source.second;
const Shape& source_shape =
ShapeUtil::GetSubshape(source_buffers.shape(), shape_index);
TF_RET_CHECK(
ShapeUtil::ReshapeIsBitcast(dest_slices_[index].shape, source_shape))
<< "Mismatch between infeed source buffer shape "
<< ShapeUtil::HumanStringWithLayout(source_shape)
<< " and infeed dest buffer shape "
<< ShapeUtil::HumanStringWithLayout(dest_slices_[index].shape);
se::DeviceMemoryBase dest_address =
buffer_allocations.GetDeviceAddress(dest_slices_[index++].slice);
TF_RETURN_IF_ERROR(
stream.Memcpy(&dest_address, buffer.memory(), buffer.memory().size()));
}
CHECK_EQ(index, dest_slices_.size())
<< "Infeed did not populate all destination buffers";
absl::Status block_status = stream.BlockHostUntilDone();
if (!block_status.ok()) {
return Internal("Failed to complete data transfer on stream %p: %s",
&stream, block_status.message());
}
VLOG(2) << "Infeeding to GPU complete";
return absl::OkStatus();
}
}
} | #include "xla/service/cpu/runtime/infeed_thunk.h"
#include <memory>
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
TEST(InfeedThunkTest, BufferUses) {
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice infeed_slice(&alloc, 10, 40);
InfeedThunk::InfeedBuffer infeed_buffer = {
infeed_slice,
ShapeUtil::MakeShape(F32, {10}),
};
TF_ASSERT_OK_AND_ASSIGN(auto thunk,
InfeedThunk::Create({"infeed"}, {infeed_buffer}));
EXPECT_EQ(thunk->buffer_uses().size(), 2);
EXPECT_EQ(thunk->buffer_uses()[0], BufferUse::Write(infeed_slice));
BufferAllocation::Slice side_effect_slice(&alloc, 0, 1);
EXPECT_EQ(thunk->buffer_uses()[1], BufferUse::Write(side_effect_slice));
}
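// The second expected use is a 1-byte write at offset 0 of the same
// allocation; it appears to model the infeed's ordering side effect (so
// buffer-use analysis serializes infeeds) rather than a real data transfer.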
}
} |
2,022 | cpp | tensorflow/tensorflow | copy_thunk | third_party/xla/xla/backends/cpu/runtime/copy_thunk.cc | third_party/xla/xla/backends/cpu/runtime/copy_thunk_test.cc | #ifndef XLA_SERVICE_GPU_RUNTIME_COPY_THUNK_H_
#define XLA_SERVICE_GPU_RUNTIME_COPY_THUNK_H_
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/event.h"
#include "xla/stream_executor/stream_executor.h"
namespace xla {
namespace gpu {
class DeviceToDeviceCopyThunk : public Thunk {
public:
DeviceToDeviceCopyThunk(ThunkInfo thunk_info,
const BufferAllocation::Slice& source_buffer,
const BufferAllocation::Slice& destination_buffer,
uint64_t mem_size);
DeviceToDeviceCopyThunk(const DeviceToDeviceCopyThunk&) = delete;
DeviceToDeviceCopyThunk& operator=(const DeviceToDeviceCopyThunk&) = delete;
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
const BufferAllocation::Slice& source() const { return source_buffer_; }
const BufferAllocation::Slice& destination() const {
return destination_buffer_;
}
uint64_t size_bytes() const { return mem_size_; }
private:
const BufferAllocation::Slice source_buffer_;
const BufferAllocation::Slice destination_buffer_;
const uint64_t mem_size_;
};
class CopyThunk : public Thunk {
public:
class AsyncEvents {
public:
absl::Status Emplace(se::StreamExecutor* executor,
const HloInstruction* instr,
std::unique_ptr<se::Event> event);
absl::StatusOr<std::unique_ptr<se::Event>> Extract(
se::StreamExecutor* executor, const HloInstruction* instr);
private:
using Key = std::pair<se::StreamExecutor*, const HloInstruction*>;
absl::Mutex mutex_;
absl::flat_hash_map<Key, std::unique_ptr<se::Event>> events_
ABSL_GUARDED_BY(mutex_);
};
CopyThunk(ThunkInfo thunk_info, const BufferAllocation::Slice& source_buffer,
const BufferAllocation::Slice& destination_buffer,
uint64_t mem_size);
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
const BufferAllocation::Slice& source() const { return source_buffer_; }
const BufferAllocation::Slice& destination() const {
return destination_buffer_;
}
uint64_t size_bytes() const { return mem_size_; }
private:
const BufferAllocation::Slice source_buffer_;
const BufferAllocation::Slice destination_buffer_;
const uint64_t mem_size_;
};
class DeviceToHostCopyThunk : public CopyThunk {
public:
DeviceToHostCopyThunk(ThunkInfo thunk_info,
const BufferAllocation::Slice& source_buffer,
const BufferAllocation::Slice& destination_buffer,
uint64_t mem_size,
std::shared_ptr<CopyThunk::AsyncEvents> events,
const HloInstruction* instr);
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
private:
std::shared_ptr<CopyThunk::AsyncEvents> async_events_;
const HloInstruction* instr_;
};
class HostToDeviceCopyThunk : public CopyThunk {
public:
HostToDeviceCopyThunk(ThunkInfo thunk_info,
const BufferAllocation::Slice& source_buffer,
const BufferAllocation::Slice& destination_buffer,
uint64_t mem_size,
std::shared_ptr<CopyThunk::AsyncEvents> events,
const HloInstruction* instr);
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
private:
std::shared_ptr<CopyThunk::AsyncEvents> async_events_;
const HloInstruction* instr_;
};
class CopyDoneThunk : public Thunk {
public:
CopyDoneThunk(Thunk::Kind kind, ThunkInfo thunk_info,
std::shared_ptr<CopyThunk::AsyncEvents> events,
const HloInstruction* copy_start_instr);
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
private:
std::shared_ptr<CopyThunk::AsyncEvents> async_events_;
const HloInstruction* copy_start_instr_;
};
}
}
#endif
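// Hedged wiring sketch (illustrative): an async D2H copy is split into a
// start thunk and a CopyDoneThunk that share one AsyncEvents map keyed by
// (executor, instruction). `info`, `src`, `dst`, `size`, `start_instr`, and
// the exact done-thunk Kind are assumptions here.
//
//   auto events = std::make_shared<CopyThunk::AsyncEvents>();
//   DeviceToHostCopyThunk start(info, src, dst, size, events, start_instr);
//   CopyDoneThunk done(Thunk::Kind::kCopyDone, info, events, start_instr);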
#include "xla/service/gpu/runtime/copy_thunk.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/event.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
DeviceToDeviceCopyThunk::DeviceToDeviceCopyThunk(
ThunkInfo thunk_info, const BufferAllocation::Slice& source_buffer,
const BufferAllocation::Slice& destination_buffer, uint64_t mem_size)
: Thunk(Kind::kCopy, std::move(thunk_info)),
source_buffer_(source_buffer),
destination_buffer_(destination_buffer),
mem_size_(mem_size) {}
absl::Status DeviceToDeviceCopyThunk::ExecuteOnStream(
const ExecuteParams& params) {
se::DeviceMemoryBase destination_data =
params.buffer_allocations->GetDeviceAddress(destination_buffer_);
se::DeviceMemoryBase source_data =
params.buffer_allocations->GetDeviceAddress(source_buffer_);
VLOG(3) << "Memcpy D2D of size " << mem_size_ << " from "
<< source_data.opaque() << " to " << destination_data.opaque();
return params.stream->Memcpy(&destination_data, source_data, mem_size_);
}
CopyThunk::CopyThunk(ThunkInfo thunk_info,
const BufferAllocation::Slice& source_buffer,
const BufferAllocation::Slice& destination_buffer,
uint64_t mem_size)
: Thunk(Kind::kCopy, std::move(thunk_info)),
source_buffer_(source_buffer),
destination_buffer_(destination_buffer),
mem_size_(mem_size) {}
absl::Status CopyThunk::ExecuteOnStream(const ExecuteParams& params) {
return absl::OkStatus();
}
absl::Status CopyThunk::AsyncEvents::Emplace(se::StreamExecutor* executor,
const HloInstruction* instr,
std::unique_ptr<se::Event> event) {
Key key = {executor, instr};
absl::MutexLock lock(&mutex_);
VLOG(3) << "Emplace event " << event.get();
if (auto [it, inserted] = events_.try_emplace(key, std::move(event));
inserted) {
return absl::OkStatus();
}
return absl::InternalError("Async copy event already exists!");
}
absl::StatusOr<std::unique_ptr<se::Event>> CopyThunk::AsyncEvents::Extract(
se::StreamExecutor* executor, const HloInstruction* instr) {
Key key = {executor, instr};
absl::MutexLock lock(&mutex_);
if (auto event = events_.extract(key)) {
VLOG(3) << "Extract event " << event.mapped().get();
return std::move(event.mapped());
}
return absl::InternalError("Async copy event was not found!");
}
DeviceToHostCopyThunk::DeviceToHostCopyThunk(
ThunkInfo thunk_info, const BufferAllocation::Slice& source_buffer,
const BufferAllocation::Slice& destination_buffer, uint64_t mem_size,
std::shared_ptr<CopyThunk::AsyncEvents> async_events,
const HloInstruction* instr)
: CopyThunk(std::move(thunk_info), source_buffer, destination_buffer,
mem_size),
async_events_(std::move(async_events)),
instr_(instr) {}
absl::Status DeviceToHostCopyThunk::ExecuteOnStream(
const ExecuteParams& params) {
se::DeviceMemoryBase destination_data =
params.buffer_allocations->GetDeviceAddress(destination());
se::DeviceMemoryBase source_data =
params.buffer_allocations->GetDeviceAddress(source());
void* cpu_dst = destination_data.opaque();
TF_ASSIGN_OR_RETURN(
se::Stream * stream,
GetStreamForExecution(Thunk::execution_stream_id(), params));
TF_RETURN_IF_ERROR(stream->Memcpy(cpu_dst, source_data, size_bytes()));
if (stream == params.stream) {
VLOG(2) << "Memcpy D2H from the main stream";
return absl::OkStatus();
}
VLOG(2) << "Memcpy D2H from the other stream";
se::StreamExecutor* executor = params.stream->parent();
TF_ASSIGN_OR_RETURN(auto event, executor->CreateEvent());
TF_RETURN_IF_ERROR(stream->RecordEvent(event.get()));
VLOG(3) << "Emplace events: " << event.get()
<< " for instr: " << instr_->ToString();
return async_events_->Emplace(executor, instr_, std::move(event));
}
HostToDeviceCopyThunk::HostToDeviceCopyThunk(
ThunkInfo thunk_info, const BufferAllocation::Slice& source_buffer,
const BufferAllocation::Slice& destination_buffer, uint64_t mem_size,
std::shared_ptr<CopyThunk::AsyncEvents> async_events,
const HloInstruction* instr)
: CopyThunk(std::move(thunk_info), source_buffer, destination_buffer,
mem_size),
async_events_(std::move(async_events)),
instr_(instr) {}
absl::Status HostToDeviceCopyThunk::ExecuteOnStream(
const ExecuteParams& params) {
se::DeviceMemoryBase destination_data =
params.buffer_allocations->GetDeviceAddress(destination());
se::DeviceMemoryBase source_data =
params.buffer_allocations->GetDeviceAddress(source());
void* cpu_src = source_data.opaque();
TF_ASSIGN_OR_RETURN(
se::Stream * stream,
GetStreamForExecution(Thunk::execution_stream_id(), params));
TF_RETURN_IF_ERROR(stream->Memcpy(&destination_data, cpu_src, size_bytes()));
if (stream == params.stream) {
VLOG(2) << "Memcpy H2D from the main stream";
return absl::OkStatus();
}
VLOG(2) << "Memcpy H2D from the other stream";
se::StreamExecutor* executor = params.stream->parent();
TF_ASSIGN_OR_RETURN(auto event, executor->CreateEvent());
TF_RETURN_IF_ERROR(stream->RecordEvent(event.get()));
VLOG(3) << "Emplace events: " << event.get()
<< " for instr: " << instr_->ToString();
return async_events_->Emplace(executor, instr_, std::move(event));
}
CopyDoneThunk::CopyDoneThunk(
Thunk::Kind kind, ThunkInfo thunk_info,
std::shared_ptr<CopyThunk::AsyncEvents> async_events,
const HloInstruction* copy_start_instr)
: Thunk(kind, std::move(thunk_info)),
async_events_(std::move(async_events)),
copy_start_instr_(copy_start_instr) {}
absl::Status CopyDoneThunk::ExecuteOnStream(const ExecuteParams& params) {
VLOG(3) << "CopyDone thunk between a host and a device for: "
<< copy_start_instr_->ToString();
se::StreamExecutor* executor = params.stream->parent();
TF_ASSIGN_OR_RETURN(std::unique_ptr<se::Event> event,
async_events_->Extract(executor, copy_start_instr_));
return params.stream->WaitFor(event.get());
}
}
} | #include "xla/service/cpu/runtime/copy_thunk.h"
#include <cstddef>
#include <vector>
#include "xla/layout_util.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/runtime/buffer_allocations.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
TEST(CopyThunkTest, CopySameShape) {
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<float> src = {1.0, 2.0, 3.0, 4.0};
std::vector<float> dst(4, 0.0);
size_t size_in_bytes = src.size() * sizeof(float);
buffers.emplace_back(se::DeviceMemoryBase(src.data(), size_in_bytes));
buffers.emplace_back(se::DeviceMemoryBase(dst.data(), size_in_bytes));
BufferAllocations allocations(buffers);
BufferAllocation src_alloc(0, size_in_bytes, 0);
BufferAllocation dst_alloc(1, size_in_bytes, 0);
BufferAllocation::Slice src_slice(&src_alloc, 0, size_in_bytes);
BufferAllocation::Slice dst_slice(&dst_alloc, 0, size_in_bytes);
Shape shape = ShapeUtil::MakeShape(F32, {2, 2});
TF_ASSERT_OK_AND_ASSIGN(
auto thunk,
CopyThunk::Create({"copy"}, src_slice, shape, dst_slice, shape));
Thunk::ExecuteParams params = {nullptr, &allocations};
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
EXPECT_EQ(src, dst);
}
TEST(CopyThunkTest, CopyTransposed) {
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<float> src = {1.0, 2.0, 3.0, 4.0};
std::vector<float> dst(4, 0.0);
size_t size_in_bytes = src.size() * sizeof(float);
buffers.emplace_back(se::DeviceMemoryBase(src.data(), size_in_bytes));
buffers.emplace_back(se::DeviceMemoryBase(dst.data(), size_in_bytes));
BufferAllocations allocations(buffers);
BufferAllocation src_alloc(0, size_in_bytes, 0);
BufferAllocation dst_alloc(1, size_in_bytes, 0);
BufferAllocation::Slice src_slice(&src_alloc, 0, size_in_bytes);
BufferAllocation::Slice dst_slice(&dst_alloc, 0, size_in_bytes);
Shape src_shape = ShapeUtil::MakeShape(F32, {2, 2});
*src_shape.mutable_layout() = LayoutUtil::MakeLayout({0, 1});
Shape dst_shape = ShapeUtil::MakeShape(F32, {2, 2});
TF_ASSERT_OK_AND_ASSIGN(
auto thunk,
CopyThunk::Create({"copy"}, src_slice, src_shape, dst_slice, dst_shape));
Thunk::ExecuteParams params = {nullptr, &allocations};
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
std::vector<float> expected = {1.0, 3.0, 2.0, 4.0};
EXPECT_EQ(expected, dst);
}
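// Worked example for the transposed copy above: the source layout {0, 1} is
// column-major, so the flat data {1, 2, 3, 4} denotes the logical matrix
// [[1, 3], [2, 4]]; rewriting it into the row-major destination yields
// {1, 3, 2, 4}.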
}
} |
2,023 | cpp | tensorflow/tensorflow | convolution_thunk | third_party/xla/xla/backends/cpu/runtime/convolution_thunk.cc | third_party/xla/xla/backends/cpu/runtime/convolution_thunk_test.cc | #ifndef XLA_SERVICE_GPU_RUNTIME_CONVOLUTION_THUNK_H_
#define XLA_SERVICE_GPU_RUNTIME_CONVOLUTION_THUNK_H_
#include <cstdint>
#include <memory>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/gpu_conv_runner.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/stream_executor.h"
namespace xla {
namespace gpu {
class ConvolutionThunk : public Thunk {
public:
ConvolutionThunk(ThunkInfo thunk_info, GpuConvConfig config,
std::vector<BufferAllocation::Slice> operand_slices,
std::vector<BufferAllocation::Slice> result_slices,
BufferAllocation::Slice scratch_slice);
ConvolutionThunk(const ConvolutionThunk&) = delete;
ConvolutionThunk& operator=(const ConvolutionThunk&) = delete;
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
private:
std::vector<BufferAllocation::Slice> operand_buffers_;
std::vector<BufferAllocation::Slice> result_buffers_;
BufferAllocation::Slice scratch_buffer_;
GenericConvRunner& GetOrCreateRunner(const stream_executor::Stream* stream,
bool* runner_created);
const GpuConvConfig config_;
absl::Mutex mu_;
absl::flat_hash_map<const stream_executor::Stream*,
std::unique_ptr<GenericConvRunner>>
runner_cache_ ABSL_GUARDED_BY(mu_);
};
class ConvolutionReorderThunk : public Thunk {
public:
ConvolutionReorderThunk(
ThunkInfo thunk_info, absl::Span<int64_t> filter_nchw,
absl::InlinedVector<BufferAllocation::Slice, 2> operand_slices,
absl::InlinedVector<BufferAllocation::Slice, 2> result_slices);
ConvolutionReorderThunk(const ConvolutionReorderThunk&) = delete;
ConvolutionReorderThunk& operator=(const ConvolutionReorderThunk&) = delete;
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
private:
static se::dnn::FilterDescriptor CreateFilterDescriptor(
absl::Span<int64_t> filter_nchw);
const se::dnn::FilterDescriptor filter_descriptor_;
absl::InlinedVector<BufferAllocation::Slice, 2> operand_buffers_;
absl::InlinedVector<BufferAllocation::Slice, 2> result_buffers_;
};
}
}
#endif
#include "xla/service/gpu/runtime/convolution_thunk.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/service/buffer_assignment.h"
#if TENSORFLOW_USE_ROCM
#include "xla/service/gpu/stream_executor_util.h"
#endif
#include "xla/service/gpu/gpu_conv_runner.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/scratch_allocator.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace gpu {
ConvolutionThunk::ConvolutionThunk(
ThunkInfo thunk_info, GpuConvConfig config,
std::vector<BufferAllocation::Slice> operand_slices,
std::vector<BufferAllocation::Slice> result_slices,
BufferAllocation::Slice scratch_slice)
: Thunk(Kind::kConvolution, thunk_info),
operand_buffers_(std::move(operand_slices)),
result_buffers_(std::move(result_slices)),
scratch_buffer_(scratch_slice),
config_(std::move(config)) {}
GenericConvRunner& ConvolutionThunk::GetOrCreateRunner(
const stream_executor::Stream* stream, bool* runner_created) {
absl::MutexLock lock(&mu_);
auto it = runner_cache_.find(stream);
*runner_created = (it == runner_cache_.end());
if (*runner_created) {
it = runner_cache_
.insert({stream, std::make_unique<GenericConvRunner>(config_)})
.first;
}
return *it->second;
}
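// Keying the cache by stream keeps each GenericConvRunner's state private to
// one stream, so concurrent executions of the same thunk on different
// streams never contend on (or corrupt) a shared runner.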
absl::Status ConvolutionThunk::ExecuteOnStream(const ExecuteParams& params) {
const auto& buffer_allocations = *params.buffer_allocations;
std::vector<se::DeviceMemoryBase> operand_se_buffers, result_se_buffers;
operand_se_buffers.reserve(operand_buffers_.size());
for (BufferAllocation::Slice buffer : operand_buffers_) {
operand_se_buffers.push_back(buffer_allocations.GetDeviceAddress(buffer));
}
result_se_buffers.reserve(result_buffers_.size());
for (BufferAllocation::Slice buffer : result_buffers_) {
result_se_buffers.push_back(buffer_allocations.GetDeviceAddress(buffer));
}
se::DeviceMemoryBase scratch =
buffer_allocations.GetDeviceAddress(scratch_buffer_);
bool runner_created = false;
RunConvOptions opts;
opts.runner_cache = &GetOrCreateRunner(params.stream, &runner_created);
#if TENSORFLOW_USE_ROCM
if (runner_created) {
TF_ASSIGN_OR_RETURN(
GpuConvParams conv_params,
GetGpuConvParams(config_, operand_se_buffers, result_se_buffers));
TF_ASSIGN_OR_RETURN(se::dnn::ConvolutionKind kind,
GetDNNConvKindFromCudnnConvKind(config_.kind));
TF_ASSIGN_OR_RETURN(se::dnn::DataType input_type,
GetDNNDataTypeFromPrimitiveType(config_.input_type));
TF_ASSIGN_OR_RETURN(se::dnn::DataType output_type,
GetDNNDataTypeFromPrimitiveType(config_.output_type));
TF_ASSIGN_OR_RETURN(auto dnn,
se::dnn::internal::GetDnnFromStream(params.stream));
se::OwningScratchAllocator<> scratch_allocator(
buffer_allocations.device_ordinal(),
buffer_allocations.memory_allocator());
std::vector<se::dnn::ProfileResult> profile_results;
dnn->GetMIOpenConvolveAlgorithms(
kind, input_type, output_type, params.stream, config_.input_descriptor,
conv_params.input_buf, config_.filter_descriptor,
conv_params.filter_buf, config_.output_descriptor,
conv_params.output_buf, config_.conv_desc, &scratch_allocator,
&profile_results);
}
#endif
TF_RETURN_IF_ERROR(RunGpuConv(config_, absl::MakeSpan(operand_se_buffers),
absl::MakeSpan(result_se_buffers), scratch,
params.stream, opts));
if (!params.stream->ok()) {
return Internal("ConvolutionThunk::ExecuteOnStream failed.");
}
return absl::OkStatus();
}
ConvolutionReorderThunk::ConvolutionReorderThunk(
ThunkInfo thunk_info, absl::Span<int64_t> filter_nchw,
absl::InlinedVector<BufferAllocation::Slice, 2> operand_slices,
absl::InlinedVector<BufferAllocation::Slice, 2> result_slices)
: Thunk(Kind::kConvolutionReorder, thunk_info),
filter_descriptor_(CreateFilterDescriptor(filter_nchw)),
operand_buffers_(operand_slices),
result_buffers_(result_slices) {}
absl::Status ConvolutionReorderThunk::ExecuteOnStream(
const ExecuteParams& params) {
bool has_bias = operand_buffers_.size() > 1;
CHECK_EQ(operand_buffers_.size(), result_buffers_.size());
const auto& buffer_allocations = *params.buffer_allocations;
auto filter_input = se::DeviceMemory<int8_t>(
buffer_allocations.GetDeviceAddress(operand_buffers_[0]));
auto filter_output = se::DeviceMemory<int8_t>(
buffer_allocations.GetDeviceAddress(result_buffers_[0]));
auto bias_input =
has_bias ? std::make_optional(se::DeviceMemory<float>(
buffer_allocations.GetDeviceAddress(operand_buffers_[1])))
: std::nullopt;
auto bias_output =
has_bias ? std::make_optional(se::DeviceMemory<float>(
buffer_allocations.GetDeviceAddress(result_buffers_[1])))
: std::nullopt;
auto dnn = params.stream->parent()->AsDnn();
if (dnn == nullptr) {
return absl::InternalError("No DNN for stream.");
}
return dnn->CudnnReorderConvolutionFilterAndBias(
params.stream, filter_descriptor_, filter_input, &filter_output,
std::move(bias_input), std::move(bias_output));
}
se::dnn::FilterDescriptor ConvolutionReorderThunk::CreateFilterDescriptor(
absl::Span<int64_t> filter_nchw) {
CHECK_EQ(filter_nchw.size(), 4);
se::dnn::FilterDescriptor filter_desc(2);
filter_desc.set_layout(se::dnn::FilterLayout::kOutputInputYX32);
filter_desc.set_output_feature_map_count(filter_nchw[0]);
filter_desc.set_input_feature_map_count(filter_nchw[1]);
filter_desc.set_input_filter_height(filter_nchw[2]);
filter_desc.set_input_filter_width(filter_nchw[3]);
return filter_desc;
}
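// Example: filter_nchw is ordered {O, I, H, W}, so {64, 32, 3, 3} describes
// a filter with 64 output feature maps, 32 input feature maps, and a 3x3
// window, to be reordered into cuDNN's kOutputInputYX32 layout.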
}
} | #include "xla/service/cpu/runtime/convolution_thunk.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "Eigen/Core"
#include "xla/primitive_util.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/runtime/buffer_allocations.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
struct ConvolutionDimensions {
int batch_size = 1;
int input_size = 3;
int input_channels = 5;
int kernel_size = 3;
int output_channels = 3;
int output_size = input_size - kernel_size + 1;
};
template <typename T>
class ConvolutionThunkTypedTest : public ::testing::Test {};
using CorrectTypes = ::testing::Types<float, Eigen::half>;
TYPED_TEST_SUITE(ConvolutionThunkTypedTest, CorrectTypes);
std::vector<int64_t> MakeInputDims(
int convolution_rank,
ConvolutionDimensions dims = ConvolutionDimensions()) {
std::vector<int64_t> input_dims = {dims.batch_size};
for (int i = 0; i < convolution_rank; ++i) {
input_dims.push_back(dims.input_size);
}
input_dims.push_back(dims.input_channels);
return input_dims;
}
std::vector<int64_t> MakeKernelDims(
int convolution_rank,
ConvolutionDimensions dims = ConvolutionDimensions()) {
std::vector<int64_t> kernel_dims = {};
for (int i = 0; i < convolution_rank; ++i) {
kernel_dims.push_back(dims.kernel_size);
}
kernel_dims.push_back(dims.input_channels);
kernel_dims.push_back(dims.output_channels);
return kernel_dims;
}
std::vector<int64_t> MakeOutputDims(
int convolution_rank,
ConvolutionDimensions dims = ConvolutionDimensions()) {
std::vector<int64_t> output_dims = {dims.batch_size};
for (int i = 0; i < convolution_rank; ++i) {
output_dims.push_back(dims.output_size);
}
output_dims.push_back(dims.output_channels);
return output_dims;
}
template <typename ElementType>
std::vector<ElementType> MakeDataVector(const std::vector<int64_t>& dims) {
auto size = absl::c_accumulate(dims, 1, std::multiplies<int>());
return std::vector<ElementType>(size, ElementType(0.0));
}
// Takes non-const references because se::DeviceMemoryBase stores a mutable
// void* to the underlying data; const vectors would not convert.
template <typename ElementType>
std::vector<MaybeOwningDeviceMemory> MakeBuffers(
    std::vector<ElementType>& input,
    std::vector<ElementType>& kernel,
    std::vector<ElementType>& output) {
std::vector<MaybeOwningDeviceMemory> buffers;
size_t input_size_in_bytes = input.size() * sizeof(ElementType);
buffers.emplace_back(se::DeviceMemoryBase(input.data(), input_size_in_bytes));
size_t kernel_size_in_bytes = kernel.size() * sizeof(ElementType);
buffers.emplace_back(
se::DeviceMemoryBase(kernel.data(), kernel_size_in_bytes));
size_t output_size_in_bytes = output.size() * sizeof(ElementType);
buffers.emplace_back(
se::DeviceMemoryBase(output.data(), output_size_in_bytes));
return buffers;
}
ConvolutionThunk::Options MakeConvolutionOptions() {
ConvolutionThunk::Options options;
options.multi_threaded = false;
options.use_acl = false;
return options;
}
ConvolutionDimensionNumbers MakeConvolutionDimensionNumbers(
int convolution_rank) {
ConvolutionDimensionNumbers dnums;
int dim = 0;
dnums.set_input_batch_dimension(dim++);
for (int i = 0; i < convolution_rank; ++i) {
dnums.add_input_spatial_dimensions(dim++);
}
dnums.set_input_feature_dimension(dim++);
dim = 0;
for (int i = 0; i < convolution_rank; ++i) {
dnums.add_kernel_spatial_dimensions(dim++);
}
dnums.set_kernel_input_feature_dimension(dim++);
dnums.set_kernel_output_feature_dimension(dim++);
dim = 0;
dnums.set_output_batch_dimension(dim++);
for (int i = 0; i < convolution_rank; ++i) {
dnums.add_output_spatial_dimensions(dim++);
}
dnums.set_output_feature_dimension(dim++);
return dnums;
}
Window MakeWindow(int convolution_rank) {
Window window;
for (int i = 0; i < convolution_rank; ++i) {
WindowDimension* window_dim = window.add_dimensions();
window_dim->set_stride(1);
window_dim->set_padding_low(0);
window_dim->set_padding_high(0);
window_dim->set_window_dilation(1);
window_dim->set_base_dilation(1);
}
return window;
}
template <typename ElementType>
class ConvolutionThunkBuilder {
public:
auto Build(int convolution_rank,
ConvolutionDimensions dims = ConvolutionDimensions()) {
auto input_dims = MakeInputDims(convolution_rank, dims);
auto kernel_dims = MakeKernelDims(convolution_rank, dims);
auto output_dims = MakeOutputDims(convolution_rank, dims);
input_ = MakeDataVector<ElementType>(input_dims);
kernel_ = MakeDataVector<ElementType>(kernel_dims);
output_ = MakeDataVector<ElementType>(output_dims);
size_t input_size_in_bytes = input_.size() * sizeof(ElementType);
buffers_.emplace_back(
se::DeviceMemoryBase(input_.data(), input_size_in_bytes));
size_t kernel_size_in_bytes = kernel_.size() * sizeof(ElementType);
buffers_.emplace_back(
se::DeviceMemoryBase(kernel_.data(), kernel_size_in_bytes));
size_t output_size_in_bytes = output_.size() * sizeof(ElementType);
buffers_.emplace_back(
se::DeviceMemoryBase(output_.data(), output_size_in_bytes));
allocations_ = std::make_unique<BufferAllocations>(buffers_);
input_alloc_ =
std::make_unique<BufferAllocation>(0, input_size_in_bytes, 0);
kernel_alloc_ =
std::make_unique<BufferAllocation>(1, kernel_size_in_bytes, 0);
output_alloc_ =
std::make_unique<BufferAllocation>(2, output_size_in_bytes, 0);
BufferAllocation::Slice input_slice(input_alloc_.get(), 0,
input_size_in_bytes);
BufferAllocation::Slice kernel_slice(kernel_alloc_.get(), 0,
kernel_size_in_bytes);
BufferAllocation::Slice output_slice(output_alloc_.get(), 0,
output_size_in_bytes);
auto primitive_type = primitive_util::NativeToPrimitiveType<ElementType>();
Shape input_shape = ShapeUtil::MakeShape(primitive_type, input_dims);
Shape kernel_shape = ShapeUtil::MakeShape(primitive_type, kernel_dims);
Shape output_shape = ShapeUtil::MakeShape(primitive_type, output_dims);
auto options = MakeConvolutionOptions();
auto dnums = MakeConvolutionDimensionNumbers(convolution_rank);
auto window = MakeWindow(convolution_rank);
return ConvolutionThunk::Create(
{"convolution"}, options, std::move(input_slice), input_shape,
std::move(kernel_slice), kernel_shape, std::move(output_slice),
output_shape, dnums, window,
        /*feature_group_count=*/1);
}
auto GetExecutionParams() {
return Thunk::ExecuteParams{nullptr, allocations_.get()};
}
private:
std::vector<ElementType> input_;
std::vector<ElementType> kernel_;
std::vector<ElementType> output_;
std::vector<MaybeOwningDeviceMemory> buffers_;
std::unique_ptr<BufferAllocations> allocations_;
std::unique_ptr<BufferAllocation> input_alloc_;
std::unique_ptr<BufferAllocation> kernel_alloc_;
std::unique_ptr<BufferAllocation> output_alloc_;
};
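// Builds and runs a convolution of the given rank end to end and asserts that
// the execute event completes without an error.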
template <typename ElementType>
void SuccessfulConvolution(int convolution_rank) {
ConvolutionThunkBuilder<ElementType> builder;
TF_ASSERT_OK_AND_ASSIGN(auto thunk, builder.Build(convolution_rank));
Thunk::ExecuteParams params = builder.GetExecutionParams();
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError()) << execute_event.GetError();
}
TYPED_TEST(ConvolutionThunkTypedTest, SuccessfulConvolution1D) {
SuccessfulConvolution<TypeParam>(1);
}
TYPED_TEST(ConvolutionThunkTypedTest, SuccessfulConvolution2D) {
SuccessfulConvolution<TypeParam>(2);
}
TYPED_TEST(ConvolutionThunkTypedTest, SuccessfulConvolution3D) {
SuccessfulConvolution<TypeParam>(3);
}
TEST(ConvolutionThunkTest, CreationErrorOnUnsupportedType) {
ConvolutionThunkBuilder<int> builder;
auto status_or_thunk = builder.Build(2);
EXPECT_EQ(status_or_thunk.status().code(),
absl::StatusCode::kInvalidArgument);
EXPECT_THAT(status_or_thunk.status().message(),
::testing::HasSubstr("Unsupported element type (S32)"));
}
TEST(ConvolutionThunkTest, CreationErrorOnIncorrectConvolutionRank) {
ConvolutionThunkBuilder<float> builder;
auto status_or_thunk = builder.Build(4);
EXPECT_EQ(status_or_thunk.status().code(),
absl::StatusCode::kInvalidArgument);
EXPECT_THAT(status_or_thunk.status().message(),
::testing::HasSubstr("Incorrect convolution rank (4)"));
}
}
} |
2,024 | cpp | tensorflow/tensorflow | outfeed_thunk | third_party/xla/xla/backends/cpu/runtime/outfeed_thunk.cc | third_party/xla/xla/backends/cpu/runtime/outfeed_thunk_test.cc | #ifndef XLA_SERVICE_GPU_RUNTIME_OUTFEED_THUNK_H_
#define XLA_SERVICE_GPU_RUNTIME_OUTFEED_THUNK_H_
#include <vector>
#include "absl/status/status.h"
#include "xla/service/gpu/runtime/thunk.h"
namespace xla {
namespace gpu {
class OutfeedThunk : public Thunk {
public:
OutfeedThunk(ThunkInfo thunk_info, std::vector<ShapedSlice> source_slices);
OutfeedThunk(const OutfeedThunk&) = delete;
OutfeedThunk& operator=(const OutfeedThunk&) = delete;
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
private:
const std::vector<ShapedSlice> source_slices_;
};
}
}
#endif
#include "xla/service/gpu/runtime/outfeed_thunk.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/outfeed_manager.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace gpu {
OutfeedThunk::OutfeedThunk(ThunkInfo thunk_info,
std::vector<ShapedSlice> source_slices)
: Thunk(Kind::kOutfeed, thunk_info),
source_slices_(std::move(source_slices)) {}
absl::Status OutfeedThunk::ExecuteOnStream(const ExecuteParams& params) {
se::Stream& stream = *params.stream;
const BufferAllocations& buffer_allocations = *params.buffer_allocations;
VLOG(2) << "Outfeeding from GPU";
OutfeedManager* outfeed_manager = GetOrCreateOutfeedManager(stream.parent());
ShapeTree<std::unique_ptr<OutfeedBuffer>>* output_buffers =
outfeed_manager->BlockingGetNextDestination();
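  // The destination is consumed above even when there is nothing to copy,
  // keeping the host-side outfeed queue in sync with the device.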
if (source_slices_.empty()) {
return absl::OkStatus();
}
const int64_t leaf_count = output_buffers->leaf_count();
TF_RET_CHECK(source_slices_.size() == leaf_count)
<< "Mismatch between number of outfeed inputs (" << source_slices_.size()
<< ") and outputs (" << leaf_count << ")";
auto output_leaf_it = output_buffers->leaf_begin();
for (int64_t index = 0; index < leaf_count; ++index) {
const ShapeIndex& shape_index = output_leaf_it->first;
std::unique_ptr<OutfeedBuffer>& buffer = output_leaf_it->second;
++output_leaf_it;
const Shape& output_shape =
ShapeUtil::GetSubshape(output_buffers->shape(), shape_index);
TF_RET_CHECK(
ShapeUtil::ReshapeIsBitcast(source_slices_[index].shape, output_shape))
<< "Mismatch between outfeed output buffer shape "
<< ShapeUtil::HumanStringWithLayout(output_shape)
<< " and outfeed source buffer shape "
<< ShapeUtil::HumanStringWithLayout(source_slices_[index].shape);
BufferAllocation::Slice source_slice = source_slices_[index].slice;
if (!source_slice.allocation())
return Internal("outfeed source missing buffer allocation");
se::DeviceMemoryBase data_address =
buffer_allocations.GetDeviceAddress(source_slice);
TF_RETURN_IF_ERROR(stream.Memcpy(buffer->destination()->untyped_data(),
data_address, buffer->length()));
TF_RETURN_IF_ERROR(stream.DoHostCallback([&buffer]() { buffer->Done(); }));
}
absl::Status block_status = stream.BlockHostUntilDone();
if (!block_status.ok()) {
return Internal("Failed to complete data transfer on stream %p: %s",
&stream, block_status.message());
}
VLOG(2) << "Outfeeding from GPU complete";
return absl::OkStatus();
}
}
} | #include "xla/service/cpu/runtime/outfeed_thunk.h"
#include <memory>
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
TEST(OutfeedThunkTest, BufferUses) {
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice outfeed_slice(&alloc, 10, 40);
OutfeedThunk::OutfeedBuffer outfeed_buffer = {
outfeed_slice,
ShapeUtil::MakeShape(F32, {10}),
};
TF_ASSERT_OK_AND_ASSIGN(auto thunk,
OutfeedThunk::Create({"outfeed"}, {outfeed_buffer}));
EXPECT_EQ(thunk->buffer_uses().size(), 2);
EXPECT_EQ(thunk->buffer_uses()[0], BufferUse::Read(outfeed_slice));
BufferAllocation::Slice side_effect_slice(&alloc, 0, 1);
EXPECT_EQ(thunk->buffer_uses()[1], BufferUse::Write(side_effect_slice));
}
}
} |
2,025 | cpp | tensorflow/tensorflow | while_thunk | third_party/xla/xla/backends/cpu/runtime/while_thunk.cc | third_party/xla/xla/backends/cpu/runtime/while_thunk_test.cc | #ifndef XLA_SERVICE_GPU_RUNTIME_WHILE_THUNK_H_
#define XLA_SERVICE_GPU_RUNTIME_WHILE_THUNK_H_
#include <cstdint>
#include <memory>
#include <optional>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/runtime/sequential_thunk.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/memory_allocation.h"
#include "xla/stream_executor/stream_executor.h"
namespace xla {
namespace gpu {
class WhileThunk : public Thunk {
public:
WhileThunk(ThunkInfo thunk_info,
const BufferAllocation::Slice& condition_result_buffer_index,
std::unique_ptr<SequentialThunk> condition_thunk_sequence,
std::unique_ptr<SequentialThunk> body_thunk_sequence,
std::optional<int64_t> trip_count = std::nullopt);
WhileThunk(const WhileThunk&) = delete;
WhileThunk& operator=(const WhileThunk&) = delete;
absl::Status Prepare(const PrepareParams& params,
ResourceRequests& resource_requests) override;
absl::Status Initialize(const InitializeParams& params) override;
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
SequentialThunk* condition_thunk_sequence() const {
return condition_thunk_sequence_.get();
}
SequentialThunk* body_thunk_sequence() const {
return body_thunk_sequence_.get();
}
const BufferAllocation::Slice& condition_result_buffer() const {
return condition_result_buffer_index_;
}
static absl::StatusOr<int64_t> CurrentLoopIteration(int64_t depth = 0);
private:
const BufferAllocation::Slice condition_result_buffer_index_;
std::unique_ptr<SequentialThunk> condition_thunk_sequence_;
std::unique_ptr<SequentialThunk> body_thunk_sequence_;
std::optional<int64_t> trip_count_;
absl::Mutex mutex_;
absl::flat_hash_map<se::StreamExecutor*,
std::unique_ptr<se::MemoryAllocation>>
predicates_ ABSL_GUARDED_BY(mutex_);
};
}
}
#endif
#include "xla/service/gpu/runtime/while_thunk.h"
#include <cstdint>
#include <iterator>
#include <list>
#include <memory>
#include <optional>
#include <utility>
#include "absl/cleanup/cleanup.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/runtime/sequential_thunk.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/memory_allocation.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
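// Per-thread stack of loop counters. The front entry belongs to the innermost
// WhileThunk currently executing on this thread; outer loops follow in order.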
static std::list<int64_t>& LoopCounters() {
static thread_local std::list<int64_t> loop_counters;
return loop_counters;
}
absl::StatusOr<int64_t> WhileThunk::CurrentLoopIteration(int64_t depth) {
if (depth >= LoopCounters().size()) {
return absl::InvalidArgumentError(absl::StrFormat(
"Loop depth %d is greater than the number of tracked loops %d", depth,
LoopCounters().size()));
}
auto counter = LoopCounters().begin();
std::advance(counter, depth);
return *counter;
}
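// Illustrative use from code running inside a loop body (hypothetical call
// site, not part of this file):
//   TF_ASSIGN_OR_RETURN(int64_t iter, WhileThunk::CurrentLoopIteration());
//   TF_ASSIGN_OR_RETURN(int64_t outer, WhileThunk::CurrentLoopIteration(1));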
WhileThunk::WhileThunk(
ThunkInfo thunk_info,
const BufferAllocation::Slice& condition_result_buffer_index,
std::unique_ptr<SequentialThunk> condition_thunk_sequence,
std::unique_ptr<SequentialThunk> body_thunk_sequence,
std::optional<int64_t> trip_count)
: Thunk(Kind::kWhile, thunk_info),
condition_result_buffer_index_(condition_result_buffer_index),
condition_thunk_sequence_(std::move(condition_thunk_sequence)),
body_thunk_sequence_(std::move(body_thunk_sequence)),
trip_count_(trip_count) {}
absl::Status WhileThunk::Prepare(const PrepareParams& params,
ResourceRequests& resource_requests) {
TF_RETURN_IF_ERROR(
condition_thunk_sequence_->Prepare(params, resource_requests));
TF_RETURN_IF_ERROR(body_thunk_sequence_->Prepare(params, resource_requests));
return absl::OkStatus();
}
absl::Status WhileThunk::Initialize(const InitializeParams& params) {
TF_RETURN_IF_ERROR(condition_thunk_sequence_->Initialize(params));
TF_RETURN_IF_ERROR(body_thunk_sequence_->Initialize(params));
absl::MutexLock lock(&mutex_);
if (auto it = predicates_.find(params.executor); it == predicates_.end()) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<se::MemoryAllocation> allocation,
params.executor->HostMemoryAllocate(sizeof(bool)));
predicates_.emplace(params.executor, std::move(allocation));
}
return absl::OkStatus();
}
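// Runs the loop either for a statically known trip count (executing the body
// only) or by re-evaluating the condition thunks and copying the predicate
// from device memory into pinned host memory after every iteration.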
absl::Status WhileThunk::ExecuteOnStream(const ExecuteParams& params) {
auto& stream = *params.stream;
int64_t& iter = LoopCounters().emplace_front();
absl::Cleanup cleanup = [&] { LoopCounters().pop_front(); };
se::DeviceMemoryBase condition_result_data =
params.buffer_allocations->GetDeviceAddress(
condition_result_buffer_index_);
if (trip_count_.has_value()) {
VLOG(2) << "Executing WhileThunk for " << *trip_count_ << " iterations";
for (iter = 0; iter < trip_count_; ++iter) {
VLOG(3) << "Executing iteration # " << iter;
TF_RETURN_IF_ERROR(body_thunk_sequence_->ExecuteOnStream(params));
}
return absl::OkStatus();
}
bool* condition_result = [&] {
absl::MutexLock lock(&mutex_);
return reinterpret_cast<bool*>(predicates_.at(stream.parent())->opaque());
}();
while (true) {
VLOG(3) << "Executing WhileThunk condition computation; iter=" << iter;
TF_RETURN_IF_ERROR(condition_thunk_sequence_->ExecuteOnStream(params));
TF_RETURN_IF_ERROR(
stream.Memcpy(condition_result, condition_result_data, sizeof(bool)));
if (absl::Status blocked = stream.BlockHostUntilDone(); !blocked.ok()) {
return absl::InternalError(absl::StrFormat(
"Failed to complete all kernels launched on stream %p: %s", &stream,
blocked.message()));
}
VLOG(3) << "condition_result = " << *condition_result;
if (!*condition_result) {
VLOG(3) << "Break WhileThunk loop; iter=" << iter;
break;
}
VLOG(3) << "Executing WhileThunk body computation; iter=" << iter;
TF_RETURN_IF_ERROR(body_thunk_sequence_->ExecuteOnStream(params));
++iter;
}
return absl::OkStatus();
}
}
} | #include "xla/service/cpu/runtime/while_thunk.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/service/cpu/runtime/thunk_testlib.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
TEST(WhileThunkTest, BufferUses) {
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice predicate_slice(&alloc, 0, sizeof(int32_t));
BufferAllocation::Slice cond_read_slice(&alloc, 10, 10);
BufferAllocation::Slice body_read_slice(&alloc, 20, 10);
ThunkSequence cond_sequence;
cond_sequence.push_back(
std::make_unique<BufferUseThunk>(BufferUse::Read(cond_read_slice)));
ThunkSequence body_sequence;
body_sequence.push_back(
std::make_unique<BufferUseThunk>(BufferUse::Read(body_read_slice)));
TF_ASSERT_OK_AND_ASSIGN(
auto thunk,
WhileThunk::Create({"while"}, predicate_slice, std::move(cond_sequence),
std::move(body_sequence)));
EXPECT_EQ(thunk->buffer_uses().size(), 3);
EXPECT_EQ(thunk->buffer_uses()[0], BufferUse::Write(predicate_slice));
EXPECT_EQ(thunk->buffer_uses()[1], BufferUse::Read(cond_read_slice));
EXPECT_EQ(thunk->buffer_uses()[2], BufferUse::Read(body_read_slice));
}
}
} |
2,026 | cpp | tensorflow/tensorflow | thunk_executor | third_party/xla/xla/backends/cpu/runtime/thunk_executor.cc | third_party/xla/xla/backends/cpu/runtime/thunk_executor_test.cc | #ifndef XLA_SERVICE_CPU_RUNTIME_THUNK_EXECUTOR_H_
#define XLA_SERVICE_CPU_RUNTIME_THUNK_EXECUTOR_H_
#include <atomic>
#include <cstdint>
#include <limits>
#include <string>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/fixed_array.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/tsl/concurrency/async_value_ref.h"
namespace xla::cpu {
class ThunkExecutor {
public:
using BufferUses = Thunk::BufferUses;
using ExecuteEvent = Thunk::ExecuteEvent;
using Task = absl::AnyInvocable<void()>;
using TaskRunner = absl::AnyInvocable<void(Task)>;
using NodeId = int64_t;
static constexpr NodeId kInvalidNodeId = std::numeric_limits<NodeId>::min();
ThunkExecutor(ThunkExecutor&&) = default;
ThunkExecutor& operator=(ThunkExecutor&&) = default;
static absl::StatusOr<ThunkExecutor> Create(ThunkSequence thunk_sequence);
struct NodeDef {
NodeId id = kInvalidNodeId;
std::vector<NodeId> in_edges;
std::vector<NodeId> out_edges;
};
tsl::AsyncValueRef<ExecuteEvent> Execute(const Thunk::ExecuteParams& params,
TaskRunner runner = nullptr);
absl::Span<const NodeDef> nodes_defs() const { return nodes_defs_; }
const NodeDef& node_def(NodeId id) const { return nodes_defs_[id]; }
absl::Span<const NodeId> source() const { return source_; }
absl::Span<const NodeId> sink() const { return sink_; }
BufferUses buffer_uses() const { return thunk_sequence_.buffer_uses(); }
std::string ToString() const;
bool is_sequential() const { return is_sequential_; }
private:
using ReadyQueue = absl::InlinedVector<NodeId, 8>;
ThunkExecutor(ThunkSequence thunk_sequence, std::vector<NodeDef> nodes_defs);
struct Node {
NodeId id = kInvalidNodeId;
std::atomic<int64_t>* counter = nullptr;
const std::vector<NodeId>* out_edges = nullptr;
};
struct ExecuteState {
ExecuteState(ThunkExecutor* executor, TaskRunner runner);
ThunkExecutor* executor;
TaskRunner runner;
absl::FixedArray<std::atomic<int64_t>> counters;
absl::InlinedVector<Node, 32> nodes;
std::atomic<bool> abort;
absl::Mutex abort_mutex;
absl::Status abort_status ABSL_GUARDED_BY(abort_mutex);
std::atomic<int64_t> pending_sink_nodes;
tsl::AsyncValueRef<ExecuteEvent> execute_event;
};
tsl::AsyncValueRef<ExecuteEvent> ExecuteSequential(
const Thunk::ExecuteParams& params);
void ResumeExecuteSequential(int64_t index,
const Thunk::ExecuteParams& params,
tsl::AsyncValueRef<ExecuteEvent> event);
void Execute(ExecuteState* state, const Thunk::ExecuteParams& params,
ReadyQueue ready_queue);
void ProcessOutEdges(ExecuteState* state,
tsl::AsyncValuePtr<Thunk::ExecuteEvent> node_event,
Node& node, ReadyQueue& ready_queue);
int64_t TransitiveReduction();
ThunkSequence thunk_sequence_;
std::vector<NodeDef> nodes_defs_;
std::vector<NodeId> source_;
std::vector<NodeId> sink_;
bool is_sequential_;
};
}
#endif
#include "xla/service/cpu/runtime/thunk_executor.h"
#include <atomic>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/runtime/buffer_use.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/logging.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla::cpu {
ThunkExecutor::ThunkExecutor(ThunkSequence thunk_sequence,
std::vector<NodeDef> nodes_defs)
: thunk_sequence_(std::move(thunk_sequence)),
nodes_defs_(std::move(nodes_defs)),
is_sequential_(true) {
for (NodeId i = 0; i < nodes_defs_.size(); ++i) {
if (nodes_defs_[i].in_edges.empty()) {
source_.push_back(i);
}
if (nodes_defs_[i].out_edges.empty()) {
sink_.push_back(i);
}
}
int64_t num_erased_edges = TransitiveReduction();
for (NodeId i = 1; i < nodes_defs_.size() && is_sequential_; ++i) {
is_sequential_ &= (absl::c_count(nodes_defs_[i].in_edges, i - 1) != 0);
}
VLOG(2) << absl::StreamFormat(
"Constructed ThunkExecutor with %d nodes: #source_nodes=%d "
"#sink_nodes=%d, #erased_edges=%d, is_sequential=%v",
nodes_defs_.size(), source_.size(), sink_.size(), num_erased_edges,
is_sequential_);
DCHECK((!source_.empty() && !sink_.empty() && !thunk_sequence_.empty()) ||
(source_.empty() && sink_.empty() && thunk_sequence_.empty()));
}
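// Builds the dependency graph for a thunk sequence: thunk `j` gets an edge to
// a later thunk `i` whenever their buffer read/write sets conflict, so thunks
// that touch the same memory never run concurrently.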
absl::StatusOr<ThunkExecutor> ThunkExecutor::Create(
ThunkSequence thunk_sequence) {
std::vector<NodeDef> defs(thunk_sequence.size());
std::vector<BufferUse::ReadWriteSet> rwsets(thunk_sequence.size());
std::vector<Thunk::BufferUses> buffer_uses(thunk_sequence.size());
for (NodeId i = 0; i < thunk_sequence.size(); ++i) {
defs[i].id = i;
Thunk& thunk = *thunk_sequence[i];
rwsets[i].AddAll(thunk.buffer_uses());
for (NodeId j = i - 1; j >= 0; --j) {
if (rwsets[j].HasConflicts(rwsets[i])) {
defs[j].out_edges.push_back(i);
defs[i].in_edges.push_back(j);
}
}
}
return ThunkExecutor(std::move(thunk_sequence), std::move(defs));
}
ThunkExecutor::ExecuteState::ExecuteState(ThunkExecutor* executor,
TaskRunner runner)
: executor(executor),
runner(std::move(runner)),
counters(executor->nodes_defs().size()),
nodes(executor->nodes_defs().size()),
abort(false),
pending_sink_nodes(executor->sink().size()),
execute_event(tsl::MakeConstructedAsyncValueRef<ExecuteEvent>()) {
for (NodeId id = 0; id < nodes.size(); ++id) {
const NodeDef& node_def = executor->node_def(id);
counters[id].store(node_def.in_edges.size(), std::memory_order_release);
nodes[id] = Node{id, &counters[id], &node_def.out_edges};
}
}
tsl::AsyncValueRef<ThunkExecutor::ExecuteEvent> ThunkExecutor::Execute(
const Thunk::ExecuteParams& params, TaskRunner runner) {
if (ABSL_PREDICT_FALSE(thunk_sequence_.empty())) {
return Thunk::OkExecuteEvent();
}
if (ABSL_PREDICT_FALSE(thunk_sequence_.size() == 1)) {
return thunk_sequence_[0]->Execute(params);
}
if (is_sequential_) {
return ExecuteSequential(params);
}
auto state = std::make_unique<ExecuteState>(this, std::move(runner));
Execute(state.get(), params, ReadyQueue(source_.begin(), source_.end()));
auto execute_event = state->execute_event;
execute_event.AndThen([state = std::move(state)] {
CHECK_EQ(state->pending_sink_nodes.load(std::memory_order_acquire), 0)
<< "All sink nodes must be completed before execute_event is marked "
"available.";
});
return execute_event;
}
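// Executes thunks one after another on the caller thread. If a thunk returns
// an event that is not immediately available, execution suspends and resumes
// from the next thunk in an AndThen continuation.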
tsl::AsyncValueRef<ThunkExecutor::ExecuteEvent>
ThunkExecutor::ExecuteSequential(const Thunk::ExecuteParams& params) {
for (int64_t i = 0; i < thunk_sequence_.size(); ++i) {
Thunk& thunk = *thunk_sequence_[i];
auto execute_event = thunk.Execute(params);
if (ABSL_PREDICT_FALSE(!execute_event.IsAvailable())) {
auto event = tsl::MakeConstructedAsyncValueRef<ExecuteEvent>();
execute_event.AndThen([this, ¶ms, i, event](absl::Status status) {
if (ABSL_PREDICT_FALSE(!status.ok())) {
event.SetError(std::move(status));
} else {
ResumeExecuteSequential(i + 1, params, std::move(event));
}
});
return event;
}
if (ABSL_PREDICT_FALSE(execute_event.IsError())) {
return execute_event;
}
}
return Thunk::OkExecuteEvent();
}
void ThunkExecutor::ResumeExecuteSequential(
int64_t index, const Thunk::ExecuteParams& params,
tsl::AsyncValueRef<ExecuteEvent> event) {
for (int64_t i = index; i < thunk_sequence_.size(); ++i) {
Thunk& thunk = *thunk_sequence_[i];
auto execute_event = thunk.Execute(params);
if (ABSL_PREDICT_FALSE(!execute_event.IsAvailable())) {
execute_event.AndThen(
[this, ¶ms, i, event = std::move(event)](absl::Status status) {
if (ABSL_PREDICT_FALSE(!status.ok())) {
event.SetError(std::move(status));
} else {
ResumeExecuteSequential(i + 1, params, std::move(event));
}
});
return;
}
if (ABSL_PREDICT_FALSE(execute_event.IsError())) {
event.SetError(execute_event.GetError());
return;
}
}
event.SetStateConcrete();
}
void ThunkExecutor::Execute(ExecuteState* state,
const Thunk::ExecuteParams& params,
ReadyQueue ready_queue) {
tsl::profiler::TraceMe trace("ThunkExecutor::Execute");
if (ready_queue.empty()) return;
bool has_runner = state->runner != nullptr;
for (int64_t i = 0; i < ready_queue.size(); ++i) {
NodeId id = ready_queue[i];
Node& node = state->nodes[id];
int64_t cnt = node.counter->load(std::memory_order_acquire);
CHECK_EQ(cnt, 0) << "Node counter must be 0";
if (has_runner && i < ready_queue.size() - 1) {
ReadyQueue tail(ready_queue.begin() + i + 1, ready_queue.end());
ready_queue.erase(ready_queue.begin() + i + 1, ready_queue.end());
state->runner([¶ms, state, tail = std::move(tail)]() mutable {
state->executor->Execute(state, params, std::move(tail));
});
}
Thunk& thunk = *state->executor->thunk_sequence_[id];
auto execute_event = state->abort.load(std::memory_order_relaxed)
? Thunk::OkExecuteEvent()
: thunk.Execute(params);
if (ABSL_PREDICT_FALSE(!execute_event.IsAvailable())) {
execute_event.AndThen([&, state, execute_event = execute_event.AsPtr()] {
ReadyQueue ready_queue;
ProcessOutEdges(state, execute_event, node, ready_queue);
Execute(state, params, std::move(ready_queue));
});
} else {
ProcessOutEdges(state, execute_event.AsPtr(), node, ready_queue);
}
}
}
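// Records an error from `node_event` in the abort state, then decrements the
// dependency counters of `node`'s out edges, appending nodes whose counter
// reaches zero to `ready_queue`. When the last sink node completes, the
// overall execute event is resolved with the first recorded error, if any.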
void ThunkExecutor::ProcessOutEdges(
ExecuteState* state, tsl::AsyncValuePtr<Thunk::ExecuteEvent> node_event,
Node& node, ReadyQueue& ready_queue) {
if (ABSL_PREDICT_FALSE(node_event.IsError())) {
absl::MutexLock lock(&state->abort_mutex);
state->abort = true;
state->abort_status.Update(node_event.GetError());
}
bool is_sink = node.out_edges->empty();
for (NodeId out_edge : *node.out_edges) {
Node& out_node = state->nodes[out_edge];
int64_t cnt = out_node.counter->fetch_sub(1, std::memory_order_release);
CHECK_GE(cnt, 1) << "Node counter can't drop below 0";
if (cnt == 1) ready_queue.push_back(out_edge);
}
if (ABSL_PREDICT_FALSE(is_sink)) {
bool is_done =
state->pending_sink_nodes.fetch_sub(1, std::memory_order_acq_rel) == 1;
if (ABSL_PREDICT_TRUE(!is_done)) return;
if (ABSL_PREDICT_FALSE(state->abort.load(std::memory_order_relaxed))) {
auto take_error = [&] {
absl::MutexLock lock(&state->abort_mutex);
CHECK(!state->abort_status.ok())
<< "Abort status must be set if execution is aborted";
return std::move(state->abort_status);
};
state->execute_event.SetError(take_error());
} else {
state->execute_event.SetStateConcrete();
}
}
}
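// Removes edges implied by longer paths so that each node waits only on its
// immediate predecessors. For example, with edges a->b, b->c, and a->c, the
// direct edge a->c is redundant and erased. Returns the number of erased
// edges.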
int64_t ThunkExecutor::TransitiveReduction() {
int64_t num_erased_edges = 0;
auto erase_edge = [&](NodeDef& from, NodeDef& to) {
auto out_edge_it = absl::c_find(from.out_edges, to.id);
auto in_edge_it = absl::c_find(to.in_edges, from.id);
bool has_out_edge = out_edge_it != from.out_edges.end();
bool has_in_edge = in_edge_it != to.in_edges.end();
DCHECK_EQ(has_out_edge, has_in_edge) << "Edges must be symmetric";
if (has_out_edge && has_in_edge) {
from.out_edges.erase(out_edge_it);
to.in_edges.erase(in_edge_it);
++num_erased_edges;
}
};
std::vector<int64_t> stack;
std::vector<bool> visited;
auto add_to_stack = [&](int64_t node_id) {
if (!visited[node_id]) {
stack.push_back(node_id);
visited[node_id] = true;
}
};
for (int64_t i = 0; i < nodes_defs_.size(); ++i) {
NodeDef& source_node = nodes_defs_[i];
stack.clear();
visited.assign(nodes_defs_.size(), false);
for (int64_t out_id : source_node.out_edges) {
NodeDef& out_node = nodes_defs_[out_id];
for (int64_t start_id : out_node.out_edges) add_to_stack(start_id);
}
while (!stack.empty()) {
int64_t node_id = stack.back();
stack.pop_back();
NodeDef& node = nodes_defs_[node_id];
erase_edge(source_node, node);
for (int64_t out_id : node.out_edges) add_to_stack(out_id);
}
}
return num_erased_edges;
}
std::string ThunkExecutor::ToString() const {
std::string str = absl::StrFormat(
"ThunkExecutor: #thunks=%d #source_nodes=%d #sink_nodes=%d",
thunk_sequence_.size(), source_.size(), sink_.size());
std::vector<std::vector<std::string>> in_edges(thunk_sequence_.size());
for (const auto& node_def : nodes_defs_) {
for (NodeId in_edge : node_def.in_edges) {
in_edges[node_def.id].push_back(thunk_sequence_[in_edge]->info().op_name);
}
}
for (NodeId i = 0; i < thunk_sequence_.size(); ++i) {
const Thunk& thunk = *thunk_sequence_[i];
bool is_source = absl::c_find(source_, i) != source_.end();
bool is_sink = absl::c_find(sink_, i) != sink_.end();
absl::StrAppendFormat(
&str,
"\n thunk #%05d: op_name=%s, dependencies=[%s], source=%v, sink=%v", i,
thunk.info().op_name, absl::StrJoin(in_edges[i], ", "), is_source,
is_sink);
}
return str;
}
} | #include "xla/service/cpu/runtime/thunk_executor.h"
#define EIGEN_USE_THREADS
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <random>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/runtime/buffer_allocations.h"
#include "xla/service/cpu/runtime/task.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/platform/threadpool.h"
namespace xla::cpu {
namespace {
using ::testing::ElementsAre;
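// Test thunk that element-wise adds each `src` slice into the matching `dst`
// slice. It can record an execution trace, run on the intra-op thread pool,
// inject errors, and declare a fake side-effect write that forces sequential
// ordering.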
class AddI32Thunk final : public Thunk {
public:
AddI32Thunk(std::string name, std::vector<BufferAllocation::Slice> srcs,
std::vector<BufferAllocation::Slice> dsts,
std::vector<std::string>* trace, bool inject_error,
bool inject_side_effect);
static std::unique_ptr<Thunk> Create(
std::string name, std::vector<BufferAllocation::Slice> srcs,
std::vector<BufferAllocation::Slice> dsts,
std::vector<std::string>* trace = nullptr, bool inject_error = false,
bool inject_side_effect = false);
static std::vector<MaybeOwningDeviceMemory> AsDeviceMemory(
absl::Span<std::vector<int32_t>* const> data);
static absl::Status Execute(const BufferAllocations* allocations,
BufferAllocation::Slice src_slice,
BufferAllocation::Slice dst_slice);
tsl::AsyncValueRef<ExecuteEvent> Execute(const ExecuteParams&) final;
BufferUses buffer_uses() const final;
private:
std::vector<BufferAllocation::Slice> srcs_;
std::vector<BufferAllocation::Slice> dsts_;
std::vector<std::string>* trace_;
bool inject_error_;
bool inject_side_effect_;
};
std::unique_ptr<Thunk> AddI32Thunk::Create(
std::string name, std::vector<BufferAllocation::Slice> srcs,
std::vector<BufferAllocation::Slice> dsts, std::vector<std::string>* trace,
bool inject_error, bool inject_side_effect) {
return std::make_unique<AddI32Thunk>(std::move(name), std::move(srcs),
std::move(dsts), trace, inject_error,
inject_side_effect);
}
std::vector<MaybeOwningDeviceMemory> AddI32Thunk::AsDeviceMemory(
absl::Span<std::vector<int32_t>* const> data) {
std::vector<MaybeOwningDeviceMemory> buffers;
for (auto& vec : data) {
buffers.emplace_back(
se::DeviceMemoryBase(vec->data(), vec->size() * sizeof(int32_t)));
}
return buffers;
}
AddI32Thunk::AddI32Thunk(std::string name,
std::vector<BufferAllocation::Slice> srcs,
std::vector<BufferAllocation::Slice> dsts,
std::vector<std::string>* trace, bool inject_error,
bool inject_side_effect)
: Thunk(Kind::kKernel, Info{name}),
srcs_(std::move(srcs)),
dsts_(std::move(dsts)),
trace_(trace),
inject_error_(inject_error),
inject_side_effect_(inject_side_effect) {}
absl::Status AddI32Thunk::Execute(const BufferAllocations* allocations,
BufferAllocation::Slice src_slice,
BufferAllocation::Slice dst_slice) {
TF_ASSIGN_OR_RETURN(se::DeviceMemoryBase src,
allocations->GetDeviceAddress(src_slice));
TF_ASSIGN_OR_RETURN(se::DeviceMemoryBase dst,
allocations->GetDeviceAddress(dst_slice));
CHECK_EQ(src.size() % sizeof(int32_t), 0);
CHECK_EQ(dst.size() % sizeof(int32_t), 0);
int32_t* src_ptr = static_cast<int32_t*>(src.opaque());
int32_t* dst_ptr = static_cast<int32_t*>(dst.opaque());
size_t len = std::min(src.size(), dst.size()) / sizeof(int32_t);
for (int j = 0; j < len; ++j) dst_ptr[j] += src_ptr[j];
return absl::OkStatus();
}
tsl::AsyncValueRef<Thunk::ExecuteEvent> AddI32Thunk::Execute(
const ExecuteParams& params) {
if (trace_) trace_->push_back(info().op_name);
auto execute = [&]() -> absl::Status {
CHECK_EQ(srcs_.size(), dsts_.size());
for (int i = 0; i < srcs_.size(); ++i) {
TF_RETURN_IF_ERROR(
Execute(params.buffer_allocations, srcs_.at(i), dsts_.at(i)));
}
return absl::OkStatus();
};
if (params.intra_op_threadpool) {
auto event = tsl::MakeConstructedAsyncValueRef<ExecuteEvent>();
params.intra_op_threadpool->getPool()->Schedule([&, event, execute] {
if (inject_error_) {
event.SetError(absl::InternalError("Injected error"));
} else {
CHECK_OK(execute());
event.SetStateConcrete();
}
});
return event;
}
if (inject_error_) {
return tsl::MakeErrorAsyncValueRef(absl::InternalError("Injected error"));
}
TF_RETURN_IF_ERROR(execute());
return Thunk::OkExecuteEvent();
}
AddI32Thunk::BufferUses AddI32Thunk::buffer_uses() const {
BufferUses buffer_uses;
for (const auto& src : srcs_) buffer_uses.push_back(BufferUse::Read(src));
for (const auto& dst : dsts_) buffer_uses.push_back(BufferUse::Write(dst));
if (inject_side_effect_) {
static auto* fake_alloc = new BufferAllocation(0, 1, 0);
buffer_uses.push_back(
BufferUse::Write(BufferAllocation::Slice(fake_alloc, 0, 1)));
}
return buffer_uses;
}
TEST(ThunkExecutorTest, DependencyOrdering) {
BufferAllocation alloc(0, 80, 0);
BufferAllocation::Slice slice0(&alloc, 0, 40);
BufferAllocation::Slice slice1(&alloc, 40, 40);
BufferAllocation::Slice slice2(&alloc, 20, 40);
ThunkSequence sequence;
sequence.push_back(AddI32Thunk::Create("a", {slice0}, {slice0}));
sequence.push_back(AddI32Thunk::Create("b", {slice1}, {slice1}));
sequence.push_back(AddI32Thunk::Create("c", {slice2}, {slice2}));
TF_ASSERT_OK_AND_ASSIGN(ThunkExecutor executor,
ThunkExecutor::Create(std::move(sequence)));
EXPECT_FALSE(executor.is_sequential());
EXPECT_THAT(executor.source(), ElementsAre(0, 1));
EXPECT_THAT(executor.sink(), ElementsAre(2));
}
TEST(ThunkExecutorTest, SequentialOrdering) {
BufferAllocation alloc(0, 80, 0);
BufferAllocation::Slice slice(&alloc, 0, 40);
ThunkSequence sequence;
sequence.push_back(AddI32Thunk::Create("a", {slice}, {slice}));
sequence.push_back(AddI32Thunk::Create("b", {slice}, {slice}));
sequence.push_back(AddI32Thunk::Create("c", {slice}, {slice}));
TF_ASSERT_OK_AND_ASSIGN(ThunkExecutor executor,
ThunkExecutor::Create(std::move(sequence)));
EXPECT_TRUE(executor.is_sequential());
EXPECT_THAT(executor.source(), ElementsAre(0));
EXPECT_THAT(executor.sink(), ElementsAre(2));
}
TEST(ThunkExecutorTest, TransitiveReduction) {
BufferAllocation alloc(0, 80, 0);
BufferAllocation::Slice slice(&alloc, 0, 40);
ThunkSequence sequence;
sequence.push_back(AddI32Thunk::Create("a", {slice}, {slice}));
sequence.push_back(AddI32Thunk::Create("b", {slice}, {slice}));
sequence.push_back(AddI32Thunk::Create("c", {slice}, {slice}));
TF_ASSERT_OK_AND_ASSIGN(ThunkExecutor executor,
ThunkExecutor::Create(std::move(sequence)));
EXPECT_THAT(executor.source(), ElementsAre(0));
EXPECT_THAT(executor.sink(), ElementsAre(2));
EXPECT_THAT(executor.node_def(0).out_edges, ElementsAre(1));
EXPECT_THAT(executor.node_def(1).in_edges, ElementsAre(0));
EXPECT_THAT(executor.node_def(1).out_edges, ElementsAre(2));
EXPECT_THAT(executor.node_def(2).in_edges, ElementsAre(1));
}
TEST(ThunkExecutorTest, Execute) {
BufferAllocation alloc(0, 80, 0);
BufferAllocation::Slice slice0(&alloc, 0, 40);
BufferAllocation::Slice slice1(&alloc, 40, 40);
BufferAllocation::Slice slice2(&alloc, 20, 40);
std::vector<std::string> trace;
ThunkSequence sequence;
sequence.push_back(AddI32Thunk::Create("a", {slice0}, {slice0}, &trace));
sequence.push_back(AddI32Thunk::Create("b", {slice1}, {slice1}, &trace));
sequence.push_back(AddI32Thunk::Create("c", {slice2}, {slice2}, &trace));
TF_ASSERT_OK_AND_ASSIGN(ThunkExecutor executor,
ThunkExecutor::Create(std::move(sequence)));
std::vector<int32_t> data(20, 1);
auto buffers = AddI32Thunk::AsDeviceMemory({&data});
BufferAllocations allocations(buffers);
Thunk::ExecuteParams params = {nullptr, &allocations};
auto execute_event = executor.Execute(params, [&](ThunkExecutor::Task task) {
trace.push_back("<TaskRunner>");
task();
});
tsl::BlockUntilReady(execute_event);
ASSERT_TRUE(execute_event.IsConcrete());
EXPECT_THAT(trace, ElementsAre("<TaskRunner>", "b", "a", "c"));
EXPECT_THAT(data, ElementsAre(2, 2, 2, 2, 2,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
2, 2, 2, 2, 2));
}
struct GeneratedThunkSequence {
BufferAllocation src_alloc;
BufferAllocation dst_alloc;
std::vector<int32_t> src;
std::vector<int32_t> dst;
std::vector<int32_t> expected;
std::vector<MaybeOwningDeviceMemory> expected_buffers;
std::vector<MaybeOwningDeviceMemory> buffers;
ThunkSequence sequence;
};
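// Generates `num_thunks` AddI32 thunks over random, overlapping slices and
// precomputes the expected destination contents by eagerly executing each
// thunk against a reference copy of the buffers.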
static absl::StatusOr<std::unique_ptr<GeneratedThunkSequence>>
GenerateThunkSequence(size_t num_elements, size_t num_thunks,
bool inject_errors, bool inject_side_effects) {
auto g = std::make_unique<GeneratedThunkSequence>(GeneratedThunkSequence{
BufferAllocation(0, num_elements * sizeof(int32_t), 0),
BufferAllocation(1, num_elements * sizeof(int32_t), 0),
std::vector<int32_t>(num_elements, 1),
std::vector<int32_t>(num_elements, 0),
std::vector<int32_t>(num_elements, 0),
});
g->expected_buffers = AddI32Thunk::AsDeviceMemory({&g->src, &g->expected});
g->buffers = AddI32Thunk::AsDeviceMemory({&g->src, &g->dst});
std::minstd_rand0 engine;
std::uniform_int_distribution<size_t> offset_dist(0, num_elements - 1);
std::uniform_int_distribution<size_t> size_dist(32, 64);
std::uniform_int_distribution<size_t> inject_error_dist(0, num_thunks / 10);
auto random_slice = [&](BufferAllocation* alloc) {
size_t start = offset_dist(engine);
size_t size = std::min(num_elements - start, size_dist(engine));
return BufferAllocation::Slice(alloc, start * sizeof(int32_t),
size * sizeof(int32_t));
};
for (int i = 0; i < num_thunks; ++i) {
BufferAllocation::Slice src = random_slice(&g->src_alloc);
BufferAllocation::Slice dst = random_slice(&g->dst_alloc);
BufferAllocations allocations(g->expected_buffers);
TF_RETURN_IF_ERROR(AddI32Thunk::Execute(&allocations, src, dst));
bool inject_error = inject_errors && inject_error_dist(engine) == 0;
g->sequence.push_back(AddI32Thunk::Create(absl::StrCat(i), {src}, {dst},
nullptr, inject_error,
inject_side_effects));
}
return g;
}
class ThunkExecutorStressTest
: public testing::TestWithParam<
std::tuple<int32_t, bool, bool, bool, bool>> {
public:
void SetUp() override {
auto& [_, use_task_runner, use_device, inject_errors, inject_side_effects] =
GetParam();
use_task_runner_ = use_task_runner;
use_device_ = use_device;
if (use_task_runner_ || use_device_) {
thread_pool_.emplace(tsl::Env::Default(), "thunk-executor", 8);
device_.emplace(thread_pool_->AsEigenThreadPool(),
thread_pool_->NumThreads());
}
}
ThunkExecutor::TaskRunner task_runner() {
if (!use_task_runner_) return nullptr;
return [&](ThunkExecutor::Task task) {
thread_pool_->Schedule(ToCopyableTask(std::move(task)));
};
}
Eigen::ThreadPoolDevice* device() {
if (!use_device_) return nullptr;
return &*device_;
}
private:
bool use_task_runner_;
bool use_device_;
std::optional<tsl::thread::ThreadPool> thread_pool_;
std::optional<Eigen::ThreadPoolDevice> device_;
};
TEST_P(ThunkExecutorStressTest, Execute) {
auto [num_thunks, use_task_runner, use_device, inject_errors,
inject_side_effects] = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GeneratedThunkSequence> g,
GenerateThunkSequence(1024, num_thunks, inject_errors,
inject_side_effects));
TF_ASSERT_OK_AND_ASSIGN(ThunkExecutor executor,
ThunkExecutor::Create(std::move(g->sequence)));
BufferAllocations allocations(g->buffers);
Thunk::ExecuteParams params = {nullptr, &allocations, nullptr, device()};
auto execute_event = executor.Execute(params, task_runner());
tsl::BlockUntilReady(execute_event);
if (inject_errors) {
ASSERT_TRUE(execute_event.IsError());
EXPECT_EQ(execute_event.GetError(), absl::InternalError("Injected error"));
} else {
ASSERT_TRUE(execute_event.IsConcrete());
EXPECT_EQ(g->dst, g->expected);
}
}
INSTANTIATE_TEST_SUITE_P(ThunkExecutor, ThunkExecutorStressTest,
testing::Combine(testing::ValuesIn({10, 100, 1000}),
testing::Bool(), testing::Bool(),
testing::Bool(), testing::Bool()));
static void BM_SyncThunkExecutor(benchmark::State& state) {
const size_t num_thunks = state.range(0);
auto g = GenerateThunkSequence(1024, num_thunks,
false,
false)
.value();
auto e = ThunkExecutor::Create(std::move(g->sequence)).value();
BufferAllocations allocations(g->buffers);
Thunk::ExecuteParams params = {nullptr, &allocations};
for (auto _ : state) {
auto execute_event = e.Execute(params, nullptr);
tsl::BlockUntilReady(execute_event);
CHECK(execute_event.IsConcrete());
}
}
static void BM_AsyncThunkExecutor(benchmark::State& state) {
const size_t num_thunks = state.range(0);
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "thunk-executor", 8);
Eigen::ThreadPoolDevice device(thread_pool.AsEigenThreadPool(),
thread_pool.NumThreads());
auto g = GenerateThunkSequence(1024, num_thunks,
false,
false)
.value();
auto e = ThunkExecutor::Create(std::move(g->sequence)).value();
BufferAllocations allocations(g->buffers);
Thunk::ExecuteParams params = {nullptr, &allocations, nullptr, &device};
for (auto _ : state) {
auto execute_event = e.Execute(params, [&](ThunkExecutor::Task task) {
thread_pool.Schedule(ToCopyableTask(std::move(task)));
});
tsl::BlockUntilReady(execute_event);
CHECK(execute_event.IsConcrete());
}
}
BENCHMARK(BM_SyncThunkExecutor)
->MeasureProcessCPUTime()
->Arg(1)
->Arg(16)
->Arg(64)
->Arg(128)
->Arg(256)
->Arg(512);
BENCHMARK(BM_AsyncThunkExecutor)
->MeasureProcessCPUTime()
->Arg(1)
->Arg(16)
->Arg(64)
->Arg(128)
->Arg(256)
->Arg(512);
}
} |
2,027 | cpp | tensorflow/tensorflow | kernel_thunk | third_party/xla/xla/backends/cpu/runtime/kernel_thunk.cc | third_party/xla/xla/backends/cpu/runtime/kernel_thunk_test.cc | #ifndef XLA_SERVICE_GPU_RUNTIME_KERNEL_THUNK_H_
#define XLA_SERVICE_GPU_RUNTIME_KERNEL_THUNK_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/kernel_arguments.h"
#include "xla/service/gpu/kernels/custom_kernel.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/types.h"
namespace xla {
namespace gpu {
class GpuExecutable;
class KernelThunk : public Thunk {
public:
KernelThunk(const HloInstruction* instr, std::string kernel_name,
absl::Span<const KernelArgument> kernel_arguments,
LaunchDimensions launch_dimensions,
std::optional<se::ClusterDim> cluster_dim, int64_t shmem_bytes);
KernelThunk(const KernelThunk&) = delete;
KernelThunk& operator=(const KernelThunk&) = delete;
~KernelThunk() override = default;
std::string ToString(int indent) const override;
absl::Status Initialize(const InitializeParams& params) override;
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
const std::vector<BufferAllocation::Slice>& arguments() const {
return args_;
}
const std::vector<bool>& written() const { return written_; }
const std::string& kernel_name() const { return kernel_name_; }
const LaunchDimensions& launch_dimensions() const {
return launch_dimensions_;
}
int64_t shmem_bytes() const { return shmem_bytes_; }
private:
std::vector<BufferAllocation::Slice> args_;
std::vector<bool> written_;
const std::string kernel_name_;
const LaunchDimensions launch_dimensions_;
const std::optional<se::ClusterDim> cluster_dim_;
int64_t shmem_bytes_;
mutable absl::Mutex mutex_;
absl::flat_hash_map<se::StreamExecutor*, std::unique_ptr<se::Kernel>>
kernel_cache_ ABSL_GUARDED_BY(mutex_);
};
class CustomKernelThunk : public Thunk {
public:
CustomKernelThunk(const HloInstruction* inst, CustomKernel custom_kernel,
absl::Span<const KernelArgument> kernel_arguments);
std::string ToString(int indent) const override;
absl::Status Initialize(const InitializeParams& params) override;
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
const CustomKernel& custom_kernel() const { return custom_kernel_; }
const std::vector<BufferAllocation::Slice>& arguments() const {
return args_;
}
std::string_view custom_kernel_name() const { return custom_kernel_.name(); }
const std::vector<bool>& written() const { return written_; }
LaunchDimensions launch_dimensions() const {
return LaunchDimensions(custom_kernel_.block_dims(),
custom_kernel_.thread_dims());
}
int64_t shmem_bytes() const { return custom_kernel_.shared_memory_bytes(); }
private:
std::vector<BufferAllocation::Slice> args_;
std::vector<bool> written_;
CustomKernel custom_kernel_;
mutable absl::Mutex mutex_;
absl::flat_hash_map<se::StreamExecutor*, std::unique_ptr<se::Kernel>>
kernel_cache_ ABSL_GUARDED_BY(mutex_);
};
}
}
#endif
#include "xla/service/gpu/runtime/kernel_thunk.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/kernel_arguments.h"
#include "xla/service/gpu/kernels/custom_kernel.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_factory.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
KernelThunk::KernelThunk(const HloInstruction* instr, std::string kernel_name,
absl::Span<const KernelArgument> kernel_arguments,
LaunchDimensions launch_dimensions,
std::optional<se::ClusterDim> cluster_dim,
int64_t shmem_bytes)
: Thunk(Kind::kKernel, Thunk::ThunkInfo::WithProfileAnnotation(instr)),
kernel_name_(std::move(kernel_name)),
launch_dimensions_(std::move(launch_dimensions)),
cluster_dim_(std::move(cluster_dim)),
shmem_bytes_(shmem_bytes) {
args_.reserve(kernel_arguments.size());
written_.reserve(kernel_arguments.size());
for (const auto& kernel_argument : kernel_arguments) {
if (!kernel_argument.first_with_same_slice().has_value()) {
args_.push_back(kernel_argument.slice());
written_.push_back(kernel_argument.written());
}
}
}
std::string KernelThunk::ToString(int indent) const {
return absl::StrFormat(
", kernel = %s, launch dimensions = %s, cluster_dim = %s", kernel_name_,
launch_dimensions_.ToString(),
cluster_dim_.has_value() ? cluster_dim_->ToString() : "nullopt");
}
absl::Status KernelThunk::Initialize(const InitializeParams& params) {
absl::MutexLock lock(&mutex_);
auto it = kernel_cache_.find(params.executor);
if (kernel_cache_.end() == it) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<se::Kernel> kernel,
CreateKernel(kernel_name_, args_.size(), params.src.text,
params.src.binary, params.executor, shmem_bytes_));
kernel_cache_.emplace(params.executor, std::move(kernel));
}
return absl::OkStatus();
}
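// Debugging helper: copies every kernel argument buffer back to the host and
// logs its raw bytes. Only used under VLOG(100).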
static void PrintBufferContents(
se::Stream* stream, absl::Span<const se::DeviceMemoryBase> buffer_args) {
int input_idx = 0;
for (const se::DeviceMemoryBase& buf : buffer_args) {
auto host_buffer = std::make_unique<char[]>(buf.size());
CHECK_OK(stream->Memcpy(host_buffer.get(), buf, buf.size()));
CHECK_OK(stream->BlockHostUntilDone());
std::string buffer_contents;
for (int i = 0; i < buf.size(); i++) {
absl::StrAppendFormat(&buffer_contents, "%x ",
static_cast<unsigned>(host_buffer[i]));
}
VLOG(100) << "BUF(" << input_idx++ << ") = " << buffer_contents;
}
}
absl::Status KernelThunk::ExecuteOnStream(const ExecuteParams& params) {
se::StreamExecutor* executor = params.stream->parent();
LaunchDimensions launch_dimensions;
std::optional<se::ClusterDim> cluster_dim;
const se::Kernel* kernel = nullptr;
TF_ASSIGN_OR_RETURN(
se::Stream * stream,
GetStreamForExecution(Thunk::execution_stream_id(), params));
{
absl::MutexLock lock(&mutex_);
auto it = kernel_cache_.find(executor);
CHECK(it != kernel_cache_.end())
<< "Initialize() not called for StreamExecutor " << executor;
launch_dimensions = launch_dimensions_;
cluster_dim = cluster_dim_;
kernel = it->second.get();
}
VLOG(3) << "Launching " << kernel->name();
absl::InlinedVector<se::DeviceMemoryBase, 4> buffer_args;
for (const BufferAllocation::Slice& arg : args_) {
se::DeviceMemoryBase buf = params.buffer_allocations->GetDeviceAddress(arg);
VLOG(3) << " Arg: alloc #" << arg.index() << ", offset: " << arg.offset()
<< ": " << buf.opaque() << " (" << buf.size() << "B)";
buffer_args.push_back(buf);
}
if (VLOG_IS_ON(100)) {
PrintBufferContents(stream, buffer_args);
}
if (cluster_dim.has_value()) {
return ExecuteKernelOnStream(*kernel, buffer_args, launch_dimensions,
cluster_dim.value(), stream);
} else {
return ExecuteKernelOnStream(*kernel, buffer_args, launch_dimensions,
stream);
}
}
CustomKernelThunk::CustomKernelThunk(
const HloInstruction* instr, CustomKernel custom_kernel,
absl::Span<const KernelArgument> kernel_arguments)
: Thunk(Kind::kCustomKernel,
Thunk::ThunkInfo::WithProfileAnnotation(instr)),
custom_kernel_(std::move(custom_kernel)) {
args_.reserve(kernel_arguments.size());
written_.reserve(kernel_arguments.size());
for (const auto& kernel_argument : kernel_arguments) {
if (!kernel_argument.first_with_same_slice().has_value()) {
args_.push_back(kernel_argument.slice());
written_.push_back(kernel_argument.written());
}
}
}
std::string CustomKernelThunk::ToString(int indent) const {
return custom_kernel_.ToString();
}
absl::Status CustomKernelThunk::Initialize(const InitializeParams& params) {
absl::MutexLock lock(&mutex_);
auto it = kernel_cache_.find(params.executor);
if (kernel_cache_.end() == it) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<se::Kernel> kernel,
se::KernelFactory::Create(
params.executor, custom_kernel_.kernel_spec()));
kernel_cache_.emplace(params.executor, std::move(kernel));
}
return absl::OkStatus();
}
absl::Status CustomKernelThunk::ExecuteOnStream(const ExecuteParams& params) {
se::StreamExecutor* executor = params.stream->parent();
const se::Kernel* kernel = [&] {
absl::MutexLock lock(&mutex_);
return kernel_cache_[executor].get();
}();
VLOG(3) << "Launching " << custom_kernel_.ToString() << " as device kernel "
<< kernel->name();
absl::InlinedVector<se::DeviceMemoryBase, 4> buffer_args;
for (const BufferAllocation::Slice& arg : args_) {
se::DeviceMemoryBase buf = params.buffer_allocations->GetDeviceAddress(arg);
VLOG(3) << " Arg: alloc #" << arg.index() << ", offset: " << arg.offset()
<< ": " << buf.opaque() << " (" << buf.size() << "B)";
buffer_args.push_back(buf);
}
if (VLOG_IS_ON(100)) {
PrintBufferContents(params.stream, buffer_args);
}
se::KernelArgsDeviceMemoryArray args(buffer_args,
custom_kernel_.shared_memory_bytes());
if (auto cluster = custom_kernel_.cluster_dims(); cluster.has_value()) {
return params.stream->Launch(custom_kernel_.thread_dims(),
custom_kernel_.block_dims(), *cluster, *kernel,
args);
} else {
return params.stream->Launch(custom_kernel_.thread_dims(),
custom_kernel_.block_dims(), *kernel, args);
}
}
}
} | #include "xla/service/cpu/runtime/kernel_thunk.h"
#include <cstddef>
#include <cstdint>
#include <string_view>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/runtime/buffer_allocations.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/host/host_kernel_c_api.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
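// Host kernel library with a single kernel that doubles each f32 element:
// out[i] = in[i] + in[i], where i is the thread's x coordinate.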
class AddF32HostKernels : public Thunk::HostKernels {
public:
absl::StatusOr<SE_HOST_Kernel*> Find(std::string_view name) override {
return +[](const SE_HOST_KernelCallFrame* call_frame) {
const SE_HOST_KernelArg& in = call_frame->args[0];
const SE_HOST_KernelArg& out = call_frame->args[1];
float* in_ptr = reinterpret_cast<float*>(in.data);
float* out_ptr = reinterpret_cast<float*>(out.data);
uint64_t i = call_frame->thread->x;
*(out_ptr + i) = *(in_ptr + i) + *(in_ptr + i);
return static_cast<SE_HOST_KernelError*>(nullptr);
};
}
};
TEST(KernelThunkTest, CheckAlignment) {
auto thunk = KernelThunk::Create({"test"}, {}, {}, "test", se::ThreadDim(),
                                 /*min_alignment=*/3);
EXPECT_TRUE(absl::StrContains(thunk.status().message(),
"minimum alignment 3 is not a power of 2"));
}
TEST(KernelThunkTest, AddF32) {
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<float> in = {1.0, 2.0, 3.0, 4.0};
std::vector<float> out(4, 0.0);
size_t size_in_bytes = in.size() * sizeof(float);
buffers.emplace_back(se::DeviceMemoryBase(in.data(), size_in_bytes));
buffers.emplace_back(se::DeviceMemoryBase(out.data(), size_in_bytes));
BufferAllocations allocations(buffers);
BufferAllocation in_alloc(0, size_in_bytes, 0);
BufferAllocation out_alloc(1, size_in_bytes, 0);
BufferAllocation::Slice in_slice(&in_alloc, 0, size_in_bytes);
BufferAllocation::Slice out_slice(&out_alloc, 0, size_in_bytes);
TF_ASSERT_OK_AND_ASSIGN(
auto thunk, KernelThunk::Create({"add_f32"}, {in_slice}, {out_slice},
"add_f32", se::ThreadDim(4)));
AddF32HostKernels host_kernels;
Thunk::ExecuteParams params = {&host_kernels, &allocations};
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
std::vector<float> expected = {2.0, 4.0, 6.0, 8.0};
EXPECT_EQ(out, expected);
}
}
} |
2,028 | cpp | tensorflow/tensorflow | thunk | third_party/xla/xla/backends/cpu/runtime/thunk.cc | third_party/xla/xla/backends/cpu/runtime/thunk_test.cc | #ifndef XLA_SERVICE_GPU_RUNTIME_THUNK_H_
#define XLA_SERVICE_GPU_RUNTIME_THUNK_H_
#include <cstddef>
#include <cstdint>
#include <functional>
#include <map>
#include <memory>
#include <ostream>
#include <string>
#include <string_view>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "mlir/IR/Operation.h"
#include "xla/executable_run_options.h"
#include "xla/ffi/execution_context.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/global_device_id.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/runtime/nccl_api.h"
#include "xla/service/gpu/runtime/nccl_clique.h"
#include "xla/service/gpu/runtime/nccl_clique_key.h"
#include "xla/service/service_executable_run_options.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/lib/gtl/int_type.h"
namespace xla {
namespace gpu {
TSL_LIB_GTL_DEFINE_INT_TYPE(ExecutionStreamId, uint64_t);
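// A Thunk wraps everything needed to launch one compiled operation on a
// device stream. Thunks are emitted at compile time and executed at run time
// in three stages: Prepare (collect resource requests), Initialize (per-
// executor setup), and ExecuteOnStream.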
class Thunk {
public:
using ExecutionStreamIdMap =
absl::flat_hash_map<ExecutionStreamId, se::Stream*>;
static constexpr auto kDefaultExecutionStreamId = ExecutionStreamId(0);
enum Kind {
kAddressComputation,
kCholesky,
kConditional,
kConvolution,
kConvolutionReorder,
kCopy,
kCopyDone,
kCommandBuffer,
kCubSort,
kCublasLtMatmul,
kCustomCall,
kCustomKernel,
kFft,
kGemm,
kInfeed,
kKernel,
kMemset32BitValue,
kMemzero,
kNcclAllGather,
kNcclAllGatherStart,
kNcclAllGatherDone,
kNcclAllReduce,
kNcclAllReduceStart,
kNcclAllReduceDone,
kNcclCollectiveBroadcast,
kNcclCollectiveBroadcastStart,
kNcclCollectiveBroadcastDone,
kNcclCollectivePermute,
kNcclCollectivePermuteStart,
kNcclCollectivePermuteDone,
kNcclReduceScatter,
kNcclReduceScatterStart,
kNcclReduceScatterDone,
kNcclAllToAll,
kNcclAllToAllStart,
kNcclAllToAllDone,
kNcclSend,
kNcclSendDone,
kNcclRecv,
kNcclRecvDone,
kNorm,
kOutfeed,
kPartitionId,
kRecv,
kRecvDone,
kReplicaId,
kSequential,
kSend,
kSendDone,
kTriangularSolve,
kWhile,
kFusedMHA,
kWaitForStreams,
kCuDnn
};
using BinaryMap = absl::flat_hash_map<std::string, std::string>;
struct ExecutableSource {
std::string_view text;
absl::Span<const uint8_t> binary;
BinaryMap dnn_compiled_graphs;
};
struct ThunkInfo {
ThunkInfo() = default;
static ThunkInfo WithProfileAnnotation(const HloInstruction* instr);
std::string profile_annotation;
ExecutionStreamId execution_stream_id = kDefaultExecutionStreamId;
};
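// Lets thunks declare the resources they need before execution; currently
// this is the set of NCCL cliques (and their local participant counts) that
// collective thunks will use.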
class ResourceRequests {
public:
virtual ~ResourceRequests() = default;
virtual absl::Status AddClique(const NcclCliqueKey& clique_key,
int32_t num_local_participants) = 0;
};
class CollectiveCliques {
public:
CollectiveCliques() = default;
explicit CollectiveCliques(NcclClique::AcquiredCliquesMap cliques_map);
absl::StatusOr<NcclApi::NcclCommHandle> GetComm(
const NcclCliqueKey& clique_key, int32_t rank) const;
absl::StatusOr<size_t> num_communicators(
const NcclCliqueKey& clique_key) const;
absl::StatusOr<bool> is_local_clique(const NcclCliqueKey& clique_key) const;
bool empty() const { return cliques_map_.empty(); }
private:
NcclClique::AcquiredCliquesMap cliques_map_;
};
struct CollectiveExecuteParams {
static absl::StatusOr<CollectiveExecuteParams> Create(
const ServiceExecutableRunOptions& run_options,
absl::Span<se::Stream* const> async_streams,
int64_t local_device_ordinal, int64_t collective_max_nchannels = 0,
int64_t p2p_max_nchannels = 0);
using GlobalDeviceIdMap = std::map<int32_t, GlobalDeviceId>;
se::StreamExecutor* executor;
RunId run_id;
absl::InlinedVector<se::Stream*, 4> async_streams;
int64_t local_device_ordinal;
GlobalDeviceId global_device_id;
const DeviceAssignment* device_assn;
const GlobalDeviceIdMap* global_device_id_map;
const NcclCliqueIdCallback* nccl_clique_id_callback;
int64_t collective_max_nchannels;
int64_t p2p_max_nchannels;
private:
CollectiveExecuteParams(se::StreamExecutor* executor, RunId run_id,
absl::Span<se::Stream* const> async_streams,
int64_t local_device_ordinal,
GlobalDeviceId global_device_id,
const DeviceAssignment* device_assn,
const GlobalDeviceIdMap* global_device_id_map,
const NcclCliqueIdCallback* nccl_clique_id_callback,
int64_t collective_max_nchannels,
int64_t p2p_max_nchannels);
};
struct PrepareParams {
const CollectiveExecuteParams* collective_params = nullptr;
};
struct InitializeParams {
se::StreamExecutor* executor = nullptr;
ExecutableSource src;
const BufferAllocations* buffer_allocations = nullptr;
se::Stream* stream = nullptr;
se::Stream* command_buffer_trace_stream = nullptr;
CollectiveExecuteParams* collective_params = nullptr;
CollectiveCliques* collective_cliques = nullptr;
const ffi::ExecutionContext* ffi_execution_context = nullptr;
};
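// Per-execution parameters passed to ExecuteOnStream. Instances are built
// via Create() from the service run options; CommandBufferThunk (a friend)
// may clone one with a fresh set of buffer allocations.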
struct ExecuteParams {
static ExecuteParams Create(
const ServiceExecutableRunOptions& run_options,
const BufferAllocations& buffer_allocations, se::Stream* stream,
se::Stream* command_buffer_trace_stream,
CollectiveExecuteParams* collective_params,
CollectiveCliques* collective_cliques,
ExecutionStreamIdMap additional_compute_streams = {});
static ExecuteParams CloneWithNewAllocations(
const ExecuteParams& params,
const BufferAllocations& buffer_allocations);
const BufferAllocations* buffer_allocations;
se::Stream* stream;
se::Stream* command_buffer_trace_stream;
CollectiveExecuteParams* collective_params;
CollectiveCliques* collective_cliques;
se::Stream* device_to_host_stream;
se::Stream* host_to_device_stream;
SendDeviceMemoryFunction* send_device_memory_function;
RecvDeviceMemoryFunction* recv_device_memory_function;
const ffi::ExecutionContext* ffi_execution_context;
ExecutionStreamIdMap additional_compute_streams;
bool mock_collectives = false;
private:
friend class CommandBufferThunk;
ExecuteParams(const BufferAllocations* buffer_allocations,
se::Stream* stream, se::Stream* command_buffer_trace_stream,
CollectiveExecuteParams* collective_params,
CollectiveCliques* collective_cliques,
se::Stream* device_to_host_stream,
se::Stream* host_to_device_stream,
SendDeviceMemoryFunction* send_device_memory_function,
RecvDeviceMemoryFunction* recv_device_memory_function,
const ffi::ExecutionContext* ffi_execution_context,
ExecutionStreamIdMap additional_compute_streams = {},
bool mock_collectives = false);
};
Thunk(Kind kind, ThunkInfo thunk_info)
: kind_(kind),
profile_annotation_(thunk_info.profile_annotation),
execution_stream_id_(thunk_info.execution_stream_id) {}
virtual ~Thunk() = default;
Thunk(const Thunk&) = delete;
Thunk& operator=(const Thunk&) = delete;
virtual std::string ToString(int indent) const { return ""; }
Kind kind() const { return kind_; }
std::string_view profile_annotation() const { return profile_annotation_; }
virtual absl::Status Prepare(const PrepareParams& params,
ResourceRequests& resource_requests) {
return absl::OkStatus();
}
virtual absl::Status Initialize(const InitializeParams& params) {
return absl::OkStatus();
}
virtual absl::Status ExecuteOnStream(const ExecuteParams& params) = 0;
static absl::string_view KindToString(Thunk::Kind kind);
ExecutionStreamId execution_stream_id() const { return execution_stream_id_; }
void set_execution_stream_id(ExecutionStreamId execution_stream_id) {
execution_stream_id_ = execution_stream_id;
}
static absl::StatusOr<se::Stream*> GetStreamForExecution(
ExecutionStreamId stream_id, const ExecuteParams& params);
bool IsCollective() const;
private:
Kind kind_;
std::string profile_annotation_;
ExecutionStreamId execution_stream_id_;
};
using ThunkSequence = std::vector<std::unique_ptr<Thunk>>;
std::ostream& operator<<(std::ostream& os, Thunk::Kind kind);
struct ShapedSlice {
BufferAllocation::Slice slice;
Shape shape;
};
bool IsReductionCollective(Thunk::Kind kind);
}
}
#endif
#include "xla/service/gpu/runtime/thunk.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/executable_run_options.h"
#include "xla/ffi/execution_context.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/global_device_id.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/gpu_executable_run_options.h"
#include "xla/service/gpu/runtime/nccl_api.h"
#include "xla/service/gpu/runtime/nccl_clique.h"
#include "xla/service/gpu/runtime/nccl_clique_key.h"
#include "xla/service/service_executable_run_options.h"
#include "xla/stream_executor/stream.h"
#include "xla/translate/mhlo_to_hlo/location_exporter.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
Thunk::CollectiveCliques::CollectiveCliques(
NcclClique::AcquiredCliquesMap cliques_map)
: cliques_map_(std::move(cliques_map)) {}
absl::StatusOr<NcclApi::NcclCommHandle> Thunk::CollectiveCliques::GetComm(
const NcclCliqueKey& clique_key, int32_t rank) const {
auto clique = cliques_map_.find(clique_key);
if (clique == cliques_map_.end()) {
return absl::NotFoundError(absl::StrCat("No clique found for clique key: ",
clique_key.ToString()));
}
auto communicator = (*clique->second)->comm(rank);
if (!communicator.has_value()) {
return absl::InternalError(absl::StrCat("Communicator for rank ", rank,
" not found in a NCCL clique ",
clique_key.ToString()));
}
return *communicator;
}
absl::StatusOr<bool> Thunk::CollectiveCliques::is_local_clique(
const NcclCliqueKey& clique_key) const {
auto clique = cliques_map_.find(clique_key);
if (clique == cliques_map_.end()) {
return absl::NotFoundError(absl::StrCat("No clique found for clique key: ",
clique_key.ToString()));
}
return (*clique->second)->IsLocal();
}
absl::StatusOr<size_t> Thunk::CollectiveCliques::num_communicators(
const NcclCliqueKey& clique_key) const {
auto clique = cliques_map_.find(clique_key);
if (clique == cliques_map_.end()) {
return absl::NotFoundError(absl::StrCat("No clique found for clique key: ",
clique_key.ToString()));
}
return (*clique->second)->num_communicators();
}
using GlobalDeviceIdMap = Thunk::CollectiveExecuteParams::GlobalDeviceIdMap;
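// Resolves a local device ordinal to a global device id. Without an
// explicit map (e.g. single-process execution) the ordinal is used as-is.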
static absl::StatusOr<GlobalDeviceId> GetGlobalDeviceId(
const GlobalDeviceIdMap* device_id_map, int64_t local_device_ordinal) {
if (!device_id_map) return GlobalDeviceId(local_device_ordinal);
auto it = device_id_map->find(local_device_ordinal);
if (it == device_id_map->end())
return absl::NotFoundError(
absl::StrCat("No global device id found for local device ordinal: ",
local_device_ordinal));
return it->second;
}
absl::StatusOr<Thunk::CollectiveExecuteParams>
Thunk::CollectiveExecuteParams::Create(
const ServiceExecutableRunOptions& run_options,
absl::Span<se::Stream* const> async_streams, int64_t local_device_ordinal,
int64_t collective_max_nchannels, int64_t p2p_max_nchannels) {
const GpuExecutableRunOptions* gpu_options =
run_options.run_options().gpu_executable_run_options();
auto* device_id_map = gpu_options && gpu_options->gpu_global_device_ids()
? &*gpu_options->gpu_global_device_ids()
: nullptr;
auto* nccl_callback = gpu_options && gpu_options->nccl_clique_id_callback()
? &gpu_options->nccl_clique_id_callback()
: nullptr;
TF_ASSIGN_OR_RETURN(GlobalDeviceId global_device_id,
GetGlobalDeviceId(device_id_map, local_device_ordinal));
return CollectiveExecuteParams(
run_options.stream()->parent(), run_options.run_options().run_id(),
async_streams, local_device_ordinal, global_device_id,
run_options.run_options().device_assignment(), device_id_map,
nccl_callback, collective_max_nchannels, p2p_max_nchannels);
}
Thunk::CollectiveExecuteParams::CollectiveExecuteParams(
se::StreamExecutor* executor, RunId run_id,
absl::Span<se::Stream* const> async_streams, int64_t local_device_ordinal,
GlobalDeviceId global_device_id, const DeviceAssignment* device_assn,
const GlobalDeviceIdMap* global_device_id_map,
const NcclCliqueIdCallback* nccl_clique_id_callback,
int64_t collective_max_nchannels, int64_t p2p_max_nchannels)
: executor(executor),
run_id(run_id),
async_streams(async_streams.begin(), async_streams.end()),
local_device_ordinal(local_device_ordinal),
global_device_id(global_device_id),
device_assn(device_assn),
global_device_id_map(global_device_id_map),
nccl_clique_id_callback(nccl_clique_id_callback),
collective_max_nchannels(collective_max_nchannels),
p2p_max_nchannels(p2p_max_nchannels) {}
Thunk::ExecuteParams Thunk::ExecuteParams::Create(
const ServiceExecutableRunOptions& run_options,
const BufferAllocations& buffer_allocations, se::Stream* stream,
se::Stream* command_buffer_trace_stream,
CollectiveExecuteParams* collective_params,
CollectiveCliques* collective_cliques,
ExecutionStreamIdMap additional_compute_streams) {
return ExecuteParams(&buffer_allocations, stream, command_buffer_trace_stream,
collective_params, collective_cliques,
run_options.run_options().device_to_host_stream(),
run_options.run_options().host_to_device_stream(),
run_options.run_options().send_device_memory_function(),
run_options.run_options().recv_device_memory_function(),
run_options.run_options().ffi_execution_context(),
additional_compute_streams,
run_options.run_options().gpu_executable_run_options()
? run_options.run_options()
.gpu_executable_run_options()
->enable_mock_nccl_collectives()
: false);
}
Thunk::ExecuteParams Thunk::ExecuteParams::CloneWithNewAllocations(
const Thunk::ExecuteParams& params,
const BufferAllocations& buffer_allocations) {
return ExecuteParams(
&buffer_allocations, params.stream, params.command_buffer_trace_stream,
params.collective_params, params.collective_cliques,
params.device_to_host_stream, params.host_to_device_stream,
params.send_device_memory_function, params.recv_device_memory_function,
params.ffi_execution_context, params.additional_compute_streams);
}
Thunk::ExecuteParams::ExecuteParams(
const BufferAllocations* buffer_allocations, se::Stream* stream,
se::Stream* command_buffer_trace_stream,
CollectiveExecuteParams* collective_params,
CollectiveCliques* collective_cliques, se::Stream* device_to_host_stream,
se::Stream* host_to_device_stream,
SendDeviceMemoryFunction* send_device_memory_function,
RecvDeviceMemoryFunction* recv_device_memory_function,
const ffi::ExecutionContext* ffi_execution_context,
ExecutionStreamIdMap additional_compute_streams, bool mock_collectives)
: buffer_allocations(buffer_allocations),
stream(stream),
command_buffer_trace_stream(command_buffer_trace_stream),
collective_params(collective_params),
collective_cliques(collective_cliques),
device_to_host_stream(device_to_host_stream),
host_to_device_stream(host_to_device_stream),
send_device_memory_function(send_device_memory_function),
recv_device_memory_function(recv_device_memory_function),
ffi_execution_context(ffi_execution_context),
additional_compute_streams(additional_compute_streams),
mock_collectives(mock_collectives) {}
absl::string_view Thunk::KindToString(Thunk::Kind kind) {
#define CASE(x) \
case Thunk::x: \
return #x
switch (kind) {
CASE(kAddressComputation);
CASE(kCholesky);
CASE(kCommandBuffer);
CASE(kConditional);
CASE(kConvolution);
CASE(kConvolutionReorder);
CASE(kCopy);
CASE(kCopyDone);
CASE(kCubSort);
CASE(kCublasLtMatmul);
CASE(kCustomCall);
CASE(kCustomKernel);
CASE(kNcclAllGather);
CASE(kNcclAllGatherStart);
CASE(kNcclAllGatherDone);
CASE(kNcclAllReduce);
CASE(kNcclAllReduceStart);
CASE(kNcclAllReduceDone);
CASE(kNcclCollectiveBroadcast);
CASE(kNcclCollectiveBroadcastStart);
CASE(kNcclCollectiveBroadcastDone);
CASE(kNcclCollectivePermute);
CASE(kNcclCollectivePermuteStart);
CASE(kNcclCollectivePermuteDone);
CASE(kNcclReduceScatter);
CASE(kNcclReduceScatterStart);
CASE(kNcclReduceScatterDone);
CASE(kNcclAllToAll);
CASE(kNcclAllToAllStart);
CASE(kNcclAllToAllDone);
CASE(kNcclSend);
CASE(kNcclSendDone);
CASE(kNcclRecv);
CASE(kNcclRecvDone);
CASE(kFft);
CASE(kGemm);
CASE(kInfeed);
CASE(kKernel);
CASE(kMemset32BitValue);
CASE(kMemzero);
CASE(kNorm);
CASE(kOutfeed);
CASE(kSend);
CASE(kSendDone);
CASE(kPartitionId);
CASE(kReplicaId);
CASE(kRecv);
CASE(kRecvDone);
CASE(kSequential);
CASE(kTriangularSolve);
CASE(kWhile);
CASE(kFusedMHA);
CASE(kWaitForStreams);
CASE(kCuDnn);
}
}
absl::StatusOr<se::Stream*> Thunk::GetStreamForExecution(
ExecutionStreamId stream_id, const ExecuteParams& params) {
if (stream_id == kDefaultExecutionStreamId) {
return params.stream;
}
auto iter = params.additional_compute_streams.find(stream_id);
if (iter == params.additional_compute_streams.end()) {
return absl::InvalidArgumentError("Invalid execution stream id.");
}
return iter->second;
}
std::ostream& operator<<(std::ostream& os, Thunk::Kind kind) {
return os << Thunk::KindToString(kind);
}
bool IsReductionCollective(Thunk::Kind kind) {
return kind == Thunk::kNcclAllReduce || kind == Thunk::kNcclAllReduceStart ||
kind == Thunk::kNcclReduceScatter ||
kind == Thunk::kNcclReduceScatterStart;
}
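// Derives ThunkInfo from an HLO instruction: the instruction name becomes
// the profile annotation, and an operation_queue_id in the instruction's
// GpuBackendConfig (when present) selects a non-default execution stream.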
Thunk::ThunkInfo Thunk::ThunkInfo::WithProfileAnnotation(
const HloInstruction* instr) {
ThunkInfo thunk_info;
thunk_info.profile_annotation = instr->name();
auto gpu_backend_config = instr->backend_config<GpuBackendConfig>();
if (gpu_backend_config.ok()) {
thunk_info.execution_stream_id =
std::max<uint64_t>(kDefaultExecutionStreamId.value(),
gpu_backend_config->operation_queue_id());
}
return thunk_info;
}
bool Thunk::IsCollective() const {
switch (kind()) {
case kNcclAllGather:
case kNcclAllGatherStart:
case kNcclAllGatherDone:
case kNcclAllReduce:
case kNcclAllReduceStart:
case kNcclAllReduceDone:
case kNcclCollectiveBroadcast:
case kNcclCollectiveBroadcastStart:
case kNcclCollectiveBroadcastDone:
case kNcclCollectivePermute:
case kNcclCollectivePermuteStart:
case kNcclCollectivePermuteDone:
case kNcclReduceScatter:
case kNcclReduceScatterStart:
case kNcclReduceScatterDone:
case kNcclAllToAll:
case kNcclAllToAllStart:
case kNcclAllToAllDone:
case kNcclSend:
case kNcclSendDone:
case kNcclRecv:
case kNcclRecvDone:
return true;
default:
return false;
}
}
}
} | #include "xla/service/cpu/runtime/thunk.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
TEST(ThunkTest, OkExecuteEvent) {
auto event = Thunk::OkExecuteEvent();
ASSERT_TRUE(event.IsConcrete());
}
}
} |
2,029 | cpp | tensorflow/tensorflow | logical_id_thunk | third_party/xla/xla/backends/cpu/runtime/logical_id_thunk.cc | third_party/xla/xla/backends/cpu/runtime/logical_id_thunk_test.cc | #ifndef XLA_SERVICE_CPU_RUNTIME_LOGICAL_ID_THUNK_H_
#define XLA_SERVICE_CPU_RUNTIME_LOGICAL_ID_THUNK_H_
#include <cstdint>
#include <memory>
#include "absl/status/statusor.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/computation_placer.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/service/global_device_id.h"
#include "xla/tsl/concurrency/async_value_ref.h"
namespace xla::cpu {
enum class LogicalIdKind {
kPartitionId,
kReplicaId,
};
template <LogicalIdKind type>
class LogicalIdThunk : public Thunk {
public:
static absl::StatusOr<std::unique_ptr<LogicalIdThunk>> Create(
Info info, BufferAllocation::Slice logical_id_buffer);
tsl::AsyncValueRef<ExecuteEvent> Execute(const ExecuteParams& params) final;
BufferUses buffer_uses() const final;
private:
LogicalIdThunk(Info info, BufferAllocation::Slice logical_id_buffer);
absl::StatusOr<int32_t> GetIdForDevice(
const DeviceAssignment* device_assignment,
GlobalDeviceId device_id) const;
BufferAllocation::Slice logical_id_buffer_;
};
class ReplicaIdThunk final : public LogicalIdThunk<LogicalIdKind::kReplicaId> {
};
class PartitionIdThunk final
: public LogicalIdThunk<LogicalIdKind::kPartitionId> {};
}
#endif
#include "xla/service/cpu/runtime/logical_id_thunk.h"
#include <cstdint>
#include <cstring>
#include <memory>
#include <utility>
#include "absl/memory/memory.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/computation_placer.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/service/global_device_id.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla::cpu {
static Thunk::Kind ToThunkKind(LogicalIdKind logical_id_kind) {
switch (logical_id_kind) {
case LogicalIdKind::kPartitionId:
return Thunk::Kind::kPartitionId;
case LogicalIdKind::kReplicaId:
return Thunk::Kind::kReplicaId;
}
}
template <LogicalIdKind type>
absl::StatusOr<std::unique_ptr<LogicalIdThunk<type>>>
LogicalIdThunk<type>::Create(Info info,
BufferAllocation::Slice logical_id_buffer) {
return absl::WrapUnique(
new LogicalIdThunk(std::move(info), logical_id_buffer));
}
template <LogicalIdKind type>
LogicalIdThunk<type>::LogicalIdThunk(Info info,
BufferAllocation::Slice logical_id_buffer)
: Thunk(ToThunkKind(type), info), logical_id_buffer_(logical_id_buffer) {}
template <LogicalIdKind type>
static constexpr auto ToString() {
if constexpr (type == LogicalIdKind::kPartitionId) {
return "Partition";
} else if constexpr (type == LogicalIdKind::kReplicaId) {
return "Replica";
}
}
template <LogicalIdKind type>
absl::StatusOr<int32_t> LogicalIdThunk<type>::GetIdForDevice(
const DeviceAssignment* device_assignment, GlobalDeviceId device_id) const {
if constexpr (type == LogicalIdKind::kPartitionId) {
return device_assignment->PartitionIdForDevice(device_id);
} else if constexpr (type == LogicalIdKind::kReplicaId) {
return device_assignment->ReplicaIdForDevice(device_id);
}
}
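// Looks up this device's replica or partition id in the device assignment
// and writes it as a single int32_t into the destination buffer.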
template <LogicalIdKind type>
tsl::AsyncValueRef<typename LogicalIdThunk<type>::ExecuteEvent>
LogicalIdThunk<type>::Execute(const ExecuteParams& params) {
tsl::profiler::TraceMe trace([&] { return TraceMeEncode(); });
TF_ASSIGN_OR_RETURN(
se::DeviceMemoryBase logical_id_data,
params.buffer_allocations->GetDeviceAddress(logical_id_buffer_));
TF_RET_CHECK(logical_id_data.size() == sizeof(int32_t))
<< "Logical id buffer must be able to fit logical id value";
TF_RET_CHECK(params.collective_params)
<< ToString<type>() << " id requires collective params";
TF_ASSIGN_OR_RETURN(
int32_t logical_id,
GetIdForDevice(params.collective_params->device_assignment,
params.collective_params->global_device_id));
VLOG(3) << absl::StreamFormat("%s id: %d", ToString<type>(), logical_id);
VLOG(3) << absl::StreamFormat(" logical_id: slice %s (%p)",
logical_id_buffer_.ToString(),
logical_id_data.opaque());
std::memcpy(logical_id_data.opaque(), &logical_id, sizeof(int32_t));
return OkExecuteEvent();
}
template <LogicalIdKind type>
using BufferUses = typename LogicalIdThunk<type>::BufferUses;
template <LogicalIdKind type>
BufferUses<type> LogicalIdThunk<type>::buffer_uses() const {
return {BufferUse::Write(logical_id_buffer_)};
}
template class LogicalIdThunk<LogicalIdKind::kReplicaId>;
template class LogicalIdThunk<LogicalIdKind::kPartitionId>;
} | #include "xla/service/cpu/runtime/logical_id_thunk.h"
#include <cstdint>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/executable_run_options.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/runtime/buffer_allocations.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
absl::StatusOr<DeviceAssignment> CreateDeviceAssignment(
std::vector<std::vector<int64_t>> devices) {
const auto computation_count = devices.size();
if (devices.empty()) {
return absl::InternalError("Devices must not be empty.");
}
const auto replica_count = devices[0].size();
DeviceAssignment device_assignment(replica_count, computation_count);
for (int64_t partition = 0; partition < computation_count; ++partition) {
for (int64_t replica = 0; replica < replica_count; ++replica) {
device_assignment(replica, partition) = devices[partition][replica];
}
}
return device_assignment;
}
TEST(LogicalIdThunkTest, GetReplicaId) {
std::vector<int32_t> dst(1, -1);
std::vector<MaybeOwningDeviceMemory> buffers;
buffers.emplace_back(se::DeviceMemoryBase(dst.data(), sizeof(int32_t)));
BufferAllocation alloc(0, sizeof(int32_t), 0);
BufferAllocation::Slice id_slice(&alloc, /*offset=*/0,
/*size=*/sizeof(int32_t));
std::string name(Thunk::KindToString(Thunk::Kind::kReplicaId));
TF_ASSERT_OK_AND_ASSIGN(auto thunk, ReplicaIdThunk::Create({name}, id_slice));
BufferAllocations allocations(buffers);
TF_ASSERT_OK_AND_ASSIGN(DeviceAssignment device_assn,
CreateDeviceAssignment({{0, 1}}));
ExecutableRunOptions run_options;
run_options.set_device_ordinal(0);
run_options.set_device_assignment(&device_assn);
TF_ASSERT_OK_AND_ASSIGN(Thunk::CollectiveExecuteParams collective_params,
Thunk::CollectiveExecuteParams::Create(&run_options));
Thunk::ExecuteParams params;
params.buffer_allocations = &allocations;
params.collective_params = &collective_params;
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
EXPECT_EQ(dst[0], 0);
}
TEST(LogicalIdThunkTest, GetPartitionId) {
std::vector<int32_t> dst(2, -1);
std::vector<MaybeOwningDeviceMemory> buffers;
static constexpr auto kDataSize = 2 * sizeof(int32_t);
buffers.emplace_back(se::DeviceMemoryBase(dst.data(), kDataSize));
BufferAllocation alloc(0, kDataSize, 0);
BufferAllocation::Slice id_slice(&alloc, /*offset=*/sizeof(int32_t),
/*size=*/sizeof(int32_t));
std::string name(Thunk::KindToString(Thunk::Kind::kPartitionId));
TF_ASSERT_OK_AND_ASSIGN(auto thunk,
PartitionIdThunk::Create({name}, id_slice));
BufferAllocations allocations(buffers);
TF_ASSERT_OK_AND_ASSIGN(DeviceAssignment device_assn,
CreateDeviceAssignment({{0}, {1}}));
ExecutableRunOptions run_options;
run_options.set_device_ordinal(0);
run_options.set_device_assignment(&device_assn);
TF_ASSERT_OK_AND_ASSIGN(Thunk::CollectiveExecuteParams collective_params,
Thunk::CollectiveExecuteParams::Create(&run_options));
Thunk::ExecuteParams params;
params.buffer_allocations = &allocations;
params.collective_params = &collective_params;
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
EXPECT_EQ(dst[0], -1);
EXPECT_EQ(dst[1], 0);
}
}
} |
2,030 | cpp | tensorflow/tensorflow | conditional_thunk | third_party/xla/xla/backends/cpu/runtime/conditional_thunk.cc | third_party/xla/xla/backends/cpu/runtime/conditional_thunk_test.cc | #ifndef XLA_SERVICE_GPU_RUNTIME_CONDITIONAL_THUNK_H_
#define XLA_SERVICE_GPU_RUNTIME_CONDITIONAL_THUNK_H_
#include <cstdint>
#include <memory>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/runtime/sequential_thunk.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/memory_allocation.h"
#include "xla/stream_executor/stream_executor.h"
namespace xla {
namespace gpu {
struct ConditionalThunkConfig {
bool branch_index_is_bool;
int64_t branch_count;
std::vector<std::unique_ptr<SequentialThunk>> branch_thunks;
};
class ConditionalThunk : public Thunk {
public:
ConditionalThunk(ThunkInfo thunk_info, ConditionalThunkConfig config,
const BufferAllocation::Slice& branch_index_buffer_index);
ConditionalThunk(const ConditionalThunk&) = delete;
ConditionalThunk& operator=(const ConditionalThunk&) = delete;
absl::Status Prepare(const PrepareParams& params,
ResourceRequests& resource_requests) override;
absl::Status Initialize(const InitializeParams& params) override;
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
absl::Span<const std::unique_ptr<SequentialThunk>> branch_thunks() const {
return config_.branch_thunks;
}
const BufferAllocation::Slice& branch_index_buffer() const {
return branch_index_buffer_index_;
}
private:
const ConditionalThunkConfig config_;
const BufferAllocation::Slice branch_index_buffer_index_;
absl::Mutex mutex_;
absl::flat_hash_map<se::StreamExecutor*,
std::unique_ptr<se::MemoryAllocation>>
predicates_ ABSL_GUARDED_BY(mutex_);
};
}
}
#endif
#include "xla/service/gpu/runtime/conditional_thunk.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <variant>
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/service/gpu/variant_visitor.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/memory_allocation.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
ConditionalThunk::ConditionalThunk(
ThunkInfo thunk_info, ConditionalThunkConfig config,
const BufferAllocation::Slice& branch_index_buffer_index)
: Thunk(Kind::kConditional, thunk_info),
config_(std::move(config)),
branch_index_buffer_index_(branch_index_buffer_index) {}
absl::Status ConditionalThunk::Prepare(const PrepareParams& params,
ResourceRequests& resource_requests) {
if (config_.branch_index_is_bool) {
TF_RET_CHECK(config_.branch_thunks.size() == 2);
} else {
TF_RET_CHECK(!config_.branch_thunks.empty());
}
for (auto& branch_thunk : config_.branch_thunks) {
TF_RETURN_IF_ERROR(branch_thunk->Prepare(params, resource_requests));
}
return absl::OkStatus();
}
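// Initializes every branch and lazily allocates a small per-executor host
// buffer that ExecuteOnStream uses to read back the branch index.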
absl::Status ConditionalThunk::Initialize(const InitializeParams& params) {
if (config_.branch_index_is_bool) {
TF_RET_CHECK(config_.branch_thunks.size() == 2);
} else {
TF_RET_CHECK(!config_.branch_thunks.empty());
}
for (auto& branch_thunk : config_.branch_thunks) {
TF_RETURN_IF_ERROR(branch_thunk->Initialize(params));
}
absl::MutexLock lock(&mutex_);
if (auto it = predicates_.find(params.executor); it == predicates_.end()) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<se::MemoryAllocation> allocation,
params.executor->HostMemoryAllocate(
config_.branch_index_is_bool ? sizeof(bool) : sizeof(int32_t)));
predicates_.emplace(params.executor, std::move(allocation));
}
return absl::OkStatus();
}
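// Copies the branch index (s32) or predicate (bool) from device memory to
// the host buffer, blocks until the copy completes, then runs the chosen
// branch. Per HLO conditional semantics, a true predicate selects branch 0
// and an out-of-range index is clamped to the last branch.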
absl::Status ConditionalThunk::ExecuteOnStream(const ExecuteParams& params) {
auto& stream = *params.stream;
auto branch_index_or_pred = [&]() -> std::variant<int32_t*, bool*> {
absl::MutexLock lock(&mutex_);
se::StreamExecutor* executor = stream.parent();
if (config_.branch_index_is_bool) {
return reinterpret_cast<bool*>(predicates_.at(executor)->opaque());
} else {
return reinterpret_cast<int32_t*>(predicates_.at(executor)->opaque());
}
}();
se::DeviceMemoryBase branch_index_address =
params.buffer_allocations->GetDeviceAddress(branch_index_buffer_index_);
if (config_.branch_index_is_bool) {
TF_RETURN_IF_ERROR(stream.Memcpy(std::get<bool*>(branch_index_or_pred),
branch_index_address, sizeof(bool)));
} else {
TF_RETURN_IF_ERROR(stream.Memcpy(std::get<int32_t*>(branch_index_or_pred),
branch_index_address, sizeof(int32_t)));
}
if (absl::Status blocked = stream.BlockHostUntilDone(); !blocked.ok()) {
return Internal("Failed to retrieve branch_index value on stream %p: %s.",
&stream, blocked.message());
}
int32_t branch_index = std::visit(
VariantVisitor{[](int32_t* branch_index) { return *branch_index; },
[](bool* pred) { return *pred ? 0 : 1; }},
branch_index_or_pred);
if (branch_index < 0 || branch_index >= config_.branch_count) {
branch_index = config_.branch_count - 1;
}
TF_RETURN_IF_ERROR(
config_.branch_thunks[branch_index]->ExecuteOnStream(params));
return absl::OkStatus();
}
}
} | #include "xla/service/cpu/runtime/conditional_thunk.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/service/cpu/runtime/thunk_testlib.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
TEST(ConditionalThunkTest, BufferUses) {
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice branch_index_slice(&alloc, 0, sizeof(int32_t));
BufferAllocation::Slice read_slice(&alloc, 10, 10);
std::vector<ThunkSequence> branch_sequences(1);
branch_sequences[0].push_back(
std::make_unique<BufferUseThunk>(BufferUse::Read(read_slice)));
TF_ASSERT_OK_AND_ASSIGN(
auto thunk, ConditionalThunk::Create({"conditional"}, branch_index_slice,
std::move(branch_sequences)));
EXPECT_EQ(thunk->buffer_uses().size(), 2);
EXPECT_EQ(thunk->buffer_uses()[0], BufferUse::Read(branch_index_slice));
EXPECT_EQ(thunk->buffer_uses()[1], BufferUse::Read(read_slice));
}
}
} |
2,031 | cpp | tensorflow/tensorflow | graphcycles | third_party/xla/xla/service/graphcycles/graphcycles.cc | third_party/xla/xla/service/graphcycles/graphcycles_test.cc | #ifndef XLA_SERVICE_GRAPHCYCLES_GRAPHCYCLES_H_
#define XLA_SERVICE_GRAPHCYCLES_GRAPHCYCLES_H_
#include <vector>
#include <optional>
#include "absl/types/span.h"
namespace tensorflow {
class GraphCycles {
public:
GraphCycles();
~GraphCycles();
int32_t NewNode();
void RemoveNode(int32_t node);
bool InsertEdge(int32_t source_node, int32_t dest_node);
void RemoveEdge(int32_t source_node, int32_t dest_node);
bool HasEdge(int32_t source_node, int32_t dest_node) const;
std::optional<int32_t> ContractEdge(int32_t a, int32_t b);
bool CanContractEdge(int32_t a, int32_t b);
bool IsReachable(int32_t source_node, int32_t dest_node) const;
bool IsReachableNonConst(int32_t source_node, int32_t dest_node);
void *GetNodeData(int32_t node) const;
void SetNodeData(int32_t node, void *data);
int FindPath(int32_t source, int32_t dest, int max_path_len,
int32_t path[]) const;
bool CheckInvariants() const;
absl::Span<const int32_t> Successors(int32_t node) const;
absl::Span<const int32_t> Predecessors(int32_t node) const;
std::vector<int32_t> SuccessorsCopy(int32_t node) const;
std::vector<int32_t> PredecessorsCopy(int32_t node) const;
std::vector<int32_t> AllNodesInPostOrder() const;
std::string DebugString() const;
struct Rep;
private:
Rep *rep_;
GraphCycles(const GraphCycles &) = delete;
GraphCycles &operator=(const GraphCycles &) = delete;
};
}
#endif
#include "xla/service/graphcycles/graphcycles.h"
#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/service/graphcycles/ordered_set.h"
#include "tsl/platform/logging.h"
namespace tensorflow {
namespace {
using NodeSet = absl::flat_hash_set<int32_t>;
using OrderedNodeSet = OrderedSet<int32_t>;
struct Node {
int32_t rank;
bool visited;
};
struct NodeIO {
OrderedNodeSet in;
OrderedNodeSet out;
};
}
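// Internal representation. Every node carries a distinct integer rank, and
// the ranks of live nodes always form a valid topological order of the
// graph (see CheckInvariants). The delta/list/merged/stack vectors are
// scratch space reused across operations to avoid reallocation.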
struct GraphCycles::Rep {
std::vector<Node> nodes_;
std::vector<NodeIO> node_io_;
std::vector<int32_t> free_nodes_;
std::vector<int32_t> deltaf_;
std::vector<int32_t> deltab_;
std::vector<int32_t> list_;
std::vector<int32_t> merged_;
std::vector<int32_t> stack_;  // emulates the recursion stack during DFS
std::vector<void*> node_data_;
};
GraphCycles::GraphCycles() : rep_(new Rep) {}
GraphCycles::~GraphCycles() {
delete rep_;
}
bool GraphCycles::CheckInvariants() const {
Rep* r = rep_;
NodeSet ranks;
for (size_t x = 0; x < r->nodes_.size(); x++) {
Node* nx = &r->nodes_[x];
if (nx->visited) {
LOG(FATAL) << "Did not clear visited marker on node " << x;
}
if (!ranks.insert(nx->rank).second) {
LOG(FATAL) << "Duplicate occurrence of rank " << nx->rank;
}
NodeIO* nx_io = &r->node_io_[x];
for (int32_t y : nx_io->out.GetSequence()) {
Node* ny = &r->nodes_[y];
if (nx->rank >= ny->rank) {
LOG(FATAL) << "Edge " << x << "->" << y << " has bad rank assignment "
<< nx->rank << "->" << ny->rank;
}
}
}
return true;
}
int32_t GraphCycles::NewNode() {
if (rep_->free_nodes_.empty()) {
Node n;
n.visited = false;
n.rank = rep_->nodes_.size();
rep_->nodes_.emplace_back(n);
rep_->node_io_.emplace_back();
rep_->node_data_.push_back(nullptr);
return n.rank;
} else {
int32_t r = rep_->free_nodes_.back();
rep_->free_nodes_.pop_back();
rep_->node_data_[r] = nullptr;
return r;
}
}
void GraphCycles::RemoveNode(int32_t node) {
NodeIO* x = &rep_->node_io_[node];
for (int32_t y : x->out.GetSequence()) {
rep_->node_io_[y].in.Erase(node);
}
for (int32_t y : x->in.GetSequence()) {
rep_->node_io_[y].out.Erase(node);
}
x->in.Clear();
x->out.Clear();
rep_->free_nodes_.push_back(node);
}
void* GraphCycles::GetNodeData(int32_t node) const {
return rep_->node_data_[node];
}
void GraphCycles::SetNodeData(int32_t node, void* data) {
rep_->node_data_[node] = data;
}
bool GraphCycles::HasEdge(int32_t x, int32_t y) const {
return rep_->node_io_[x].out.Contains(y);
}
void GraphCycles::RemoveEdge(int32_t x, int32_t y) {
rep_->node_io_[x].out.Erase(y);
rep_->node_io_[y].in.Erase(x);
}
static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound);
static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound);
static void Reorder(GraphCycles::Rep* r);
static void Sort(absl::Span<const Node>, std::vector<int32_t>* delta);
static void MoveToList(GraphCycles::Rep* r, std::vector<int32_t>* src,
std::vector<int32_t>* dst);
static void ClearVisitedBits(GraphCycles::Rep* r,
absl::Span<const int32_t> visited_indices);
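// Adds the edge x->y, returning false iff it would create a cycle (the
// graph is left unchanged in that case); a self-edge counts as a cycle.
// Rank violations are repaired with the incremental topological-ordering
// scheme of Pearce & Kelly: a bounded forward/backward DFS over the
// affected region followed by a local rank reorder.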
bool GraphCycles::InsertEdge(int32_t x, int32_t y) {
if (x == y) return false;
Rep* r = rep_;
NodeIO* nx_io = &r->node_io_[x];
if (!nx_io->out.Insert(y)) {
return true;
}
NodeIO* ny_io = &r->node_io_[y];
ny_io->in.Insert(x);
Node* nx = &r->nodes_[x];
Node* ny = &r->nodes_[y];
if (nx->rank <= ny->rank) {
return true;
}
if (!ForwardDFS(r, y, nx->rank)) {
nx_io->out.Erase(y);
ny_io->in.Erase(x);
ClearVisitedBits(r, r->deltaf_);
return false;
}
BackwardDFS(r, x, ny->rank);
Reorder(r);
return true;
}
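// Visits nodes reachable from n whose rank is below upper_bound, collecting
// them in deltaf_. Returns false if a node with rank == upper_bound is hit,
// meaning the edge being inserted closes a cycle.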
static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound) {
r->deltaf_.clear();
r->stack_.clear();
r->stack_.push_back(n);
while (!r->stack_.empty()) {
n = r->stack_.back();
r->stack_.pop_back();
Node* nn = &r->nodes_[n];
if (nn->visited) continue;
nn->visited = true;
r->deltaf_.push_back(n);
NodeIO* nn_io = &r->node_io_[n];
for (auto w : nn_io->out.GetSequence()) {
Node* nw = &r->nodes_[w];
if (nw->rank == upper_bound) {
return false;
}
if (!nw->visited && nw->rank < upper_bound) {
r->stack_.push_back(w);
}
}
}
return true;
}
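// Visits nodes that can reach n and whose rank is above lower_bound,
// collecting them in deltab_.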
static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound) {
r->deltab_.clear();
r->stack_.clear();
r->stack_.push_back(n);
while (!r->stack_.empty()) {
n = r->stack_.back();
r->stack_.pop_back();
Node* nn = &r->nodes_[n];
if (nn->visited) continue;
nn->visited = true;
r->deltab_.push_back(n);
NodeIO* nn_io = &r->node_io_[n];
for (auto w : nn_io->in.GetSequence()) {
Node* nw = &r->nodes_[w];
if (!nw->visited && lower_bound < nw->rank) {
r->stack_.push_back(w);
}
}
}
}
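// Redistributes the ranks already held by the nodes in deltab_ and deltaf_
// so that deltab_ nodes precede deltaf_ nodes, restoring the topological
// order while leaving all other ranks untouched.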
static void Reorder(GraphCycles::Rep* r) {
Sort(r->nodes_, &r->deltab_);
Sort(r->nodes_, &r->deltaf_);
r->list_.clear();
MoveToList(r, &r->deltab_, &r->list_);
MoveToList(r, &r->deltaf_, &r->list_);
r->merged_.resize(r->deltab_.size() + r->deltaf_.size());
std::merge(r->deltab_.begin(), r->deltab_.end(), r->deltaf_.begin(),
r->deltaf_.end(), r->merged_.begin());
for (size_t i = 0; i < r->list_.size(); i++) {
r->nodes_[r->list_[i]].rank = r->merged_[i];
}
}
static void Sort(absl::Span<const Node> nodes, std::vector<int32_t>* delta) {
std::sort(delta->begin(), delta->end(), [&](int32_t a, int32_t b) {
return nodes[a].rank < nodes[b].rank;
});
}
static void MoveToList(GraphCycles::Rep* r, std::vector<int32_t>* src,
std::vector<int32_t>* dst) {
for (size_t i = 0; i < src->size(); i++) {
int32_t w = (*src)[i];
(*src)[i] = r->nodes_[w].rank;
r->nodes_[w].visited = false;
dst->push_back(w);
}
}
static void ClearVisitedBits(GraphCycles::Rep* r,
absl::Span<const int32_t> visited_indices) {
for (auto index : visited_indices) {
r->nodes_[index].visited = false;
}
}
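// Iterative DFS from x to y. A -1 sentinel pushed after each node marks
// where to shorten the current path when backtracking; only the first
// max_path_len entries of the found path are written to `path`, though the
// full path length is returned (0 if no path exists).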
int GraphCycles::FindPath(int32_t x, int32_t y, int max_path_len,
int32_t path[]) const {
int path_len = 0;
Rep* r = rep_;
NodeSet seen;
r->stack_.clear();
r->stack_.push_back(x);
while (!r->stack_.empty()) {
int32_t n = r->stack_.back();
r->stack_.pop_back();
if (n < 0) {
path_len--;
continue;
}
if (path_len < max_path_len) {
path[path_len] = n;
}
path_len++;
r->stack_.push_back(-1);
if (n == y) {
return path_len;
}
for (auto w : r->node_io_[n].out.GetSequence()) {
if (seen.insert(w).second) {
r->stack_.push_back(w);
}
}
}
return 0;
}
bool GraphCycles::IsReachable(int32_t x, int32_t y) const {
return FindPath(x, y, 0, nullptr) > 0;
}
bool GraphCycles::IsReachableNonConst(int32_t x, int32_t y) {
if (x == y) return true;
Rep* r = rep_;
Node* nx = &r->nodes_[x];
Node* ny = &r->nodes_[y];
if (nx->rank >= ny->rank) {
return false;
}
bool reachable = !ForwardDFS(r, x, ny->rank);
ClearVisitedBits(r, r->deltaf_);
return reachable;
}
bool GraphCycles::CanContractEdge(int32_t a, int32_t b) {
CHECK(HasEdge(a, b)) << "No edge exists from " << a << " to " << b;
RemoveEdge(a, b);
bool reachable = IsReachableNonConst(a, b);
InsertEdge(a, b);
return !reachable;
}
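// Contracts the edge a->b, merging the two nodes, and returns the surviving
// node; returns std::nullopt (leaving the graph unchanged) when another
// path from a to b exists, since contraction would then create a cycle.
// The endpoint with the smaller total degree is the one absorbed.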
std::optional<int32_t> GraphCycles::ContractEdge(int32_t a, int32_t b) {
CHECK(HasEdge(a, b));
RemoveEdge(a, b);
if (IsReachableNonConst(a, b)) {
InsertEdge(a, b);
return std::nullopt;
}
if (rep_->node_io_[b].in.Size() + rep_->node_io_[b].out.Size() >
rep_->node_io_[a].in.Size() + rep_->node_io_[a].out.Size()) {
std::swap(a, b);
}
NodeIO* nb_io = &rep_->node_io_[b];
OrderedNodeSet out = std::move(nb_io->out);
OrderedNodeSet in = std::move(nb_io->in);
for (int32_t y : out.GetSequence()) {
rep_->node_io_[y].in.Erase(b);
}
for (int32_t y : in.GetSequence()) {
rep_->node_io_[y].out.Erase(b);
}
rep_->free_nodes_.push_back(b);
rep_->node_io_[a].out.Reserve(rep_->node_io_[a].out.Size() + out.Size());
for (int32_t y : out.GetSequence()) {
InsertEdge(a, y);
}
rep_->node_io_[a].in.Reserve(rep_->node_io_[a].in.Size() + in.Size());
for (int32_t y : in.GetSequence()) {
InsertEdge(y, a);
}
return a;
}
absl::Span<const int32_t> GraphCycles::Successors(int32_t node) const {
return rep_->node_io_[node].out.GetSequence();
}
absl::Span<const int32_t> GraphCycles::Predecessors(int32_t node) const {
return rep_->node_io_[node].in.GetSequence();
}
std::vector<int32_t> GraphCycles::SuccessorsCopy(int32_t node) const {
absl::Span<const int32_t> successors = Successors(node);
return std::vector<int32_t>(successors.begin(), successors.end());
}
std::vector<int32_t> GraphCycles::PredecessorsCopy(int32_t node) const {
absl::Span<const int32_t> predecessors = Predecessors(node);
return std::vector<int32_t>(predecessors.begin(), predecessors.end());
}
namespace {
void SortInPostOrder(absl::Span<const Node> nodes,
std::vector<int32_t>* to_sort) {
absl::c_sort(*to_sort, [&](int32_t a, int32_t b) {
DCHECK(a == b || nodes[a].rank != nodes[b].rank);
return nodes[a].rank > nodes[b].rank;
});
}
}
std::vector<int32_t> GraphCycles::AllNodesInPostOrder() const {
absl::flat_hash_set<int32_t> free_nodes_set;
absl::c_copy(rep_->free_nodes_,
std::inserter(free_nodes_set, free_nodes_set.begin()));
std::vector<int32_t> all_nodes;
all_nodes.reserve(rep_->nodes_.size() - free_nodes_set.size());
for (int64_t i = 0, e = rep_->nodes_.size(); i < e; i++) {
if (!free_nodes_set.contains(i)) {
all_nodes.push_back(i);
}
}
SortInPostOrder(rep_->nodes_, &all_nodes);
return all_nodes;
}
std::string GraphCycles::DebugString() const {
absl::flat_hash_set<int32_t> free_nodes_set(rep_->free_nodes_.begin(),
rep_->free_nodes_.end());
std::string result = "digraph {\n";
for (int i = 0, end = rep_->nodes_.size(); i < end; i++) {
if (free_nodes_set.contains(i)) {
continue;
}
for (int32_t succ : rep_->node_io_[i].out.GetSequence()) {
absl::StrAppend(&result, " \"", i, "\" -> \"", succ, "\"\n");
}
}
absl::StrAppend(&result, "}\n");
return result;
}
} | #include "xla/service/graphcycles/graphcycles.h"
#include <cstdint>
#include <optional>
#include <random>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/random/random.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
typedef std::vector<int> Nodes;
struct Edge {
int from;
int to;
};
typedef std::vector<Edge> Edges;
static bool IsReachable(Edges *edges, int from, int to,
absl::flat_hash_set<int> *seen) {
seen->insert(from);
if (from == to) return true;
for (int i = 0; i != edges->size(); i++) {
Edge *edge = &(*edges)[i];
if (edge->from == from) {
if (edge->to == to) {
return true;
} else if (seen->find(edge->to) == seen->end() &&
IsReachable(edges, edge->to, to, seen)) {
return true;
}
}
}
return false;
}
static void PrintNodes(Nodes *nodes) {
LOG(INFO) << "NODES (" << nodes->size() << ")";
for (int i = 0; i != nodes->size(); i++) {
LOG(INFO) << (*nodes)[i];
}
}
static void PrintEdges(Edges *edges) {
LOG(INFO) << "EDGES (" << edges->size() << ")";
for (int i = 0; i != edges->size(); i++) {
int a = (*edges)[i].from;
int b = (*edges)[i].to;
LOG(INFO) << a << " " << b;
}
LOG(INFO) << "---";
}
static void PrintGCEdges(Nodes *nodes, tensorflow::GraphCycles *gc) {
LOG(INFO) << "GC EDGES";
for (int i = 0; i != nodes->size(); i++) {
for (int j = 0; j != nodes->size(); j++) {
int a = (*nodes)[i];
int b = (*nodes)[j];
if (gc->HasEdge(a, b)) {
LOG(INFO) << a << " " << b;
}
}
}
LOG(INFO) << "---";
}
static void PrintTransitiveClosure(Nodes *nodes, Edges *edges,
tensorflow::GraphCycles *gc) {
LOG(INFO) << "Transitive closure";
for (int i = 0; i != nodes->size(); i++) {
for (int j = 0; j != nodes->size(); j++) {
int a = (*nodes)[i];
int b = (*nodes)[j];
absl::flat_hash_set<int> seen;
if (IsReachable(edges, a, b, &seen)) {
LOG(INFO) << a << " " << b;
}
}
}
LOG(INFO) << "---";
}
static void PrintGCTransitiveClosure(Nodes *nodes,
tensorflow::GraphCycles *gc) {
LOG(INFO) << "GC Transitive closure";
for (int i = 0; i != nodes->size(); i++) {
for (int j = 0; j != nodes->size(); j++) {
int a = (*nodes)[i];
int b = (*nodes)[j];
if (gc->IsReachable(a, b)) {
LOG(INFO) << a << " " << b;
}
}
}
LOG(INFO) << "---";
}
static void CheckTransitiveClosure(Nodes *nodes, Edges *edges,
tensorflow::GraphCycles *gc) {
absl::flat_hash_set<int> seen;
for (int i = 0; i != nodes->size(); i++) {
for (int j = 0; j != nodes->size(); j++) {
seen.clear();
int a = (*nodes)[i];
int b = (*nodes)[j];
bool gc_reachable = gc->IsReachable(a, b);
CHECK_EQ(gc_reachable, gc->IsReachableNonConst(a, b));
bool reachable = IsReachable(edges, a, b, &seen);
if (gc_reachable != reachable) {
PrintEdges(edges);
PrintGCEdges(nodes, gc);
PrintTransitiveClosure(nodes, edges, gc);
PrintGCTransitiveClosure(nodes, gc);
LOG(FATAL) << "gc_reachable " << gc_reachable << " reachable "
<< reachable << " a " << a << " b " << b;
}
}
}
}
static void CheckEdges(Nodes *nodes, Edges *edges,
tensorflow::GraphCycles *gc) {
int count = 0;
for (int i = 0; i != edges->size(); i++) {
int a = (*edges)[i].from;
int b = (*edges)[i].to;
if (!gc->HasEdge(a, b)) {
PrintEdges(edges);
PrintGCEdges(nodes, gc);
LOG(FATAL) << "!gc->HasEdge(" << a << ", " << b << ")";
}
}
for (int i = 0; i != nodes->size(); i++) {
for (int j = 0; j != nodes->size(); j++) {
int a = (*nodes)[i];
int b = (*nodes)[j];
if (gc->HasEdge(a, b)) {
count++;
}
}
}
if (count != edges->size()) {
PrintEdges(edges);
PrintGCEdges(nodes, gc);
LOG(FATAL) << "edges->size() " << edges->size() << " count " << count;
}
}
static int RandomNode(std::mt19937 *rnd, Nodes *nodes) {
std::uniform_int_distribution<int> distribution(0, nodes->size() - 1);
return distribution(*rnd);
}
static int RandomEdge(std::mt19937 *rnd, Edges *edges) {
std::uniform_int_distribution<int> distribution(0, edges->size() - 1);
return distribution(*rnd);
}
static int EdgeIndex(Edges *edges, int from, int to) {
int i = 0;
while (i != edges->size() &&
((*edges)[i].from != from || (*edges)[i].to != to)) {
i++;
}
return i == edges->size() ? -1 : i;
}
TEST(GraphCycles, RandomizedTest) {
Nodes nodes;
Edges edges;
tensorflow::GraphCycles graph_cycles;
static const int kMaxNodes = 7;
static const int kDataOffset = 17;
int n = 100000;
int op = 0;
std::mt19937 rnd(tsl::testing::RandomSeed() + 1);
for (int iter = 0; iter != n; iter++) {
if ((iter % 10000) == 0) VLOG(0) << "Iter " << iter << " of " << n;
if (VLOG_IS_ON(3)) {
LOG(INFO) << "===============";
LOG(INFO) << "last op " << op;
PrintNodes(&nodes);
PrintEdges(&edges);
PrintGCEdges(&nodes, &graph_cycles);
}
for (int i = 0; i != nodes.size(); i++) {
ASSERT_EQ(reinterpret_cast<intptr_t>(graph_cycles.GetNodeData(i)),
i + kDataOffset)
<< " node " << i;
}
CheckEdges(&nodes, &edges, &graph_cycles);
CheckTransitiveClosure(&nodes, &edges, &graph_cycles);
std::uniform_int_distribution<int> distribution(0, 5);
op = distribution(rnd);
switch (op) {
case 0:
if (nodes.size() < kMaxNodes) {
int new_node = graph_cycles.NewNode();
ASSERT_NE(-1, new_node);
VLOG(1) << "adding node " << new_node;
ASSERT_EQ(nullptr, graph_cycles.GetNodeData(new_node));
graph_cycles.SetNodeData(
new_node, reinterpret_cast<void *>(
static_cast<intptr_t>(new_node + kDataOffset)));
ASSERT_GE(new_node, 0);
for (int i = 0; i != nodes.size(); i++) {
ASSERT_NE(nodes[i], new_node);
}
nodes.push_back(new_node);
}
break;
case 1:
if (!nodes.empty()) {
int node_index = RandomNode(&rnd, &nodes);
int node = nodes[node_index];
nodes[node_index] = nodes.back();
nodes.pop_back();
VLOG(1) << "removing node " << node;
graph_cycles.RemoveNode(node);
int i = 0;
while (i != edges.size()) {
if (edges[i].from == node || edges[i].to == node) {
edges[i] = edges.back();
edges.pop_back();
} else {
i++;
}
}
}
break;
case 2:
if (!nodes.empty()) {
int from = RandomNode(&rnd, &nodes);
int to = RandomNode(&rnd, &nodes);
if (EdgeIndex(&edges, nodes[from], nodes[to]) == -1) {
if (graph_cycles.InsertEdge(nodes[from], nodes[to])) {
Edge new_edge;
new_edge.from = nodes[from];
new_edge.to = nodes[to];
edges.push_back(new_edge);
} else {
absl::flat_hash_set<int> seen;
ASSERT_TRUE(IsReachable(&edges, nodes[to], nodes[from], &seen))
<< "Edge " << nodes[to] << "->" << nodes[from];
}
}
}
break;
case 3:
if (!edges.empty()) {
int i = RandomEdge(&rnd, &edges);
int from = edges[i].from;
int to = edges[i].to;
ASSERT_EQ(i, EdgeIndex(&edges, from, to));
edges[i] = edges.back();
edges.pop_back();
ASSERT_EQ(-1, EdgeIndex(&edges, from, to));
VLOG(1) << "removing edge " << from << " " << to;
graph_cycles.RemoveEdge(from, to);
}
break;
case 4:
if (!nodes.empty()) {
int from = RandomNode(&rnd, &nodes);
int to = RandomNode(&rnd, &nodes);
int32_t path[2 * kMaxNodes];
int path_len = graph_cycles.FindPath(nodes[from], nodes[to],
2 * kMaxNodes, path);
absl::flat_hash_set<int> seen;
bool reachable = IsReachable(&edges, nodes[from], nodes[to], &seen);
bool gc_reachable = graph_cycles.IsReachable(nodes[from], nodes[to]);
ASSERT_EQ(gc_reachable,
graph_cycles.IsReachableNonConst(nodes[from], nodes[to]));
ASSERT_EQ(path_len != 0, reachable);
ASSERT_EQ(path_len != 0, gc_reachable);
ASSERT_LE(path_len, kMaxNodes + 1);
if (path_len != 0) {
ASSERT_EQ(nodes[from], path[0]);
ASSERT_EQ(nodes[to], path[path_len - 1]);
for (int i = 1; i < path_len; i++) {
ASSERT_NE(-1, EdgeIndex(&edges, path[i - 1], path[i]));
ASSERT_TRUE(graph_cycles.HasEdge(path[i - 1], path[i]));
}
}
}
break;
case 5:
CHECK(graph_cycles.CheckInvariants());
break;
default:
LOG(FATAL);
}
std::bernoulli_distribution rarely(1.0 / 1024.0);
if (rarely(rnd)) {
VLOG(3) << "Graph expansion";
CheckEdges(&nodes, &edges, &graph_cycles);
CheckTransitiveClosure(&nodes, &edges, &graph_cycles);
for (int i = 0; i != 256; i++) {
int new_node = graph_cycles.NewNode();
ASSERT_NE(-1, new_node);
VLOG(1) << "adding node " << new_node;
ASSERT_GE(new_node, 0);
ASSERT_EQ(nullptr, graph_cycles.GetNodeData(new_node));
graph_cycles.SetNodeData(
new_node, reinterpret_cast<void *>(
static_cast<intptr_t>(new_node + kDataOffset)));
for (int j = 0; j != nodes.size(); j++) {
ASSERT_NE(nodes[j], new_node);
}
nodes.push_back(new_node);
}
for (int i = 0; i != 256; i++) {
ASSERT_GT(nodes.size(), 0);
int node_index = RandomNode(&rnd, &nodes);
int node = nodes[node_index];
nodes[node_index] = nodes.back();
nodes.pop_back();
VLOG(1) << "removing node " << node;
graph_cycles.RemoveNode(node);
int j = 0;
while (j != edges.size()) {
if (edges[j].from == node || edges[j].to == node) {
edges[j] = edges.back();
edges.pop_back();
} else {
j++;
}
}
}
CHECK(graph_cycles.CheckInvariants());
}
}
}
class GraphCyclesTest : public ::testing::Test {
public:
tensorflow::GraphCycles g_;
GraphCyclesTest() {
for (int i = 0; i < 100; i++) {
CHECK_EQ(i, g_.NewNode());
}
CHECK(g_.CheckInvariants());
}
bool AddEdge(int x, int y) { return g_.InsertEdge(x, y); }
void AddMultiples() {
for (int x = 1; x < 25; x++) {
EXPECT_TRUE(AddEdge(x, 2 * x)) << x;
EXPECT_TRUE(AddEdge(x, 3 * x)) << x;
}
CHECK(g_.CheckInvariants());
}
std::string Path(int x, int y) {
static const int kPathSize = 5;
int32_t path[kPathSize];
int np = g_.FindPath(x, y, kPathSize, path);
std::string result;
for (int i = 0; i < np; i++) {
if (i >= kPathSize) {
result += " ...";
break;
}
if (!result.empty()) result.push_back(' ');
char buf[20];
snprintf(buf, sizeof(buf), "%d", path[i]);
result += buf;
}
return result;
}
};
TEST_F(GraphCyclesTest, NoCycle) {
AddMultiples();
CHECK(g_.CheckInvariants());
}
TEST_F(GraphCyclesTest, SimpleCycle) {
AddMultiples();
EXPECT_FALSE(AddEdge(8, 4));
EXPECT_EQ("4 8", Path(4, 8));
CHECK(g_.CheckInvariants());
}
TEST_F(GraphCyclesTest, IndirectCycle) {
AddMultiples();
EXPECT_TRUE(AddEdge(16, 9));
CHECK(g_.CheckInvariants());
EXPECT_FALSE(AddEdge(9, 2));
EXPECT_EQ("2 4 8 16 9", Path(2, 9));
CHECK(g_.CheckInvariants());
}
TEST_F(GraphCyclesTest, LongPath) {
ASSERT_TRUE(AddEdge(2, 4));
ASSERT_TRUE(AddEdge(4, 6));
ASSERT_TRUE(AddEdge(6, 8));
ASSERT_TRUE(AddEdge(8, 10));
ASSERT_TRUE(AddEdge(10, 12));
ASSERT_FALSE(AddEdge(12, 2));
EXPECT_EQ("2 4 6 8 10 ...", Path(2, 12));
CHECK(g_.CheckInvariants());
}
TEST_F(GraphCyclesTest, RemoveNode) {
ASSERT_TRUE(AddEdge(1, 2));
ASSERT_TRUE(AddEdge(2, 3));
ASSERT_TRUE(AddEdge(3, 4));
ASSERT_TRUE(AddEdge(4, 5));
g_.RemoveNode(3);
ASSERT_TRUE(AddEdge(5, 1));
}
TEST_F(GraphCyclesTest, ManyEdges) {
const int N = 50;
for (int i = 0; i < N; i++) {
for (int j = 1; j < N; j++) {
ASSERT_TRUE(AddEdge(i, i + j));
}
}
CHECK(g_.CheckInvariants());
ASSERT_TRUE(AddEdge(2 * N - 1, 0));
CHECK(g_.CheckInvariants());
ASSERT_FALSE(AddEdge(10, 9));
CHECK(g_.CheckInvariants());
}
TEST_F(GraphCyclesTest, ContractEdge) {
ASSERT_TRUE(AddEdge(1, 2));
ASSERT_TRUE(AddEdge(1, 3));
ASSERT_TRUE(AddEdge(2, 3));
ASSERT_TRUE(AddEdge(2, 4));
ASSERT_TRUE(AddEdge(3, 4));
EXPECT_FALSE(g_.ContractEdge(1, 3).has_value());
CHECK(g_.CheckInvariants());
EXPECT_TRUE(g_.HasEdge(1, 3));
EXPECT_EQ(g_.ContractEdge(1, 2).value(), 2);
CHECK(g_.CheckInvariants());
EXPECT_TRUE(g_.HasEdge(2, 3));
EXPECT_TRUE(g_.HasEdge(2, 4));
EXPECT_TRUE(g_.HasEdge(3, 4));
EXPECT_EQ(g_.ContractEdge(2, 3).value(), 2);
CHECK(g_.CheckInvariants());
EXPECT_TRUE(g_.HasEdge(2, 4));
}
TEST_F(GraphCyclesTest, CanContractEdge) {
ASSERT_TRUE(AddEdge(1, 2));
ASSERT_TRUE(AddEdge(1, 3));
ASSERT_TRUE(AddEdge(2, 3));
ASSERT_TRUE(AddEdge(2, 4));
ASSERT_TRUE(AddEdge(3, 4));
EXPECT_FALSE(g_.CanContractEdge(1, 3));
EXPECT_FALSE(g_.CanContractEdge(2, 4));
EXPECT_TRUE(g_.CanContractEdge(1, 2));
EXPECT_TRUE(g_.CanContractEdge(2, 3));
EXPECT_TRUE(g_.CanContractEdge(3, 4));
}
static void BM_StressTest(::testing::benchmark::State &state) {
const int num_nodes = state.range(0);
while (state.KeepRunningBatch(num_nodes)) {
tensorflow::GraphCycles g;
int32_t *nodes = new int32_t[num_nodes];
for (int i = 0; i < num_nodes; i++) {
nodes[i] = g.NewNode();
}
for (int i = 0; i < num_nodes; i++) {
int end = std::min(num_nodes, i + 5);
for (int j = i + 1; j < end; j++) {
if (nodes[i] >= 0 && nodes[j] >= 0) {
CHECK(g.InsertEdge(nodes[i], nodes[j]));
}
}
}
delete[] nodes;
}
}
BENCHMARK(BM_StressTest)->Range(2048, 1048576);
static void BM_ContractEdge(::testing::benchmark::State &state) {
const int num_nodes = state.range(0);
while (state.KeepRunningBatch(num_nodes)) {
state.PauseTiming();
tensorflow::GraphCycles g;
std::vector<int32_t> nodes;
nodes.reserve(num_nodes);
for (int i = 0; i < num_nodes; i++) {
nodes.push_back(g.NewNode());
}
for (int i = 0; i < num_nodes - 1; ++i) {
g.InsertEdge(nodes[i], nodes[num_nodes - 1]);
}
state.ResumeTiming();
int node = num_nodes - 1;
for (int i = 0; i < num_nodes - 1; ++i) {
node = g.ContractEdge(nodes[i], node).value();
}
}
}
BENCHMARK(BM_ContractEdge)->Arg(1000)->Arg(10000);
static void BM_IsReachableNonConst(testing::benchmark::State &state) {
const int num_nodes = state.range(0);
tensorflow::GraphCycles g;
std::vector<int32_t> nodes;
nodes.reserve(num_nodes);
for (int i = 0; i < num_nodes; i++) {
nodes.push_back(g.NewNode());
}
absl::BitGen bitgen;
for (int i = 0; i < num_nodes; i++) {
int max = num_nodes - 1 - i;
if (max == 0) break;
constexpr int branch_factor = 2;
for (int b = 0; b < branch_factor; b++) {
int j = i + 1 + absl::Uniform(bitgen, 0, max);
CHECK_LT(j, num_nodes);
CHECK(g.InsertEdge(nodes[i], nodes[j]));
}
}
auto get_random_node = [&]() {
return nodes[absl::Uniform(bitgen, 0, num_nodes)];
};
int32_t src, dst;
int i = 0;
for (auto s : state) {
if (i % 256 == 0) {
src = get_random_node();
dst = get_random_node();
}
bool reachable = g.IsReachableNonConst(src, dst);
benchmark::DoNotOptimize(reachable);
i++;
}
}
BENCHMARK(BM_IsReachableNonConst)
->Arg(10)
->Arg(50)
->Arg(100)
->Arg(200)
->Arg(1000)
->Arg(30000); |
2,032 | cpp | tensorflow/tensorflow | reduction_dimension_grouper | third_party/xla/xla/service/gpu/transforms/reduction_dimension_grouper.cc | third_party/xla/xla/service/gpu/transforms/reduction_dimension_grouper_test.cc | #ifndef XLA_SERVICE_GPU_REDUCTION_DIMENSION_GROUPER_H_
#define XLA_SERVICE_GPU_REDUCTION_DIMENSION_GROUPER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
class ReductionDimensionGrouper : public HloModulePass {
public:
absl::string_view name() const override {
return "reduction-dimension-grouper";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/reduction_dimension_grouper.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/layout_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
class ReduceDimensionGroupVisitor : public DfsHloRewriteVisitor {
public:
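// Collapses each run of consecutive reduced dimensions of a reduce operand
// into one dimension via a bitcast (operands must already carry the default
// layout); non-reduced dimensions are left as-is.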
absl::Status HandleReduce(HloInstruction *hlo) override {
auto reduce = Cast<HloReduceInstruction>(hlo);
VLOG(4) << "Input: " << reduce->ToString();
absl::InlinedVector<HloInstruction *, 2> reduce_inputs_grouped;
std::vector<int64_t> reduced_dims_grouped;
int idx = -1;
for (HloInstruction *operand : reduce->inputs()) {
idx++;
std::vector<int64_t> new_grouped_dims;
const Shape &shape = operand->shape();
CHECK(shape == LayoutUtil::GetWithDefaultLayout(shape))
<< "Default layout should be enforced on reduction operand";
auto is_reduced = [&](int dim) {
return absl::c_linear_search(reduce->dimensions(), dim);
};
bool changed = false;
int64_t next_dim_size = 1;
for (int logical_dim = 0; logical_dim < shape.rank(); logical_dim++) {
VLOG(5) << "Processing dimension " << logical_dim << " of size "
<< shape.dimensions(logical_dim);
if (is_reduced(logical_dim) && logical_dim < shape.rank() - 1 &&
is_reduced(logical_dim + 1)) {
VLOG(5) << "This and consecutive dimension are reduced, merging";
changed = true;
next_dim_size *= shape.dimensions(logical_dim);
continue;
}
if (is_reduced(logical_dim)) {
new_grouped_dims.push_back(next_dim_size *
shape.dimensions(logical_dim));
if (idx == 0) {
reduced_dims_grouped.push_back(new_grouped_dims.size() - 1);
}
next_dim_size = 1;
} else {
new_grouped_dims.push_back(shape.dimensions(logical_dim));
}
}
if (!changed) {
return absl::OkStatus();
}
Shape grouped_shape =
ShapeUtil::MakeShape(shape.element_type(), new_grouped_dims);
reduce_inputs_grouped.push_back(reduce->parent()->AddInstruction(
HloInstruction::CreateBitcast(grouped_shape, operand),
&operand->metadata()));
VLOG(5) << "Adding bitcast: " << reduce_inputs_grouped.back()->ToString();
}
std::unique_ptr<HloInstruction> new_reduce = HloInstruction::CreateReduce(
reduce->shape(), reduce_inputs_grouped, reduce->init_values(),
reduced_dims_grouped, reduce->to_apply());
VLOG(5) << "Generated new reduction: " << new_reduce->ToString();
return ReplaceWithNewInstruction(reduce, std::move(new_reduce));
}
};
absl::StatusOr<bool> ReductionDimensionGrouper::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
TF_ASSIGN_OR_RETURN(bool changed, ReduceDimensionGroupVisitor().RunOnModule(
module, execution_threads));
return changed;
}
}
} | #include "xla/service/gpu/reduction_dimension_grouper.h"
#include <optional>
#include "absl/strings/string_view.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class ReductionDimensionGrouperTest : public HloTestBase {
public:
void CheckDimensionGrouper(absl::string_view hlo,
std::optional<absl::string_view> expected) {
RunAndFilecheckHloRewrite(hlo, gpu::ReductionDimensionGrouper{}, expected);
}
};
TEST_F(ReductionDimensionGrouperTest, ReductionWithGrouping) {
const char* hlo = R"(
HloModule ReductionWithGrouping
add {
accum = f32[] parameter(0)
op = f32[] parameter(1)
ROOT out = f32[] add(accum, op)
}
ENTRY main {
input = f32[100,10,32,3]{3,2,1,0} parameter(0)
zero = f32[] constant(0)
ROOT out = f32[100,10]{0,1} reduce(input, zero), dimensions={2,3}, to_apply=add
}
)";
CheckDimensionGrouper(hlo,
R"(
)");
}
TEST_F(ReductionDimensionGrouperTest, ReductionWithGroupingVariadic) {
const char* hlo = R"(
HloModule ReductionWithGrouping
argmax {
running_max = f32[] parameter(0)
running_max_idx = u32[] parameter(1)
current_value = f32[] parameter(2)
current_value_idx = u32[] parameter(3)
current = (f32[], u32[]) tuple(running_max, running_max_idx)
potential = (f32[], u32[]) tuple(current_value, current_value_idx)
cmp_code = pred[] compare(current_value, running_max), direction=GT
new_max = f32[] select(cmp_code, current_value, running_max)
new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
input = f32[100,10,32,3]{3,2,1,0} parameter(0)
idxs = u32[100,10,32,3]{3,2,1,0} parameter(1)
zero = f32[] constant(0)
zero_idx = u32[] constant(0)
ROOT out = (f32[100,10]{1,0}, u32[100,10]{1,0}) reduce(input, idxs, zero, zero_idx), dimensions={2,3}, to_apply=argmax
}
)";
CheckDimensionGrouper(hlo, R"(
)");
}
}
} |
2,033 | cpp | tensorflow/tensorflow | gpu_layout_assignment | null | null | #ifndef XLA_SERVICE_GPU_GPU_LAYOUT_ASSIGNMENT_H_
#define XLA_SERVICE_GPU_GPU_LAYOUT_ASSIGNMENT_H_
#include <cstdint>
#include <initializer_list>
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/computation_layout.h"
#include "xla/service/layout_assignment.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
namespace xla {
namespace gpu {
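// GPU-specific layout assignment pass. On top of the generic LayoutAssignment
// logic it adds backend constraints for cuDNN convolution custom calls, dots,
// FFT, sort, triangular solve, and collective ops.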
class GpuLayoutAssignment : public LayoutAssignment {
public:
explicit GpuLayoutAssignment(
ComputationLayout* entry_computation_layout,
const se::GpuComputeCapability& gpu_version,
const se::dnn::VersionInfo& dnn_version,
ChannelLayoutConstraints* channel_constraints = nullptr)
: LayoutAssignment(entry_computation_layout, channel_constraints),
gpu_version_(gpu_version),
dnn_version_(dnn_version) {}
~GpuLayoutAssignment() override = default;
protected:
absl::Status AddBackendConstraints(LayoutConstraints* constraints) override;
private:
absl::Status AddBackendConstraintsToDnnConvCustomCall(
HloCustomCallInstruction* instr, LayoutConstraints* constraints);
absl::Status SetOperandMajorToMinorLayout(
const HloInstruction* instruction, int64_t operand,
std::initializer_list<absl::Span<const int64_t>> dim_groups);
absl::Status SetDotOperandLayout(const HloInstruction* instruction,
int64_t operand,
absl::Span<const int64_t> batch_dims,
absl::Span<const int64_t> row_dims,
absl::Span<const int64_t> col_dims);
absl::Status SetDotLayout(const HloInstruction* instruction,
LayoutConstraints* constraints);
bool PropagateReductionLayoutToOperand(const HloInstruction* user) override;
bool InstructionCanChangeLayoutInstance(
const HloInstruction* instruction) override;
const se::GpuComputeCapability gpu_version_;
const se::dnn::VersionInfo dnn_version_;
};
}
}
#endif
#include "xla/service/gpu/gpu_layout_assignment.h"
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <memory>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/reduction_utils.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/service/logical_buffer.h"
#include "xla/shape.h"
#include "xla/shape_layout.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/tsl/util/env_var.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
using se::dnn::DataLayout;
using se::dnn::FilterLayout;
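// Picks (input, filter, output) cuDNN layouts for a convolution custom call.
// Integer convolutions prefer NHWC (or NCHW_VECT_C for vectorized int8), FP8
// uses NHWC, debug flags can force either layout, and otherwise the choice
// depends on element type, GPU generation, and cuDNN version.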
static std::tuple<DataLayout, FilterLayout, DataLayout>
HeuristicLayoutAssignment(const HloInstruction* instr,
const se::GpuComputeCapability& gpu_version,
const se::dnn::VersionInfo& dnn_version) {
constexpr auto kAllNCHW =
std::make_tuple(DataLayout::kBatchDepthYX, FilterLayout::kOutputInputYX,
DataLayout::kBatchDepthYX);
constexpr auto kAllNCHW_VECT_C =
std::make_tuple(DataLayout::kBatchDepthYX4, FilterLayout::kOutputInputYX4,
DataLayout::kBatchDepthYX4);
constexpr auto kAllNHWC =
std::make_tuple(DataLayout::kBatchYXDepth, FilterLayout::kOutputYXInput,
DataLayout::kBatchYXDepth);
const ConvolutionDimensionNumbers& dnums =
instr->convolution_dimension_numbers();
Shape input_shape = instr->operand(0)->shape();
PrimitiveType input_ty = instr->operand(0)->shape().element_type();
if (primitive_util::IsIntegralType(input_ty)) {
if (input_ty == S8 && dnums.input_spatial_dimensions_size() == 2 &&
input_shape.dimensions_size() == 5) {
VLOG(2) << "Using NCHW_VECT_C for int8_t conv " << instr->ToString();
return kAllNCHW_VECT_C;
}
VLOG(2) << "Using NHWC for int8_t conv " << instr->ToString();
return kAllNHWC;
}
if (primitive_util::IsF8Type(input_ty)) {
VLOG(2) << "Using NHWC for FP8 conv " << instr->ToString();
return kAllNHWC;
}
const DebugOptions& debug_options =
instr->GetModule()->config().debug_options();
if (debug_options.xla_gpu_force_conv_nchw()) {
VLOG(2) << "Overriding layout to NCHW for " << instr->ToString();
return kAllNCHW;
}
if (debug_options.xla_gpu_force_conv_nhwc()) {
VLOG(2) << "Overriding layout to NHWC for " << instr->ToString();
return kAllNHWC;
}
const auto* rocm_compute_capability =
std::get_if<se::RocmComputeCapability>(&gpu_version);
if (rocm_compute_capability && input_ty == F16) return kAllNHWC;
const bool isFloat16 = (input_ty == F16) || (input_ty == BF16);
if (std::holds_alternative<se::CudaComputeCapability>(gpu_version)) {
const auto* cuda_compute_capability =
std::get_if<se::CudaComputeCapability>(&gpu_version);
bool is_volta =
cuda_compute_capability &&
cuda_compute_capability->IsAtLeast(se::CudaComputeCapability::VOLTA);
if (!isFloat16 || !is_volta ||
instr->shape().tuple_shapes(0).dimensions_size() != 4) {
return kAllNCHW;
}
if (std::make_tuple(dnn_version.major_version(),
dnn_version.minor_version()) <= std::make_tuple(7, 3) &&
instr->custom_call_target() == kCudnnConvBackwardInputCallTarget &&
window_util::HasStride(instr->window())) {
return kAllNCHW;
}
} else if (std::holds_alternative<se::RocmComputeCapability>(gpu_version)) {
bool is_enabled = false;
TF_CHECK_OK(tsl::ReadBoolFromEnvVar("TF_USE_ROCM_NHWC",
false, &is_enabled));
auto rocm_compute_capability =
std::get<se::RocmComputeCapability>(gpu_version);
if (!isFloat16 || (!rocm_compute_capability.has_nhwc_layout_support()) ||
instr->shape().tuple_shapes(0).dimensions_size() != 4 || !is_enabled) {
return kAllNCHW;
}
}
VLOG(2) << "Using heuristic to figure out layouts for " << instr->ToString();
return kAllNHWC;
}
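// Applies the heuristically chosen conv layouts: maps the custom call's
// operands and result to input/filter/output shapes according to the
// convolution kind, then constrains the operand layouts and the result
// buffer (plus side inputs for fused and graph convolutions).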
absl::Status GpuLayoutAssignment::AddBackendConstraintsToDnnConvCustomCall(
HloCustomCallInstruction* instr, LayoutConstraints* constraints) {
Shape lhs_shape = instr->operand(0)->shape();
Shape rhs_shape = instr->operand(1)->shape();
Shape result_shape = instr->shape().tuple_shapes(0);
Shape* input_shape;
Shape* filter_shape;
Shape* output_shape;
TF_ASSIGN_OR_RETURN(auto kind, GetCudnnConvKind(instr));
switch (kind) {
case CudnnConvKind::kForward:
case CudnnConvKind::kForwardActivation:
case CudnnConvKind::kForwardGraph:
input_shape = &lhs_shape;
filter_shape = &rhs_shape;
output_shape = &result_shape;
break;
case CudnnConvKind::kBackwardInput:
input_shape = &result_shape;
filter_shape = &rhs_shape;
output_shape = &lhs_shape;
break;
case CudnnConvKind::kBackwardFilter:
input_shape = &lhs_shape;
filter_shape = &result_shape;
output_shape = &rhs_shape;
break;
}
{
DataLayout input;
FilterLayout filter;
DataLayout output;
std::tie(input, filter, output) =
HeuristicLayoutAssignment(instr, gpu_version_, dnn_version_);
TF_ASSIGN_OR_RETURN(
std::tie(*input_shape->mutable_layout(),
*filter_shape->mutable_layout(),
*output_shape->mutable_layout()),
StreamExecutorConvLayoutsToXlaLayouts(
instr->convolution_dimension_numbers(), input, filter, output));
}
TF_ASSIGN_OR_RETURN(
const LogicalBuffer* call_result_buf,
points_to_analysis_->GetBufferDefinedAt(instr, {0}));
TF_RETURN_IF_ERROR(SetOperandLayout(lhs_shape, instr, 0));
TF_RETURN_IF_ERROR(SetOperandLayout(rhs_shape, instr, 1));
TF_RETURN_IF_ERROR(SetBufferLayout(result_shape.layout(), *call_result_buf));
if (kind == CudnnConvKind::kForwardActivation &&
instr->operand_count() == 4) {
TF_RETURN_IF_ERROR(SetOperandLayout(*output_shape, instr, 3));
}
if (kind == CudnnConvKind::kForwardGraph) {
for (int k = 2; k < instr->operand_count(); ++k) {
if (!ShapeUtil::IsScalar(instr->operand(k)->shape())) {
TF_RETURN_IF_ERROR(SetOperandLayout(*output_shape, instr, k));
}
}
}
if (instr->operand_count() > 2 && kind != CudnnConvKind::kForwardActivation &&

kind != CudnnConvKind::kForwardGraph) {
return Internal(
"Invalid convolution. Conv has a side input, but kind is not fused "
"conv forward or graph conv foward: %s",
instr->ToString());
}
return absl::OkStatus();
}
namespace {
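// Sets a column-major ("Fortran") layout on the two minor-most dimensions,
// keeping the default ordering for the remaining (batch) dimensions.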
void SetFortranLayout(Shape* shape) {
LayoutUtil::SetToDefaultLayout(shape);
int n = shape->mutable_layout()->minor_to_major_size();
CHECK_GE(n, 2);
std::swap(shape->mutable_layout()->mutable_minor_to_major()->at(0),
shape->mutable_layout()->mutable_minor_to_major()->at(1));
}
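// Returns true if MatrixLayout::For can represent `shape` as the output of
// `dot`; such layouts are assumed to be directly usable by the GEMM emitters.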
bool DotCanSupportShapeWithLayout(const HloInstruction* dot,
const Shape& shape) {
const DotDimensionNumbers& dot_dims = dot->dot_dimension_numbers();
return MatrixLayout::For(shape, dot_dims.lhs_batch_dimensions().size(),
dot->operand(0)->shape().rank() -
dot_dims.lhs_contracting_dimensions().size() -
dot_dims.lhs_batch_dimensions().size(),
dot_dims.rhs_batch_dimensions().size(),
dot->operand(1)->shape().rank() -
dot_dims.rhs_contracting_dimensions().size() -
dot_dims.rhs_batch_dimensions().size())
.ok();
}
}
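// Walks the computation in reverse post order and adds layout constraints for
// instructions with backend-specific layout requirements.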
absl::Status GpuLayoutAssignment::AddBackendConstraints(
LayoutConstraints* constraints) {
auto post_order = constraints->computation()->MakeInstructionPostOrder();
for (auto iterator = post_order.rbegin(); iterator != post_order.rend();
++iterator) {
HloInstruction* instruction = *iterator;
if (IsCustomCallToDnnConvolution(*instruction)) {
TF_RETURN_IF_ERROR(AddBackendConstraintsToDnnConvCustomCall(
Cast<HloCustomCallInstruction>(instruction), constraints));
}
CHECK(!IsCublasGemm(*instruction))
<< "Gemm rewriting should run after layout assignment";
if (instruction->opcode() == HloOpcode::kDot) {
const Shape& output_shape = instruction->shape();
const Shape& lhs_shape = instruction->operand(0)->shape();
const Shape& rhs_shape = instruction->operand(1)->shape();
const DotDimensionNumbers& dot_dims =
instruction->dot_dimension_numbers();
absl::Span<const int64_t> lhs_batch_dims =
dot_dims.lhs_batch_dimensions();
absl::Span<const int64_t> lhs_contracting_dims =
dot_dims.lhs_contracting_dimensions();
TF_ASSIGN_OR_RETURN(std::vector<int64_t> lhs_non_contracting_dims,
GetNonContractingDims(lhs_shape, lhs_batch_dims,
lhs_contracting_dims));
absl::Span<const int64_t> rhs_batch_dims =
dot_dims.rhs_batch_dimensions();
absl::Span<const int64_t> rhs_contracting_dims =
dot_dims.rhs_contracting_dimensions();
TF_ASSIGN_OR_RETURN(std::vector<int64_t> rhs_non_contracting_dims,
GetNonContractingDims(rhs_shape, rhs_batch_dims,
rhs_contracting_dims));
const DebugOptions& debug_options =
instruction->GetModule()->config().debug_options();
bool is_bf16_to_bf16 =
(output_shape.element_type() == PrimitiveType::BF16 &&
lhs_shape.element_type() == PrimitiveType::BF16 &&
rhs_shape.element_type() == PrimitiveType::BF16);
bool is_s8_to_s32 = (output_shape.element_type() == PrimitiveType::S32 &&
lhs_shape.element_type() == PrimitiveType::S8 &&
rhs_shape.element_type() == PrimitiveType::S8 &&
output_shape.dimensions_size() == 2 &&
lhs_shape.dimensions_size() == 2 &&
rhs_shape.dimensions_size() == 2);
if (is_s8_to_s32 ||
(is_bf16_to_bf16 &&
debug_options.xla_gpu_ensure_minor_dot_contraction_dims())) {
TF_RETURN_IF_ERROR(SetOperandMajorToMinorLayout(
instruction, 0,
{lhs_batch_dims, lhs_non_contracting_dims, lhs_contracting_dims}));
TF_RETURN_IF_ERROR(SetOperandMajorToMinorLayout(
instruction, 1,
{rhs_batch_dims, rhs_non_contracting_dims, rhs_contracting_dims}));
TF_RETURN_IF_ERROR(SetDotLayout(instruction, constraints));
} else {
if (!lhs_batch_dims.empty() || lhs_contracting_dims.size() > 1 ||
lhs_non_contracting_dims.size() > 1) {
TF_RETURN_IF_ERROR(SetDotOperandLayout(instruction, 0, lhs_batch_dims,
lhs_contracting_dims,
lhs_non_contracting_dims));
}
if (!rhs_batch_dims.empty() || rhs_non_contracting_dims.size() > 1 ||
rhs_contracting_dims.size() > 1) {
TF_RETURN_IF_ERROR(SetDotOperandLayout(instruction, 1, rhs_batch_dims,
rhs_contracting_dims,
rhs_non_contracting_dims));
}
if (!lhs_batch_dims.empty() || lhs_non_contracting_dims.size() > 1 ||
rhs_non_contracting_dims.size() > 1) {
TF_RETURN_IF_ERROR(SetDotLayout(instruction, constraints));
}
}
} else if (instruction->opcode() == HloOpcode::kTranspose) {
const HloInstruction* operand = instruction->operand(0);
if ((operand->opcode() != HloOpcode::kDot) ||
(operand->user_count() > 1)) {
continue;
}
Shape shape = operand->shape();
*shape.mutable_layout() =
LayoutUtil::MakeLayoutFromMajorToMinor(instruction->dimensions());
if (DotCanSupportShapeWithLayout(operand, shape)) {
TF_RETURN_IF_ERROR(
SetOperandLayout(shape, instruction, 0));
}
} else if (instruction->opcode() == HloOpcode::kFft) {
Shape op0_shape = instruction->operand(0)->shape();
LayoutUtil::SetToDefaultLayout(&op0_shape);
Shape output_shape = instruction->shape();
LayoutUtil::SetToDefaultLayout(&output_shape);
TF_RETURN_IF_ERROR(SetOperandLayout(op0_shape, instruction, 0));
TF_RETURN_IF_ERROR(SetInstructionLayout(output_shape, instruction));
} else if (instruction->opcode() == HloOpcode::kSort &&
instruction->operand(0)->shape().rank() > 1) {
Shape keys_shape = instruction->operand(0)->shape();
Layout keys_layout =
LayoutUtil::GetDefaultLayoutForRank(keys_shape.rank());
for (int64_t i = 0; i < instruction->operand_count(); ++i) {
Shape shape = instruction->operand(i)->shape();
*shape.mutable_layout() = keys_layout;
TF_RETURN_IF_ERROR(SetOperandLayout(shape, instruction, i));
const LogicalBuffer* output_buffer;
if (instruction->shape().IsArray()) {
TF_ASSIGN_OR_RETURN(
output_buffer,
points_to_analysis_->GetBufferDefinedAt(instruction, {}));
} else {
TF_ASSIGN_OR_RETURN(
output_buffer,
points_to_analysis_->GetBufferDefinedAt(instruction, {i}));
}
TF_RETURN_IF_ERROR(SetBufferLayout(keys_layout, *output_buffer));
}
} else if (instruction->opcode() == HloOpcode::kTriangularSolve) {
Shape op0_shape = instruction->operand(0)->shape();
Shape op1_shape = instruction->operand(1)->shape();
Shape output_shape = instruction->shape();
SetFortranLayout(&op0_shape);
SetFortranLayout(&op1_shape);
SetFortranLayout(&output_shape);
TF_RETURN_IF_ERROR(SetOperandLayout(op0_shape, instruction, 0));
TF_RETURN_IF_ERROR(SetOperandLayout(op1_shape, instruction, 1));
TF_RETURN_IF_ERROR(SetInstructionLayout(output_shape, instruction));
} else if (instruction->opcode() == HloOpcode::kReduceScatter) {
auto ars = Cast<HloReduceScatterInstruction>(instruction);
TF_RETURN_IF_ERROR(SetInstructionLayout(
ShapeUtil::MoveDimToMajor(ars->shape(), ars->scatter_dimension()),
ars));
} else if (instruction->opcode() == HloOpcode::kAllGather) {
auto ag = Cast<HloAllGatherInstruction>(instruction);
TF_RETURN_IF_ERROR(SetInstructionLayout(
ShapeUtil::MoveDimToMajor(ag->shape(), ag->all_gather_dimension()),
ag));
} else if (instruction->opcode() == HloOpcode::kAllToAll &&
instruction->shape().IsArray()) {
auto* all_to_all = Cast<HloAllToAllInstruction>(instruction);
TF_RETURN_IF_ERROR(SetInstructionLayout(
ShapeUtil::MoveDimToMajor(all_to_all->shape(),
*all_to_all->split_dimension()),
all_to_all));
} else if (instruction->opcode() == HloOpcode::kSend) {
Shape s = instruction->operand(0)->shape();
LayoutUtil::SetToDefaultLayout(&s);
TF_RETURN_IF_ERROR(SetInstructionLayout(s, instruction->operand(0)));
TF_RETURN_IF_ERROR(
SetArrayOperandLayout(s.layout(), instruction->operand(0), 0));
} else if (instruction->opcode() == HloOpcode::kRecv) {
Shape s = instruction->shape();
ShapeUtil::ForEachMutableSubshape(
&s, [&](Shape* subshape, const ShapeIndex& index) {
LayoutUtil::SetToDefaultLayout(subshape);
});
TF_RETURN_IF_ERROR(SetInstructionLayout(s, instruction));
}
}
return absl::OkStatus();
}
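// Constrains a dot operand to a layout the GEMM emitters can use: keeps an
// already-assigned valid layout, then tries the default layout, and finally
// falls back to a major-to-minor (batch, row, col) dimension ordering.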
absl::Status GpuLayoutAssignment::SetDotOperandLayout(
const HloInstruction* instruction, int64_t operand,
absl::Span<const int64_t> batch_dims, absl::Span<const int64_t> row_dims,
absl::Span<const int64_t> col_dims) {
Shape shape = instruction->operand(operand)->shape();
if (shape.has_layout() &&
MatrixLayout::For(shape, batch_dims, row_dims, col_dims).ok())
return SetOperandLayout(shape, instruction, operand);
LayoutUtil::SetToDefaultLayout(&shape);
if (MatrixLayout::For(shape, batch_dims, row_dims, col_dims).ok())
return SetOperandLayout(shape, instruction, operand);
return SetOperandMajorToMinorLayout(
instruction, operand,
{batch_dims, row_dims, col_dims});
}
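// Constrains the operand to the layout whose major-to-minor order is the
// concatenation of the given dimension groups.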
absl::Status GpuLayoutAssignment::SetOperandMajorToMinorLayout(
const HloInstruction* instruction, int64_t operand,
std::initializer_list<absl::Span<const int64_t>> dim_groups) {
size_t size = 0;
for (auto group : dim_groups) size += group.size();
std::vector<int64_t> major_to_minor;
major_to_minor.reserve(size);
for (const auto& group : dim_groups) {
major_to_minor.insert(major_to_minor.end(), group.begin(), group.end());
}
Shape shape = instruction->operand(operand)->shape();
*shape.mutable_layout() =
LayoutUtil::MakeLayoutFromMajorToMinor(major_to_minor);
return SetOperandLayout(shape, instruction, operand);
}
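// If a user of the dot already constrains the dot's layout to something the
// dot can produce directly, propagate that layout; otherwise use the default
// layout.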
absl::Status GpuLayoutAssignment::SetDotLayout(
const HloInstruction* instruction, LayoutConstraints* constraints) {
for (const HloInstruction* user : instruction->users()) {
for (int64_t i = 0; i < user->operand_count(); ++i) {
if (user->operand(i) != instruction) {
continue;
}
const ShapeLayout* constraint = constraints->OperandLayout(user, i);
if ((constraint != nullptr) &&
DotCanSupportShapeWithLayout(instruction, constraint->shape())) {
return SetInstructionLayout(constraint->shape(), instruction);
}
}
}
return SetInstructionLayout(
LayoutUtil::GetWithDefaultLayout(instruction->shape()), instruction);
}
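// Propagates the reduction's layout to its operand only when a row reduction
// of this size is expected to beat the elementwise emitter.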
bool GpuLayoutAssignment::PropagateReductionLayoutToOperand(
const HloInstruction* user) {
int64_t reduction_size = 1;
for (int64_t reduction_dim : user->dimensions()) {
reduction_size *= user->operand(0)->shape().dimensions(reduction_dim);
}
int64_t kept_dimension_size = ShapeUtil::ElementsIn(user->shape());
return IsUnnestedReductionFasterThanElemental(
{true, {1, kept_dimension_size, reduction_size}});
}
bool GpuLayoutAssignment::InstructionCanChangeLayoutInstance(
const HloInstruction* instruction) {
const HloCustomCallInstruction* custom_call =
DynCast<HloCustomCallInstruction>(instruction);
if (custom_call != nullptr &&
(custom_call->custom_call_target() ==
host_memory_offload_annotations::kMoveToHostCustomCallTarget ||
custom_call->custom_call_target() ==
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget)) {
return false;
}
return LayoutAssignment::InstructionCanChangeLayoutInstance(instruction);
}
}
} | #include "xla/service/gpu/gpu_layout_assignment.h"
#include <cstdint>
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/service/computation_layout.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_layout.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
using ::tsl::testing::IsOkAndHolds;
class LayoutAssignmentTest : public HloTestBase {
public:
se::CudaComputeCapability GetCudaComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
se::GpuComputeCapability GetGpuComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability();
}
se::dnn::VersionInfo GetDnnVersion() {
return GetDnnVersionInfoOrDefault(backend().default_stream_executor(),
se::dnn::VersionInfo{8, 3, 0});
}
};
TEST_F(LayoutAssignmentTest, Elementwise) {
Shape ashape = ShapeUtil::MakeShape(F32, {42, 12});
Shape ashape_in_row_major(ashape);
Shape ashape_in_col_major(ashape);
*ashape_in_row_major.mutable_layout() = LayoutUtil::MakeLayout({1, 0});
*ashape_in_col_major.mutable_layout() = LayoutUtil::MakeLayout({0, 1});
for (const Shape& lhs_shape_with_layout :
{ashape_in_row_major, ashape_in_col_major}) {
for (const Shape& rhs_shape_with_layout :
{ashape_in_row_major, ashape_in_col_major}) {
for (const Shape& result_shape_with_layout :
{ashape_in_row_major, ashape_in_col_major}) {
auto builder = HloComputation::Builder(TestName());
auto x = builder.AddInstruction(
HloInstruction::CreateParameter(0, ashape, "x"));
auto y = builder.AddInstruction(
HloInstruction::CreateParameter(1, ashape, "y"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(ashape, HloOpcode::kAdd, x, y));
auto module = CreateNewVerifiedModule();
HloComputation* computation =
module->AddEntryComputation(builder.Build(add));
ComputationLayout computation_layout(
computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(lhs_shape_with_layout);
*computation_layout.mutable_parameter_layout(1) =
ShapeLayout(rhs_shape_with_layout);
*computation_layout.mutable_result_layout() =
ShapeLayout(result_shape_with_layout);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
for (const HloInstruction* operand : add->operands()) {
EXPECT_TRUE(LayoutUtil::Equal(add->shape().layout(),
operand->shape().layout()));
}
}
}
}
}
TEST_F(LayoutAssignmentTest, DotLayoutUnchangedIfValid) {
const char* hlo_text = R"(
HloModule DotLayout
ENTRY dot {
p0 = f32[5,2,3]{1,2,0} parameter(0)
p1 = f32[5,3,4]{1,2,0} parameter(1)
ROOT dot.1330.10585 = f32[5,2,4]{2,1,0} dot(p0, p1),
lhs_batch_dims={0}, lhs_contracting_dims={2},
rhs_batch_dims={0}, rhs_contracting_dims={1}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Dot(m::Op().WithShape(F32, {5, 2, 3}, {1, 2, 0}),
m::Op().WithShape(F32, {5, 3, 4}, {1, 2, 0}))
.WithShape(F32, {5, 2, 4}, {2, 1, 0})));
}
TEST_F(LayoutAssignmentTest, DotLayoutSetToDefaultIfDefaultValid) {
const char* hlo_text = R"(
HloModule DotLayout
ENTRY dot {
p0 = f32[5,3,2] parameter(0)
p1 = f32[5,4,3]{0,1,2} parameter(1)
ROOT dot.1330.10585 = f32[5,2,4] dot(p0, p1),
lhs_batch_dims={0}, lhs_contracting_dims={1},
rhs_batch_dims={0}, rhs_contracting_dims={2}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Dot(m::Op().WithShape(F32, {5, 3, 2}, {2, 1, 0}),
m::Op().WithShape(F32, {5, 4, 3}, {2, 1, 0}))
.WithShape(F32, {5, 2, 4}, {2, 1, 0})));
}
TEST_F(LayoutAssignmentTest, DotOperandLayoutSetToBatchRowsColsOtherwise) {
const char* hlo_text = R"(
HloModule DotLayout
ENTRY dot {
p0 = f32[2,3,5]{2,1,0} parameter(0)
p1 = f32[3,4,5] parameter(1)
ROOT dot.1330.10585 = f32[5,2,4] dot(p0, p1),
lhs_batch_dims={2}, lhs_contracting_dims={1},
rhs_batch_dims={2}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Dot(m::Op().WithShape(F32, {2, 3, 5}, {0, 1, 2}),
m::Op().WithShape(F32, {3, 4, 5}, {1, 0, 2}))));
}
TEST_F(LayoutAssignmentTest, DotOperandInconsistentDimLayouts) {
const char* hlo_text = R"(
HloModule DotLayout
ENTRY dot {
p0 = f32[5,6,2,3] parameter(0)
p1 = f32[6,5,3,4] parameter(1)
ROOT dot.1330.10585 = f32[5,6,2,4] dot(p0, p1),
lhs_batch_dims={0,1}, lhs_contracting_dims={3},
rhs_batch_dims={1,0}, rhs_contracting_dims={2}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Dot(m::Op().WithShape(F32, {5, 6, 2, 3}, {3, 2, 1, 0}),
m::Op().WithShape(F32, {6, 5, 3, 4}, {3, 2, 0, 1}))));
}
TEST_F(LayoutAssignmentTest, TransposedDotLayout) {
const char* hlo_text = R"(
HloModule DotLayout
ENTRY dot {
p0 = f32[5,2,3] parameter(0)
p1 = f32[5,3,4,6] parameter(1)
dot = f32[5,2,4,6] dot(p0, p1),
lhs_batch_dims={0}, lhs_contracting_dims={2},
rhs_batch_dims={0}, rhs_contracting_dims={1}
ROOT out = f32[2,5,4,6] transpose(dot), dimensions={1,0,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Transpose(
m::Dot(m::Op().WithShape(F32, {5, 2, 3}, {2, 1, 0}),
m::Op().WithShape(F32, {5, 3, 4, 6}, {3, 2, 1, 0}))
.WithShape(F32, {5, 2, 4, 6}, {3, 2, 0, 1}))
.WithShape(F32, {2, 5, 4, 6}, {3, 2, 1, 0})));
}
TEST_F(LayoutAssignmentTest, TransposedDotOfDotLayout) {
const char* hlo_text = R"(
HloModule DotLayout
ENTRY dot {
p0 = f32[8,50] parameter(0)
p1 = f32[2,8,4,4] parameter(1)
p2 = f32[4,38] parameter(2)
dot.1 = f32[50,2,4,4]{3,2,1,0} dot(p0, p1),
lhs_contracting_dims={0}, rhs_contracting_dims={1}
dot.2 = f32[50,2,4,38]{3,2,1,0} dot(dot.1, p2),
lhs_contracting_dims={2}, rhs_contracting_dims={0}
ROOT out = f32[2,50,38,4]{2,3,0,1} transpose(dot.2), dimensions={1,0,3,2}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Transpose(
m::Dot(m::Copy(m::Dot(m::Op().WithShape(F32, {8, 50}, {1, 0}),
m::Op().WithShape(F32, {2, 8, 4, 4},
{3, 2, 0, 1}))
.WithShape(F32, {50, 2, 4, 4}, {3, 2, 1, 0}))
.WithShape(F32, {50, 2, 4, 4}, {3, 1, 0, 2}),
m::Op().WithShape(F32, {4, 38}, {1, 0}))
.WithShape(F32, {50, 2, 4, 38}, {3, 2, 1, 0}))
.WithShape(F32, {2, 50, 38, 4}, {2, 3, 0, 1})));
}
TEST_F(LayoutAssignmentTest, DotLayoutS8) {
const char* hlo_text = R"(
HloModule DotLayout
ENTRY int8_t {
p0 = s8[32,64] parameter(0)
p1 = s8[64,96] parameter(1)
ROOT out = s32[32,96] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Dot(m::Op().WithShape(S8, {32, 64}, {1, 0}),
m::Op().WithShape(S8, {64, 96}, {0, 1}))));
}
TEST_F(LayoutAssignmentTest, SortLayout) {
const char* hlo_text = R"(
HloModule SortLayout
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = f32[] parameter(2)
p.1.rhs = f32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort {
keys = f32[3,2]{0,1} constant({{0,1},{0,1},{0,1}})
values = f32[2,3]{1,0} parameter(0)
transpose = f32[3,2]{1,0} transpose(values), dimensions={1,0}
ROOT sort = (f32[3,2]{1,0}, f32[3,2]{1,0}) sort(keys, transpose),
dimensions={1}, to_apply=compare
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Sort(m::Op().WithShape(F32, {3, 2}, {1, 0}),
m::Op().WithShape(F32, {3, 2}, {1, 0}))));
}
TEST_F(LayoutAssignmentTest, FftLayout) {
const char* hlo_text = R"(
HloModule Fft_module
ENTRY Fft {
input = c64[8,32]{0,1} parameter(0)
fft = c64[8,32] fft(input), fft_type=FFT, fft_length={32}
ROOT transpose = c64[32,8] transpose(fft), dimensions={1,0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(
m::Transpose(m::Fft(m::Op().WithShape(C64, {8, 32}, {1, 0}))
.WithShape(C64, {8, 32}, {1, 0})))));
}
TEST_F(LayoutAssignmentTest, CustomCallConstrainedAlias) {
const char* module_str = R"(
HloModule TestModule
ENTRY entry {
Arg_0 = f32[2,5,5]{2,1,0} parameter(0)
Arg_1 = f32[2,5,5]{2,1,0} parameter(1)
Arg_2 = f32[2,5,5]{2,1,0} parameter(2)
dot.0 = f32[2,5,5]{2,1,0} dot(Arg_1, Arg_2), lhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_batch_dims={0}, rhs_contracting_dims={2}, operand_precision={highest,highest}
custom-call.0 = (f32[2,5,5]{1,2,0}, s8[16]{0}, s8[16]{0}) custom-call(Arg_0, dot.0), custom_call_target="dummy_call", operand_layout_constraints={f32[2,5,5]{1,2,0}, f32[2,5,5]{1,2,0}}, output_to_operand_aliasing={{0}: (1, {})}
ROOT get-tuple-element.0 = f32[2,5,5]{1,2,0} get-tuple-element(custom-call.0), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(module_str));
ComputationLayout computation_layout(
m->entry_computation()->ComputeProgramShape());
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(m.get()), IsOkAndHolds(true));
const HloInstruction* call_0 = FindInstruction(m.get(), "custom-call.0");
auto expect_layout = [](const Shape& shape,
absl::Span<const int64_t> minor_to_major) {
const Layout expected = LayoutUtil::MakeLayout(minor_to_major);
EXPECT_TRUE(LayoutUtil::Equal(shape.layout(), expected))
<< "Expected layout " << expected << ", actual " << shape.layout();
};
expect_layout(ShapeUtil::GetSubshape(call_0->shape(), {0}), {1, 2, 0});
expect_layout(call_0->operand(0)->shape(), {1, 2, 0});
expect_layout(call_0->operand(1)->shape(), {1, 2, 0});
}
TEST_F(LayoutAssignmentTest, MoveToHostCustomCallConstrained) {
const char* module_str = R"(
HloModule TestModule
ENTRY entry {
Arg_0 = f32[2,5,5]{2,1,0} parameter(0)
custom-call.0 = f32[2,5,5] custom-call(Arg_0), custom_call_target="MoveToHost"
ROOT custom-call.1 = f32[2,5,5]{2, 1, 0} custom-call(custom-call.0), custom_call_target="fixed_call", operand_layout_constraints={f32[2,5,5]{1,2,0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(module_str));
ComputationLayout computation_layout(
m->entry_computation()->ComputeProgramShape());
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(m.get()), IsOkAndHolds(true));
const HloInstruction* call_0 = FindInstruction(m.get(), "custom-call.0");
const Layout input_layout = call_0->operand(0)->shape().layout();
const Layout output_layout = call_0->shape().layout();
EXPECT_TRUE(LayoutUtil::Equal(input_layout, output_layout))
<< "Expected the same input/output layouts. Input: " << input_layout
<< ". Output: " << output_layout;
}
TEST_F(LayoutAssignmentTest, MoveToDeviceCustomCallConstrained) {
const char* module_str = R"(
HloModule TestModule
ENTRY entry {
Arg_0 = f32[2,5,5]{2,1,0} parameter(0)
custom-call.0 = f32[2,5,5] custom-call(Arg_0), custom_call_target="MoveToDevice"
ROOT custom-call.1 = f32[2,5,5]{2, 1, 0} custom-call(custom-call.0), custom_call_target="fixed_call", operand_layout_constraints={f32[2,5,5]{1,2,0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(module_str));
ComputationLayout computation_layout(
m->entry_computation()->ComputeProgramShape());
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(m.get()), IsOkAndHolds(true));
const HloInstruction* call_0 = FindInstruction(m.get(), "custom-call.0");
const Layout input_layout = call_0->operand(0)->shape().layout();
const Layout output_layout = call_0->shape().layout();
EXPECT_TRUE(LayoutUtil::Equal(input_layout, output_layout))
<< "Expected the same input/output layouts. Input: " << input_layout
<< ". Output: " << output_layout;
}
TEST_F(LayoutAssignmentTest, ConvCuDNNF8) {
if (!GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP() << "FP8 convolutions require HOPPER or newer archiecture.";
}
const char* hlo = R"(
HloModule jit_conv_general_dilated
ENTRY main.4 {
Arg_0 = f8e4m3fn[1,64,64,16]{3,2,1,0} parameter(0)
Arg_1 = f8e4m3fn[3,3,16,32]{3,2,1,0} parameter(1)
ROOT conv = f8e4m3fn[1,64,64,32]{3,2,1,0} convolution(Arg_0, Arg_1), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f
}
)";
MatchOptimizedHlo(hlo, R"(
)");
}
TEST_F(LayoutAssignmentTest, ConvCuDNNBF16) {
if (!GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::AMPERE)) {
GTEST_SKIP() << "Conv with Bfloat16 uses NHWC layout for "
"architectures with Tensor Cores.";
}
const char* hlo = R"(
HloModule jit_conv_general_dilated
ENTRY main.4 {
Arg_0.1 = bf16[1,64,64,16]{3,2,1,0} parameter(0), sharding={replicated}
Arg_1.2 = bf16[3,3,16,32]{3,2,1,0} parameter(1), sharding={replicated}
ROOT convolution.3 = bf16[1,64,64,32]{3,2,1,0} convolution(Arg_0.1, Arg_1.2), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, metadata={op_name="jit(conv_general_dilated)/jit(main)/conv_general_dilated[window_strides=(1, 1) padding=((1, 1), (1, 1)) lhs_dilation=(1, 1) rhs_dilation=(1, 1) dimension_numbers=ConvDimensionNumbers(lhs_spec=(0, 3, 1, 2), rhs_spec=(3, 2, 0, 1), out_spec=(0, 3, 1, 2)) feature_group_count=1 batch_group_count=1 lhs_shape=(1, 64, 64, 16) rhs_shape=(3, 3, 16, 32) precision=None preferred_element_type=None]" source_file="/usr/local/lib/python3.8/dist-packages/flax/linen/linear.py" source_line=438}
}
)";
MatchOptimizedHlo(hlo, R"(
)");
}
TEST_F(LayoutAssignmentTest, ConvCuDNNFP16) {
if (!GetCudaComputeCapability().IsAtLeast(se::CudaComputeCapability::VOLTA)) {
GTEST_SKIP() << "Conv with FP16 uses NHWC layout for "
"architectures with Tensor Cores.";
}
const char* hlo = R"(
HloModule jit_conv_general_dilated
ENTRY main.4 {
Arg_0.1 = f16[1,64,64,16]{3,2,1,0} parameter(0), sharding={replicated}
Arg_1.2 = f16[3,3,16,32]{3,2,1,0} parameter(1), sharding={replicated}
ROOT convolution.3 = f16[1,64,64,32]{3,2,1,0} convolution(Arg_0.1, Arg_1.2), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f
}
)";
MatchOptimizedHlo(hlo, R"(
)");
}
TEST_F(LayoutAssignmentTest, ReduceOperandLayout) {
const char* module_str = R"(
scalar_add_computation {
scalar_lhs = c64[] parameter(0)
scalar_rhs = c64[] parameter(1)
ROOT add.1 = c64[] add(scalar_lhs, scalar_rhs)
}
ENTRY main {
param_0 = c64[512,64,1024,32,128]{4,3,2,1,0} parameter(0)
negate = c64[512,64,1024,32,128]{4,3,2,1,0} negate(param_0)
constant_7 = c64[] constant((0, 0))
ROOT reduce.2 = c64[512,1024,128]{2,1,0} reduce(negate, constant_7), dimensions={1,3}, to_apply=scalar_add_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(module_str));
ComputationLayout computation_layout(
m->entry_computation()->ComputeProgramShape());
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(m.get()), IsOkAndHolds(true));
auto reduce = m->entry_computation()->root_instruction();
EXPECT_EQ(reduce->operand(0)->shape().layout().minor_to_major(),
LayoutUtil::MakeLayout({3, 1, 4, 2, 0}).minor_to_major());
}
TEST_F(LayoutAssignmentTest, ReduceOperandLayoutDivisorOfWarpSize) {
const char* module_str = R"(
scalar_add_computation {
scalar_lhs = c64[] parameter(0)
scalar_rhs = c64[] parameter(1)
ROOT add.1 = c64[] add(scalar_lhs, scalar_rhs)
}
ENTRY main {
param_0 = c64[512,16,1024,128]{3,2,1,0} parameter(0)
negate = c64[512,16,1024,128]{3,2,1,0} negate(param_0)
constant_7 = c64[] constant((0, 0))
ROOT reduce.2 = c64[512,1024,128]{2,1,0} reduce(negate, constant_7), dimensions={1}, to_apply=scalar_add_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(module_str));
ComputationLayout computation_layout(
m->entry_computation()->ComputeProgramShape());
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(m.get()), IsOkAndHolds(true));
auto reduce = m->entry_computation()->root_instruction();
EXPECT_EQ(reduce->operand(0)->shape().layout().minor_to_major(),
LayoutUtil::MakeLayout({1, 3, 2, 0}).minor_to_major());
}
TEST_F(LayoutAssignmentTest, SendRcvLayout) {
const char* hlo = R"(
HloModule Module
condition {
p = (f32[100,100], (f32[100,100], u32[], token[])) parameter(0)
ROOT lt = pred[] constant(1)
}
body {
p = (f32[100,100], (f32[100,100], u32[], token[])) parameter(0)
t1 = f32[100,100] get-tuple-element(p), index=0
t = (f32[100,100], u32[], token[]) get-tuple-element(p), index=1
sdone = token[] send-done(t), channel_id=3, frontend_attributes={
_xla_send_recv_pipeline="0"
}
tk = token[] after-all()
rcvd = (f32[100,100]{0,1}, u32[], token[]) recv(tk), channel_id=2
zz = (f32[100,100]{0,1}, token[]) recv-done(rcvd), channel_id=2
rcvd_d = get-tuple-element(zz), index=0
snd = (f32[100,100]{0,1}, u32[], token[]) send(t1, tk), channel_id=3, frontend_attributes={
_xla_send_recv_pipeline="0"
}
a = add(t1, t1)
b = add(rcvd_d, a)
ROOT tup = tuple(b, snd)
}
ENTRY %main {
p0 = f32[100,100] parameter(0)
tk = token[] after-all()
snd = (f32[100,100]{0,1}, u32[], token[]) send(p0, tk), channel_id=1, frontend_attributes={
_xla_send_recv_pipeline="0"
}
t = tuple(p0, snd)
ROOT loop = while(t), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo));
ComputationLayout computation_layout(
m->entry_computation()->ComputeProgramShape());
RunAndFilecheckHloRewrite(
hlo,
GpuLayoutAssignment{&computation_layout, GetGpuComputeCapability(),
GetDnnVersion()},
R"(
)");
}
}
}
} |
2,034 | cpp | tensorflow/tensorflow | split_k_gemm_rewriter | third_party/xla/xla/service/gpu/split_k_gemm_rewriter.cc | third_party/xla/xla/service/gpu/split_k_gemm_rewriter_test.cc | #ifndef XLA_SERVICE_GPU_SPLIT_K_GEMM_REWRITER_H_
#define XLA_SERVICE_GPU_SPLIT_K_GEMM_REWRITER_H_
#include <cstdint>
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/matmul_utils.h"
namespace xla {
namespace gpu {
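// Returns true if a suffix of `span` has a product divisible by `divisor`,
// i.e. the minor dimensions can be split evenly into `divisor` parts.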
bool HasDivisibleSuffixAllowingSplit(absl::Span<int64_t const> span,
int64_t divisor);
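// Rewrites the Triton GEMM fusion `dot_fusion` so that the contracting
// dimension is processed in config.split_k slices, and sums the partial
// results with a reduce added outside the fusion.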
absl::Status MakeDotSplitKBatch(HloInstruction* dot_fusion,
const TritonGemmConfig& config);
}
}
#endif
#include "xla/service/gpu/split_k_gemm_rewriter.h"
#include <cmath>
#include <cstdint>
#include <iterator>
#include <stack>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/layout.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/triton_fusion_analysis.h"
#include "xla/service/gpu/triton_support.h"
#include "xla/service/gpu/triton_tiling_propagation.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
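// Walks the dimensions from minor to major, accumulating their product.
// Succeeds once the product is divisible by `divisor`; fails as soon as the
// running product no longer divides `divisor`, since the split would then cut
// through the middle of a dimension.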
bool HasDivisibleSuffixAllowingSplit(const absl::Span<int64_t const> span,
const int64_t divisor) {
CHECK_GE(divisor, 1);
int64_t product = 1;
for (auto it = span.crbegin(); it != span.crend(); ++it) {
product *= *it;
if (product % divisor == 0) {
return true;
}
if (divisor % product != 0) {
return false;
}
}
return false;
}
namespace {
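// Copies dimension indices, incrementing those at or above `threshold` by one
// to make room for the newly inserted split-K dimension.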
void CopyIncrementingAboveThreshold(
const tsl::protobuf::RepeatedField<int64_t>& source,
tsl::protobuf::RepeatedField<int64_t>& destination, const int threshold) {
destination.Reserve(source.size());
for (int64_t x : source) {
if (x >= threshold) {
++x;
}
destination.Add(x);
}
}
void CopyIncrementingAboveThreshold(absl::Span<const int64_t> source,
DimensionVector& destination,
const int threshold) {
destination.reserve(source.size());
for (int64_t x : source) {
if (x >= threshold) {
++x;
}
destination.push_back(x);
}
}
absl::Status UncompilableMatmul(absl::string_view explanation) {
absl::Status s = absl::CancelledError(explanation);
s.SetPayload(kUncompilableFusion, absl::Cord(explanation));
return s;
}
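// Bitcasts the sparsity metadata operand so that its minor dimension is split
// into [split_k, minor_size / split_k].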
absl::StatusOr<HloInstruction*> MakeSparseMetaOperand(
HloDotInstruction& dot, const TritonGemmConfig& config) {
CHECK_EQ(dot.sparse_operands(), 1);
CHECK_EQ(dot.sparsity().front().index(), 0);
HloInstruction* meta = dot.mutable_operand(2);
const Shape& shape = meta->shape();
if (shape.dimensions().back() % config.split_k != 0) {
return UncompilableMatmul("Sparsity metadata has incorrect shape.");
}
std::vector<int64_t> dimensions(shape.dimensions().begin(),
shape.dimensions().end() - 1);
dimensions.push_back(config.split_k);
dimensions.push_back(shape.dimensions().back() / config.split_k);
Shape new_shape = ShapeUtil::MakeShapeWithDescendingLayout(
shape.element_type(), dimensions);
return MakeBitcastHlo(meta, new_shape);
}
}
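// Bitcasts dot operand `operand_number` so that its contracting dimension of
// size k becomes [split_k, k / split_k], zero-padding k first when it is not
// divisible. Uses the TritonFusionAnalysis iteration specs to reject sliced
// or overly fragmented contracting dimensions.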
absl::StatusOr<HloInstruction*> MakeSplitKOperand(
HloInstruction& dot, const TritonFusionAnalysis& analysis,
const TritonGemmConfig& config, const int64_t contracting_dim_idx,
const int operand_number) {
HloInstruction* operand = dot.mutable_operand(operand_number);
const int64_t k = operand->shape().dimensions(contracting_dim_idx);
const bool need_padding = k % config.split_k != 0;
TritonFusionAnalysis::Scope scope = (operand_number == 0)
? TritonFusionAnalysis::Scope::LHS
: TritonFusionAnalysis::Scope::RHS;
auto check_if_supported = [&](const HloInstruction& hlo,
bool check_divisibility) {
const TensorIterationSpec::DimIterationSpec* spec =
analysis.IterSpec(scope, &hlo, contracting_dim_idx);
if (spec == nullptr) {
return absl::OkStatus();
}
if (spec->size() != 1) {
return UncompilableMatmul("Unsupported case.");
}
const TensorIterationSpec::IterationSpecFragment& fragment = spec->at(0);
if (fragment.is_sliced()) {
return UncompilableMatmul(
"Sliced contracting dimension is not supported yet.");
}
if (check_divisibility && !HasDivisibleSuffixAllowingSplit(
fragment.subfragments, config.split_k)) {
return UncompilableMatmul("Contracting dimension is too fragmented.");
}
if (config.split_k > ceil(1.0 * fragment.count / config.block_k)) {
return UncompilableMatmul(
"Too small divisible part of the contracting dimension.");
}
return absl::OkStatus();
};
  TF_RETURN_IF_ERROR(
      check_if_supported(*operand, /*check_divisibility=*/!need_padding));
for (const HloInstruction* param : analysis.ScopeParameters(scope)) {
    TF_RETURN_IF_ERROR(
        check_if_supported(*param, /*check_divisibility=*/!need_padding));
}
if (need_padding) {
HloInstruction* const zero =
dot.parent()->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(operand->shape().element_type())));
PaddingConfig padding_config = MakeNoPaddingConfig(operand->shape().rank());
padding_config.mutable_dimensions(contracting_dim_idx)
->set_edge_padding_high(config.split_k - k % config.split_k);
TF_ASSIGN_OR_RETURN(HloInstruction * pad,
MakePadHlo(operand, zero, padding_config));
*pad->mutable_shape()->mutable_layout() = operand->shape().layout();
operand = pad;
}
CHECK_GE(operand->shape().dimensions(contracting_dim_idx), config.split_k);
const Shape& shape = operand->shape();
Shape new_shape(shape.element_type(), {}, {}, {});
for (int i = 0; i < shape.rank(); ++i) {
const int64_t dimension_size = shape.dimensions(i);
if (i == contracting_dim_idx) {
new_shape.add_dimensions(config.split_k);
new_shape.add_dimensions(dimension_size / config.split_k);
} else {
new_shape.add_dimensions(dimension_size);
}
}
Layout* new_layout = new_shape.mutable_layout();
for (int64_t logical_dim_idx : shape.layout().minor_to_major()) {
if (logical_dim_idx >= contracting_dim_idx) {
new_layout->add_minor_to_major(logical_dim_idx + 1);
}
if (logical_dim_idx <= contracting_dim_idx) {
new_layout->add_minor_to_major(logical_dim_idx);
}
}
return MakeBitcastHlo(operand, new_shape);
}
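// Rewrites the fused dot computation: the split-K slice index becomes an
// extra batch dimension of the dot and is prepended as the major dimension of
// every instruction between the dot and the fusion root; those instructions
// must be distributive over addition, and operands coming from outside the
// chain are broadcast along the new dimension.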
absl::Status MakeDotComputationSplitKBatch(
HloComputation* computation, const TritonGemmConfig& config,
bool disable_reduced_precision_reduction) {
HloDotInstruction* dot = Cast<HloDotInstruction>(
hlo_query::GetFirstInstructionWithOpcode(*computation, HloOpcode::kDot));
TF_ASSIGN_OR_RETURN(const auto analysis,
TritonFusionAnalysis::Execute(*computation));
const DotDimensionNumbers& old_dim_numbers = dot->dot_dimension_numbers();
DotDimensionNumbers new_dim_numbers;
TF_ASSIGN_OR_RETURN(const int64_t lhs_contracting_idx,
ContractingDimensionIndex(*dot, 0));
CopyIncrementingAboveThreshold(
old_dim_numbers.lhs_contracting_dimensions(),
*new_dim_numbers.mutable_lhs_contracting_dimensions(),
lhs_contracting_idx);
new_dim_numbers.mutable_lhs_batch_dimensions()->Add(lhs_contracting_idx);
CopyIncrementingAboveThreshold(
old_dim_numbers.lhs_batch_dimensions(),
*new_dim_numbers.mutable_lhs_batch_dimensions(), lhs_contracting_idx);
TF_ASSIGN_OR_RETURN(const int64_t rhs_contracting_idx,
ContractingDimensionIndex(*dot, 1));
CopyIncrementingAboveThreshold(
old_dim_numbers.rhs_contracting_dimensions(),
*new_dim_numbers.mutable_rhs_contracting_dimensions(),
rhs_contracting_idx);
new_dim_numbers.mutable_rhs_batch_dimensions()->Add(rhs_contracting_idx);
CopyIncrementingAboveThreshold(
old_dim_numbers.rhs_batch_dimensions(),
*new_dim_numbers.mutable_rhs_batch_dimensions(), rhs_contracting_idx);
if (dot->sparse_operands()) {
if (dot->sparsity().size() != 1 || dot->sparsity().front().index() != 0) {
return UncompilableMatmul("Sparsity is only supported on left operand.");
}
}
std::stack<HloInstruction*> to_process;
absl::flat_hash_set<HloInstruction*> to_process_set;
HloInstruction* current = dot;
do {
to_process.push(current);
CHECK(to_process_set.insert(current).second);
if (current->users().empty()) {
break;
}
CHECK_EQ(current->user_count(), 1);
current = current->users()[0];
if (!legacy_triton::IsDistributiveOverAddition(*current)) {
return Cancelled("Operation non-distributive over addition after dot.");
}
} while (true);
bool did_pad = false;
while (!to_process.empty()) {
HloInstruction* current = to_process.top();
to_process.pop();
HloInstruction* expanded;
if (current == dot) {
TF_ASSIGN_OR_RETURN(
HloInstruction * lhs,
MakeSplitKOperand(*dot, analysis, config, lhs_contracting_idx, 0));
TF_ASSIGN_OR_RETURN(
HloInstruction * rhs,
MakeSplitKOperand(*dot, analysis, config, rhs_contracting_idx, 1));
if (lhs->operand(0)->opcode() == HloOpcode::kPad) {
CHECK_EQ(rhs->operand(0)->opcode(), HloOpcode::kPad);
did_pad = true;
}
std::vector<SparsityDescriptor> sparsity(dot->sparsity().begin(),
dot->sparsity().end());
std::vector<HloInstruction*> sparse_meta(sparsity.size());
for (int i = 0; i < sparsity.size(); ++i) {
sparsity[i].set_dimension(sparsity[i].dimension() + 1);
TF_ASSIGN_OR_RETURN(sparse_meta[i],
MakeSparseMetaOperand(*dot, config));
}
expanded = MakeDotHlo(lhs, rhs, new_dim_numbers, dot->precision_config(),
dot->shape().element_type(), sparsity, sparse_meta)
.value();
expanded->mutable_shape()->mutable_layout()->clear_minor_to_major();
CopyIncrementingAboveThreshold(dot->shape().layout().minor_to_major(),
*expanded->mutable_shape()
->mutable_layout()
->mutable_minor_to_major(),
0);
expanded->mutable_shape()->mutable_layout()->add_minor_to_major(0);
dot->SetupDerivedInstruction(expanded);
} else {
expanded = computation->AddInstruction(current->CloneWithNewShape(
ShapeUtil::PrependMajorDimension(config.split_k, current->shape())));
if (expanded->opcode() == HloOpcode::kTranspose) {
const auto* old_transpose = Cast<HloTransposeInstruction>(current);
auto* new_transpose = Cast<HloTransposeInstruction>(expanded);
new_transpose->mutable_dimensions()->clear();
new_transpose->mutable_dimensions()->reserve(
new_transpose->shape().rank());
new_transpose->mutable_dimensions()->push_back(0);
for (const int64_t dim : old_transpose->dimensions()) {
new_transpose->mutable_dimensions()->push_back(dim + 1);
}
}
}
TF_RETURN_IF_ERROR(current->ReplaceAllUsesWithDifferentShape(expanded));
TF_RETURN_IF_ERROR(computation->RemoveInstruction(current));
if (current == dot) {
continue;
}
for (int i = 0; i < expanded->operands().size(); ++i) {
HloInstruction* operand = expanded->mutable_operand(i);
if (!to_process_set.contains(operand)) {
std::vector<int64_t> broadcast_dimensions(operand->shape().rank());
absl::c_iota(broadcast_dimensions, 1);
TF_RETURN_IF_ERROR(expanded->ReplaceOperandWithDifferentShape(
i, MakeBroadcastHlo(operand, broadcast_dimensions,
ShapeUtil::PrependMajorDimension(
config.split_k, operand->shape()))));
}
}
}
if (disable_reduced_precision_reduction) {
PrimitiveType output_type =
computation->root_instruction()->shape().element_type();
PrimitiveType accumulator_type = output_type == PrimitiveType::F64
? PrimitiveType::F64
: PrimitiveType::F32;
computation->root_instruction()->mutable_shape()->set_element_type(
accumulator_type);
}
if (did_pad) {
TF_RETURN_IF_ERROR(
TritonFusionAnalysis::Execute(*computation, config.split_k).status());
}
return absl::OkStatus();
}
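// Applies the split-K rewrite to the fusion computation, then sums the
// partial results over the new major dimension with a reduce, converting back
// to the original output type when reduced-precision accumulation is
// disabled.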
absl::Status MakeDotSplitKBatch(HloInstruction* dot_fusion,
const TritonGemmConfig& config) {
CHECK_EQ(dot_fusion->opcode(), HloOpcode::kFusion);
if (dot_fusion->shape().IsTuple()) {
return Unimplemented("Tuple output is not supported with split-K yet.");
}
const bool disable_reduced_precision_reduction =
dot_fusion->GetModule()
->config()
.debug_options()
.xla_gpu_triton_gemm_disable_reduced_precision_reduction();
const PrimitiveType output_type = dot_fusion->shape().element_type();
const Layout output_layout = dot_fusion->shape().layout();
TF_RETURN_IF_ERROR(MakeDotComputationSplitKBatch(
dot_fusion->fused_instructions_computation(), config,
disable_reduced_precision_reduction));
const HloInstruction* root = dot_fusion->fused_expression_root();
*dot_fusion->mutable_shape() = root->shape();
HloInstruction* zero =
dot_fusion->parent()->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(root->shape().element_type())));
TF_ASSIGN_OR_RETURN(HloInstruction * reduce,
MakeReduceHlo(dot_fusion, zero, {0},
HloOpcode::kAdd, &dot_fusion->metadata()));
*reduce->mutable_shape()->mutable_layout() = output_layout;
if (dot_fusion->IsRoot()) {
dot_fusion->parent()->set_root_instruction(reduce,
true);
} else {
TF_RETURN_IF_ERROR(dot_fusion->ReplaceAllUsesWithDifferentShape(reduce));
}
if (disable_reduced_precision_reduction) {
HloInstruction* convert = MakeConvertToHlo(reduce, output_type);
if (reduce->IsRoot()) {
reduce->parent()->set_root_instruction(convert,
true);
} else {
TF_RETURN_IF_ERROR(reduce->ReplaceAllUsesWithDifferentShape(convert));
}
}
return absl::OkStatus();
}
}
} | #include "xla/service/gpu/split_k_gemm_rewriter.h"
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/triton_fusion_analysis.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/layout_assignment.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::testing::FieldsAre;
namespace m = ::xla::match;
TEST(HasDivisibleSuffixAllowingSplitTest, AllTests) {
EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({1}, 1));
EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({2}, 2));
EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({2, 2}, 2));
EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({3, 2}, 6));
EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({2, 3, 2}, 6));
EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({15, 2}, 6));
EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({3, 15, 2}, 6));
EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({}, 1));
EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({1}, 2));
EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({3}, 2));
EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({2, 3}, 2));
}
using SplitKTest = HloTestBase;
TEST_F(SplitKTest, MakeSplitK) {
const std::string hlo_text = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
bitcast.1 = s8[3,5,32,128]{2,1,3,0} bitcast(parameter_0)
copy.1 = s8[3,5,32,128]{3,2,1,0} copy(bitcast.1)
reshape.5 = s8[480,128]{1,0} reshape(copy.1)
convert.8 = bf16[480,128]{1,0} convert(reshape.5)
parameter_1 = bf16[16,128]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
p1 = bf16[16,128]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm",
metadata={op_name="foo"}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
  TritonGemmConfig config(/*block_m=*/16, /*block_n=*/16, /*block_k=*/16,
                          /*split_k=*/4, /*num_stages=*/1, /*num_warps=*/4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kReduce);
EXPECT_EQ(root->metadata().op_name(), "foo");
}
TEST_F(SplitKTest, MakeSplitKWithOutputFusion) {
const std::string hlo_text = R"(
HloModule t
triton_gemm_dot {
p0 = f16[480,128]{1,0} parameter(0)
p1 = f16[16,128]{1,0} parameter(1)
d = f16[480,16]{1,0} dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
c = bf16[] constant(123)
n = bf16[] negate(c)
bc = bf16[480,16]{1,0} broadcast(n)
cv = bf16[480,16]{1,0} convert(d)
ROOT a = bf16[480,16]{1,0} multiply(bc, cv)
}
ENTRY e {
p0 = f16[480,128]{1,0} parameter(0)
p1 = f16[16,128]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kReduce);
}
TEST_F(SplitKTest, PreventSplitKWithNonDistributiveOperations) {
const std::string hlo_text = R"(
HloModule t
triton_gemm_dot {
p0 = f16[480,128]{1,0} parameter(0)
p1 = f16[16,128]{1,0} parameter(1)
d = f16[480,16]{1,0} dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
c = f32[480,16]{1,0} convert(d)
ROOT s = f32[480,16]{1,0} tanh(c)
}
ENTRY e {
p0 = f16[480,128]{1,0} parameter(0)
p1 = f16[16,128]{1,0} parameter(1)
ROOT fusion = f32[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
EXPECT_THAT(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config),
tsl::testing::StatusIs(
tsl::error::CANCELLED,
absl::StrFormat(
"Operation non-distributive over addition after dot.")));
}
TEST_F(SplitKTest, MakeSplitKWithNonDivisibleDimensionSize) {
constexpr absl::string_view kHloText = R"(
t {
c1 = s32[] constant(1)
bc1 = s32[31]{0} broadcast(c1), dimensions={}
p0 = s32[31]{0} parameter(0)
cmp = pred[31]{0} compare(bc1, p0), direction=EQ
cvt = f32[31]{0} convert(cmp)
bc2 = f32[17,31]{1,0} broadcast(cvt), dimensions={1}
c0 = f32[] constant(0)
bc0 = f32[17,16]{1,0} broadcast(c0), dimensions={}
ROOT dot = f32[31,16]{1,0} dot(bc2, bc0),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = s32[31]{0} parameter(0)
ROOT r = f32[31,16]{1,0} fusion(p0),
kind=kCustom, calls=t, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 2, 1, 2);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
}
TEST_F(SplitKTest, AvoidSplitKWithSlicedContractingDimension) {
const std::string hlo_text = R"(
t {
p0 = f16[32,1234] parameter(0)
s0 = f16[32,256] slice(p0), slice={[0:32], [41:297]}
p1 = f16[256,768] parameter(1)
ROOT d = f16[32,768] dot(s0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f16[32,1234] parameter(0)
p1 = f16[256,768] parameter(1)
ROOT r = f16[32,768] fusion(p0, p1),
kind=kCustom, calls=t, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 16, 2, 1, 2);
EXPECT_THAT(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config),
tsl::testing::StatusIs(
tsl::error::CANCELLED,
absl::StrFormat(
"Sliced contracting dimension is not supported yet.")));
}
TEST_F(SplitKTest, MakeSplitKWithNonStandardOutputLayout) {
const std::string kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
bitcast.1 = s8[3,5,32,128]{2,1,3,0} bitcast(parameter_0)
copy.1 = s8[3,5,32,128]{3,2,1,0} copy(bitcast.1)
reshape.5 = s8[480,128]{1,0} reshape(copy.1)
convert.8 = bf16[480,128]{1,0} convert(reshape.5)
parameter_1 = bf16[16,128]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{0,1} dot(convert.8, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
p1 = bf16[16,128]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{0,1} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kReduce);
EXPECT_EQ(module->entry_computation()->root_instruction()->shape().layout(),
Layout({0, 1}));
}
TEST_F(SplitKTest, MakeSplitKWithExistingBatchDim) {
const std::string hlo_text = R"(
HloModule m
triton_gemm_dot.24 {
parameter_1 = bf16[1,1,800,5,128]{4,3,2,1,0} parameter(1)
bitcast.3 = bf16[800,5,128]{2,1,0} bitcast(parameter_1)
convert.3 = f32[800,5,128]{2,1,0} convert(bitcast.3)
parameter_0 = f32[1,5,700,800]{3,2,1,0} parameter(0)
bitcast.2 = f32[5,700,800]{2,1,0} bitcast(parameter_0)
ROOT dot.26 = f32[5,128,700]{2,1,0} dot(convert.3, bitcast.2),
lhs_batch_dims={1}, lhs_contracting_dims={0},
rhs_batch_dims={0}, rhs_contracting_dims={2}
}
ENTRY e {
tmp_3 = f32[1,5,700,800]{3,2,1,0} parameter(0)
tmp_0 = bf16[1,1,800,5,128]{4,3,2,1,0} parameter(1)
ROOT triton_gemm_dot.24 = f32[5,128,700]{2,1,0} fusion(tmp_3, tmp_0),
kind=kCustom, calls=triton_gemm_dot.24,
backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(32, 64, 64, 8, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kReduce);
}
TEST_F(SplitKTest, SupportsIndivisible) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[3,129,5,32]{3,2,1,0} parameter(0)
bitcast.1 = s8[3,5,32,129]{2,1,3,0} bitcast(parameter_0)
copy.1 = s8[3,5,32,129]{3,2,1,0} copy(bitcast.1)
reshape.5 = s8[480,129]{1,0} reshape(copy.1)
convert.8 = bf16[480,129]{1,0} convert(reshape.5)
parameter_1 = bf16[16,129]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[3,129,5,32]{3,2,1,0} parameter(0)
p1 = bf16[16,129]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
}
TEST_F(SplitKTest, SupportsIndivisibleSimpleSplitK4) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[480,129]{1,0} parameter(0)
convert_0 = bf16[480,129]{1,0} convert(parameter_0)
parameter_1 = bf16[16,129]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(convert_0, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[480,129]{1,0} parameter(0)
p1 = bf16[16,129]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
}
TEST_F(SplitKTest, SupportsIndivisibleWithCustomLayout) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[480,129]{0,1} parameter(0)
convert_0 = bf16[480,129]{0,1} convert(parameter_0)
parameter_1 = bf16[16,129]{0,1} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(convert_0, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[480,129]{0,1} parameter(0)
p1 = bf16[16,129]{0,1} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
constexpr TritonGemmConfig kConfig(16, 16, 16, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), kConfig));
  TF_EXPECT_OK(HloVerifier(/*layout_sensitive=*/true,
                           /*allow_mixed_precision=*/true,
LayoutAssignment::InstructionCanChangeLayout)
.Run(module.get())
.status());
}
TEST_F(SplitKTest, SupportsIndivisibleSimpleSplitK16) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[480,255]{1,0} parameter(0)
convert_0 = bf16[480,255]{1,0} convert(parameter_0)
parameter_1 = bf16[16,255]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(convert_0, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[480,255]{1,0} parameter(0)
p1 = bf16[16,255]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 16, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
}
TEST_F(SplitKTest, SupportsIndivisibleWithTranspose) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[480,255]{1,0} parameter(0)
convert_0 = bf16[480,255]{1,0} convert(parameter_0)
transpose_0 = bf16[255,480]{1,0} transpose(convert_0), dimensions={1,0}
parameter_1 = bf16[16,255]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(transpose_0, parameter_1),
lhs_contracting_dims={0}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[480,255]{1,0} parameter(0)
p1 = bf16[16,255]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 16, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
}
TEST_F(SplitKTest, SupportIndivisibleWithBroadcast) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[] parameter(0)
convert_0 = bf16[] convert(parameter_0)
broadcast_0 = bf16[480,255]{1,0} broadcast(convert_0)
parameter_1 = bf16[16,255]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(broadcast_0, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[] parameter(0)
p1 = bf16[16,255]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 16, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
}
TEST_F(SplitKTest, SupportsIndivisibleWithBitcast) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[3,5,480,17]{3,0,1,2} parameter(0)
convert_0 = bf16[3,5,480,17]{3,0,1,2} convert(parameter_0)
bitcast_0 = bf16[480,255]{1,0} bitcast(convert_0)
parameter_1 = bf16[16,255]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(bitcast_0, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[3,5,480,17]{3,0,1,2} parameter(0)
p1 = bf16[16,255]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 16, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
}
TEST_F(SplitKTest, SkipSmallK) {
const std::string hlo_text = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[3,64,5,32]{3,2,1,0} parameter(0)
bitcast.1 = s8[3,5,32,64]{2,1,3,0} bitcast(parameter_0)
copy.1 = s8[3,5,32,64]{3,2,1,0} copy(bitcast.1)
reshape.5 = s8[480,64]{1,0} reshape(copy.1)
convert.8 = bf16[480,64]{1,0} convert(reshape.5)
parameter_1 = bf16[16,64]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[3,64,5,32]{3,2,1,0} parameter(0)
p1 = bf16[16,64]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 128, 4, 1, 4);
EXPECT_THAT(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config),
tsl::testing::StatusIs(
tsl::error::CANCELLED,
"Too small divisible part of the contracting dimension."));
}
TEST_F(SplitKTest, FragmentedKSupported) {
const std::string hlo_text = R"(
HloModule t
triton_gemm_dot {
p0 = f16[7,2,16,4,20] parameter(0)
t0 = f16[2,16,4,20,7] transpose(p0), dimensions={1,2,3,4,0}
b0 = f16[2560,7] bitcast(t0)
a1 = f16[2560,5] parameter(1)
ROOT r = f16[7,5] dot(b0, a1),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f16[7,2,16,4,20] parameter(0)
p1 = f16[2560,5] parameter(1)
ROOT fusion = f16[7,5] fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(32, 32, 16, 1, 1, 4);
config.split_k = 5;
EXPECT_THAT(
MakeDotSplitKBatch(module->entry_computation()->root_instruction(),
config),
tsl::testing::StatusIs(tsl::error::CANCELLED,
"Contracting dimension is too fragmented."));
config.split_k = 8;
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kReduce);
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
TF_ASSERT_OK_AND_ASSIGN(
const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation, config.split_k));
EXPECT_EQ(dot_computation->root_instruction()->shape(),
ShapeUtil::MakeShapeWithDescendingLayout(F16, {8, 7, 5}));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(1, 2560, 0,
2560,
ElementsAre(20, 4, 4, 4, 2))));
}
TEST_F(SplitKTest, FragmentedKUnsupported) {
const std::string hlo_text = R"(
HloModule t
triton_gemm_dot {
p0 = f32[3,128,77] parameter(0)
b0 = f32[384,77] bitcast(p0)
a1 = f32[384,25] parameter(1)
ROOT r = f32[77,25] dot(b0, a1),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f32[3,128,77] parameter(0)
p1 = f32[384,25] parameter(1)
ROOT fusion = f32[77,25] fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
EXPECT_THAT(
MakeDotSplitKBatch(module->entry_computation()->root_instruction(),
config),
tsl::testing::StatusIs(tsl::error::CANCELLED,
"Contracting dimension is too fragmented."));
}
TEST_F(SplitKTest, MakeSplitKWithNonDefaultOutputLayout) {
const std::string kHloText = R"(
triton_gemm_dot.4842_computation {
parameter_0 = bf16[96,96]{1,0} parameter(0)
parameter_1 = bf16[96,7]{1,0} parameter(1)
dot.0 = bf16[96,7]{0,1} dot(parameter_0, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT bitcast.2 = bf16[7,3,32]{2,1,0} bitcast(dot.0)
}
ENTRY e {
parameter_0.91 = bf16[96,96]{1,0} parameter(0)
parameter_1.86 = bf16[96,7]{1,0} parameter(1)
ROOT triton_gemm_dot.4842 = bf16[7,3,32]{2,1,0}
fusion(parameter_0.91, parameter_1.86), kind=kCustom,
calls=triton_gemm_dot.4842_computation
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 2, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kReduce);
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
}
TEST_F(SplitKTest, SparseDotWithLhsSparseOperandIsRewritten) {
const std::string hlo_text = R"(
HloModule test
triton_gemm {
lhs = f16[2,5,1600] parameter(0)
rhs = f16[2,3200,10] parameter(1)
meta = u16[2,5,200] parameter(2)
ROOT dot = f32[2,5,10] dot(lhs, rhs, meta),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}, sparsity=L.2@2:4
}
ENTRY e {
lhs = f16[2,5,1600] parameter(0)
rhs = f16[2,3200,10] parameter(1)
meta = u16[2,5,200] parameter(2)
ROOT fusion = f32[2,5,10] fusion(lhs, rhs, meta),
kind=kCustom, calls=triton_gemm, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 16, 4, 1, 1);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kReduce);
HloInstruction* dot =
module->GetComputationWithName("triton_gemm")->root_instruction();
EXPECT_EQ(dot->operand(0)->shape(),
ShapeUtil::MakeShapeWithDescendingLayout(F16, {2, 5, 4, 400}));
EXPECT_EQ(dot->operand(1)->shape(),
ShapeUtil::MakeShapeWithDescendingLayout(F16, {2, 4, 800, 10}));
EXPECT_EQ(dot->operand(2)->shape(),
ShapeUtil::MakeShapeWithDescendingLayout(U16, {2, 5, 4, 50}));
}
TEST_F(SplitKTest, SparseDotWithRhsSparseOperandTriggersError) {
const std::string hlo_text = R"(
HloModule test
triton_gemm {
lhs = f16[2,5,3200] parameter(0)
rhs = f16[2,1600,10] parameter(1)
meta = u16[2,200,10] parameter(2)
ROOT dot = f32[2,5,10] dot(lhs, rhs, meta),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}, sparsity=R.1@2:4
}
ENTRY e {
lhs = f16[2,5,3200] parameter(0)
rhs = f16[2,1600,10] parameter(1)
meta = u16[2,200,10] parameter(2)
ROOT fusion = f32[2,5,10] fusion(lhs, rhs, meta),
kind=kCustom, calls=triton_gemm, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 16, 4, 1, 1);
auto result = MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config);
EXPECT_FALSE(result.ok());
}
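// This fixture enables xla_gpu_triton_gemm_disable_reduced_precision_reduction,
// so the rewrite is expected to accumulate the split-K partials in higher
// precision and append a convert back to the output type, which the
// m::Convert(m::Reduce(...)) matchers below verify.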
class SplitKTestWithMorePreciseReduction
: public HloTestBase,
public ::testing::WithParamInterface<int> {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_gpu_triton_gemm_disable_reduced_precision_reduction(
true);
return debug_options;
}
};
TEST_F(SplitKTestWithMorePreciseReduction, MakeSplitK) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
bitcast.1 = s8[3,5,32,128]{2,1,3,0} bitcast(parameter_0)
copy.1 = s8[3,5,32,128]{3,2,1,0} copy(bitcast.1)
reshape.5 = s8[480,128]{1,0} reshape(copy.1)
convert.8 = bf16[480,128]{1,0} convert(reshape.5)
parameter_1 = bf16[16,128]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
p1 = bf16[16,128]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Convert(m::Reduce(m::Fusion(), m::Constant()))));
}
TEST_F(SplitKTestWithMorePreciseReduction, MakeSplitKWithOutputFusion) {
const std::string hlo_text = R"(
HloModule t
triton_gemm_dot {
p0 = f16[480,128]{1,0} parameter(0)
p1 = f16[16,128]{1,0} parameter(1)
d = f16[480,16]{1,0} dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
c = bf16[] constant(123)
n = bf16[] negate(c)
bc = bf16[480,16]{1,0} broadcast(n)
cv = bf16[480,16]{1,0} convert(d)
ROOT a = bf16[480,16]{1,0} multiply(bc, cv)
}
ENTRY e {
p0 = f16[480,128]{1,0} parameter(0)
p1 = f16[16,128]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Convert(m::Reduce(m::Fusion(), m::Constant()))));
}
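// The split-K batch dimension is prepended to the fused dot's output, so the
// trailing transpose must be rebased: {1,0,2} on rank 3 becomes {0,2,1,3} on
// rank 4, keeping the new batch dimension in place.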
TEST_F(SplitKTest, MakeSplitKWithTransposeAfterDot) {
const std::string hlo_text = R"(
triton_gemm_dot {
p0 = f16[8,288,288]{2,1,0} parameter(0)
p1 = f16[8,288,32]{2,0,1} parameter(1)
d = f16[8,288,32]{2,1,0} dot(p0, p1),
lhs_batch_dims={0}, lhs_contracting_dims={2},
rhs_batch_dims={0}, rhs_contracting_dims={1}
ROOT t = f16[288,8,32]{2,1,0} transpose(d), dimensions={1,0,2}
}
ENTRY e {
p0 = f16[8,288,288]{2,1,0} parameter(0)
p1 = f16[8,288,32]{2,0,1} parameter(1)
ROOT fusion = f16[288,8,32]{2,1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 128, 32, 8, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
const auto* transpose =
Cast<HloTransposeInstruction>(module->entry_computation()
->root_instruction()
->operand(0)
->fused_instructions_computation()
->root_instruction());
EXPECT_THAT(transpose->dimensions(), ElementsAre(0, 2, 1, 3));
}
TEST_F(SplitKTest, MakeSplitKWithTrivialDimension) {
const std::string hlo_text = R"(
triton_gemm_dot {
parameter_0 = f32[1001,1]{1,0} parameter(0)
parameter_1 = f32[1001,2048]{1,0} parameter(1)
ROOT dot = f32[1,2048]{1,0} dot(parameter_0, parameter_1),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
ENTRY %entry_computation {
p0 = f32[1001,1]{1,0} parameter(0)
p1 = f32[1001,2048]{1,0} parameter(1)
ROOT fusion = f32[1,2048]{1,0} fusion(p0, p1), kind=kCustom,
calls=triton_gemm_dot
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 128, 64, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Reduce(m::Fusion(), m::Constant())));
}
}
}
} |
2,035 | cpp | tensorflow/tensorflow | gpu_algebraic_simplifier | null | null | #ifndef XLA_SERVICE_GPU_GPU_ALGEBRAIC_SIMPLIFIER_H_
#define XLA_SERVICE_GPU_GPU_ALGEBRAIC_SIMPLIFIER_H_
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
namespace xla::gpu {
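// GPU-specialized algebraic simplifier visitor. It behaves like the base
// visitor except that dot strength reduction is additionally gated on
// device-specific checks (see ShouldStrengthReduceDotToReduce below).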
class GpuAlgebraicSimplifierVisitor : public AlgebraicSimplifierVisitor {
public:
explicit GpuAlgebraicSimplifierVisitor(
const AlgebraicSimplifierOptions& options,
se::GpuComputeCapability compute_capability,
AlgebraicSimplifier* simplifier)
: AlgebraicSimplifierVisitor(options, simplifier),
compute_capability_(std::move(compute_capability)) {}
bool ShouldStrengthReduceDotToReduce(const HloInstruction* hlo) override;
private:
se::GpuComputeCapability compute_capability_;
};
class GpuAlgebraicSimplifier : public AlgebraicSimplifier {
public:
explicit GpuAlgebraicSimplifier(const AlgebraicSimplifierOptions& options,
se::GpuComputeCapability compute_capability)
: AlgebraicSimplifier(options),
compute_capability_(std::move(compute_capability)) {}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(HloModule* module,
const absl::flat_hash_set<absl::string_view>&
execution_threads) override {
XLA_VLOG_LINES(
2, "GpuAlgebraicSimplifier::Run(), before:\n" + module->ToString());
bool changed = false;
GpuAlgebraicSimplifierVisitor visitor(options_, compute_capability_, this);
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
if (visitor.Run(comp, options_, this)) {
changed = true;
}
}
XLA_VLOG_LINES(
2, "GpuAlgebraicSimplifier::Run(), after:\n" + module->ToString());
return changed;
}
private:
se::GpuComputeCapability compute_capability_;
};
}
#endif
#include "xla/service/gpu/gpu_algebraic_simplifier.h"
#include "absl/log/check.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/triton_support.h"
#include "xla/xla_data.pb.h"
namespace xla::gpu {
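// A dot is strength-reduced to a broadcast/multiply/reduce sequence when both
// operands are effectively vectors, when the matmul is too small to be worth
// rewriting for a GEMM backend, or when Triton cannot handle the GEMM at all.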
bool GpuAlgebraicSimplifierVisitor::ShouldStrengthReduceDotToReduce(
const HloInstruction* hlo) {
if (!options_.enable_dot_strength_reduction()) {
return false;
}
const HloDotInstruction* dot = DynCast<HloDotInstruction>(hlo);
if (dot == nullptr) {
return false;
}
const HloInstruction* lhs = dot->operand(0);
const HloInstruction* rhs = dot->operand(1);
DotDimensionNumbers dnums = dot->dot_dimension_numbers();
bool lhs_is_vector = (dnums.lhs_batch_dimensions_size() +
dnums.lhs_contracting_dimensions_size() ==
lhs->shape().rank());
bool rhs_is_vector = (dnums.rhs_batch_dimensions_size() +
dnums.rhs_contracting_dimensions_size() ==
rhs->shape().rank());
if (lhs_is_vector && rhs_is_vector) {
return true;
}
absl::StatusOr<bool> is_too_small =
      IsMatrixMultiplicationTooSmallForRewriting(*hlo, /*threshold=*/1000000);
CHECK_OK(is_too_small.status());
if (is_too_small.value()) {
return true;
}
return !legacy_triton::CanTritonHandleGEMM(*dot, compute_capability_);
}
} | #include "xla/service/gpu/gpu_algebraic_simplifier.h"
#include <string>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
class GpuAlgebraicSimplifierTest : public HloTestBase {};
TEST_F(GpuAlgebraicSimplifierTest, VectorVectorDotShouldBeStrengthReduced) {
const std::string& hlo_string = R"(
HloModule m
ENTRY entry {
p0 = f32[32, 500] parameter(0)
p1 = f32[32, 500] parameter(1)
ROOT dot = f32[32] dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* dot = module->entry_computation()->root_instruction();
AlgebraicSimplifierOptions options;
options.set_enable_dot_strength_reduction(true);
se::CudaComputeCapability ampere(8, 0);
GpuAlgebraicSimplifier simplifier(options, ampere);
GpuAlgebraicSimplifierVisitor visitor(options, ampere, &simplifier);
EXPECT_TRUE(visitor.ShouldStrengthReduceDotToReduce(dot));
}
TEST_F(GpuAlgebraicSimplifierTest, MatrixVectorDotShouldNotBeStrengthReduced) {
const std::string& hlo_string = R"(
HloModule m
ENTRY entry {
p0 = f32[32, 5000, 7000] parameter(0)
p1 = f32[32, 5000] parameter(1)
ROOT dot = f32[32,7000] dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1},
algorithm=dot_bf16_bf16_f32_x6
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* dot = module->entry_computation()->root_instruction();
AlgebraicSimplifierOptions options;
options.set_enable_dot_strength_reduction(true);
se::CudaComputeCapability ampere(8, 0);
GpuAlgebraicSimplifier simplifier(options, ampere);
GpuAlgebraicSimplifierVisitor visitor(options, ampere, &simplifier);
EXPECT_FALSE(visitor.ShouldStrengthReduceDotToReduce(dot));
}
TEST_F(GpuAlgebraicSimplifierTest,
DotWithTypeUnsupportedByGemmFusionShouldBeStrengthReduced) {
const std::string& hlo_string = R"(
HloModule m
ENTRY entry {
p0 = c64[32, 5000, 7000] parameter(0)
p1 = c64[32, 5000] parameter(1)
ROOT dot = c64[32,7000] dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* dot = module->entry_computation()->root_instruction();
AlgebraicSimplifierOptions options;
options.set_enable_dot_strength_reduction(true);
se::CudaComputeCapability ampere(8, 0);
GpuAlgebraicSimplifier simplifier(options, ampere);
GpuAlgebraicSimplifierVisitor visitor(options, ampere, &simplifier);
EXPECT_TRUE(visitor.ShouldStrengthReduceDotToReduce(dot));
}
TEST_F(GpuAlgebraicSimplifierTest, SmallDotShouldBeStrengthReduced) {
const std::string& hlo_string = R"(
HloModule m
ENTRY entry {
p0 = f32[32, 50, 70] parameter(0)
p1 = f32[32, 50] parameter(1)
ROOT dot = f32[32,70] dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1},
algorithm=dot_bf16_bf16_f32_x6
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* dot = module->entry_computation()->root_instruction();
AlgebraicSimplifierOptions options;
options.set_enable_dot_strength_reduction(true);
se::CudaComputeCapability ampere(8, 0);
GpuAlgebraicSimplifier simplifier(options, ampere);
GpuAlgebraicSimplifierVisitor visitor(options, ampere, &simplifier);
EXPECT_TRUE(visitor.ShouldStrengthReduceDotToReduce(dot));
}
}
} |
2,036 | cpp | tensorflow/tensorflow | gpu_reduce_scatter_creator | null | null | #ifndef XLA_SERVICE_GPU_GPU_REDUCE_SCATTER_CREATOR_H_
#define XLA_SERVICE_GPU_GPU_REDUCE_SCATTER_CREATOR_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
class ReduceScatterCreator : public HloModulePass {
public:
ReduceScatterCreator() = default;
absl::string_view name() const override { return "reduce-scatter-creator"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/gpu_reduce_scatter_creator.h"
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/collective_opt_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/status_macros.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace gpu {
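// Scans for an all-reduce whose result is consumed by a dynamic-slice
// selecting each participant's shard and, when the pattern matches, rewrites
// the pair into a reduce-scatter. If the split dimension is not evenly
// divisible by the group size, the input is first sliced down to the largest
// evenly divisible extent.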
absl::StatusOr<bool> ReduceScatterCreator::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
const HloModuleConfig &config = module->config();
int64_t next_channel_id = hlo_query::NextChannelId(*module);
bool changed = false;
for (HloComputation *computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction *instruction :
computation->MakeInstructionPostOrder()) {
if (instruction->opcode() != HloOpcode::kAllReduce) {
continue;
}
auto *ar = Cast<HloAllReduceInstruction>(instruction);
auto ar_spec = MatchReduceScatter(ar, config.num_partitions(),
config.replica_count(),
                                        /*allow_multiple_split_dims=*/false,
                                        /*allow_intervening_reshape=*/true);
if (!ar_spec) {
VLOG(2) << "Cannot match reduce-scatter " << ar->ToString();
continue;
}
HloInstruction *ds = ar_spec->dynamic_slice;
const int64_t split_dim = ar_spec->split_dim;
Shape scatter_shape = ar->shape();
const int64_t split_dim_size = scatter_shape.dimensions(split_dim);
HloInstruction *rs_input = ar->mutable_operand(0);
const int64_t scatter_dim_size = split_dim_size / ar_spec->group_size;
TF_RET_CHECK(scatter_dim_size * ar_spec->group_size <= split_dim_size);
if (split_dim_size % ar_spec->group_size != 0) {
scatter_shape.set_dimensions(split_dim,
scatter_dim_size * ar_spec->group_size);
rs_input = computation->AddInstruction(HloInstruction::CreateSlice(
scatter_shape, rs_input,
std::vector<int64_t>(scatter_shape.rank(), 0),
scatter_shape.dimensions(),
std::vector<int64_t>(scatter_shape.rank(), 1)));
}
scatter_shape.set_dimensions(split_dim, scatter_dim_size);
std::optional<int64_t> channel_id;
if (ar->channel_id()) {
channel_id = next_channel_id++;
}
HloInstruction *ars =
computation->AddInstruction(HloInstruction::CreateReduceScatter(
scatter_shape, {rs_input}, ar->to_apply(), ar->device_list(),
ar->constrain_layout(), channel_id, ar->use_global_device_ids(),
ar_spec->split_dim));
HloInstruction *result = ars;
HloInstruction *reshape = nullptr;
if (ds->operand(0) != ar) {
reshape = ds->mutable_operand(0);
result = computation->AddInstruction(
HloInstruction::CreateReshape(ds->shape(), result));
}
TF_RETURN_IF_ERROR(ds->ReplaceAllUsesWith(result));
TF_RETURN_IF_ERROR(computation->RemoveInstruction(ds));
if (reshape) {
TF_RETURN_IF_ERROR(computation->RemoveInstruction(reshape));
}
TF_RETURN_IF_ERROR(computation->RemoveInstructionAndUnusedOperands(ar));
changed = true;
}
}
return changed;
}
}
} | #include "xla/service/gpu/gpu_reduce_scatter_creator.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class GpuReduceScatterCreatorTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module, int64_t num_replicas,
int64_t num_partitions, bool expect_change) {
HloModuleConfig config = GetModuleConfigForTest(
num_replicas,
num_partitions);
config.set_use_spmd_partitioning(num_partitions > 1);
TF_ASSIGN_OR_RETURN(auto module,
ParseAndReturnVerifiedModule(hlo_module, config));
auto changed = ReduceScatterCreator().Run(module.get());
if (!changed.ok()) {
return changed.status();
}
EXPECT_EQ(changed.value(), expect_change);
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
size_t AllReduceCount(std::unique_ptr<HloModule> &module) {
return CollectiveCount(module, HloOpcode::kAllReduce);
}
size_t ReduceScatterCount(std::unique_ptr<HloModule> &module) {
    return CollectiveCount(module, HloOpcode::kReduceScatter);
}
private:
size_t CollectiveCount(std::unique_ptr<HloModule> &module, HloOpcode opcode) {
return absl::c_count_if(
module->entry_computation()->instructions(),
[&opcode](HloInstruction *instr) { return instr->opcode() == opcode; });
}
};
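// Each test builds the canonical target pattern: an all-reduce whose output is
// dynamic-sliced at an offset derived from replica-id()/partition-id(), often
// through a constant offset table. The pass must prove the offset selects
// exactly this participant's shard before rewriting to a reduce-scatter.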
TEST_F(GpuReduceScatterCreatorTest, AllReplicas) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={}, to_apply=%sum
%table = s32[8]{0} constant({0,1,2,3,4,5,6,7})
%rid = u32[] replica-id()
%id = s32[1] dynamic-slice(%table, %rid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%id)
%slice_size = s32[] constant(4)
%offset = s32[] multiply(%reshape, %slice_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce, %offset, %zero, %zero),
dynamic_slice_sizes={4,8,128}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
8,
1,
true));
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
const auto *rs = Cast<HloReduceScatterInstruction>(
module->entry_computation()->root_instruction());
EXPECT_EQ(rs->scatter_dimension(), 0) << rs->ToString();
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, AllReplicasWithOffsetReshape) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={}, to_apply=%sum
%table = s32[8]{0} constant({0,1,2,3,4,5,6,7})
%rid = u32[] replica-id()
%id = s32[1] dynamic-slice(%table, %rid), dynamic_slice_sizes={1}
%slice_size = s32[1] constant({4})
%offset = s32[1] multiply(%id, %slice_size)
%reshape = s32[] reshape(%offset)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce, %reshape, %zero, %zero),
dynamic_slice_sizes={4,8,128}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
8,
1,
true));
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
const auto *rs = Cast<HloReduceScatterInstruction>(
module->entry_computation()->root_instruction());
EXPECT_EQ(rs->scatter_dimension(), 0) << rs->ToString();
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, AllReplicasWithReshape) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={}, to_apply=%sum
%table = s32[8]{0} constant({0,1,2,3,4,5,6,7})
%rid = u32[] replica-id()
%id = s32[1] dynamic-slice(%table, %rid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%id)
%slice_size = s32[] constant(4)
%offset = s32[] multiply(%reshape, %slice_size)
%zero = s32[] constant(0)
%reshape.1 = f32[32,16,64] reshape(%all-reduce)
ROOT %dynamic-slice = f32[4,16,64] dynamic-slice(%reshape.1, %offset, %zero, %zero),
dynamic_slice_sizes={4,16,64}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
8,
1,
true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(m::ReduceScatter(m::Parameter(0)))));
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, AllReplicasWithReshapeSplitDimModified) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[336,1024] parameter(0)
%all-reduce = f32[336,1024] all-reduce(%param), replica_groups={}, to_apply=%sum
%rid = u32[] replica-id()
%id = s32[] convert(%rid)
%slice_size = s32[] constant(128)
%offset = s32[] multiply(%id, %slice_size)
%zero = s32[] constant(0)
%reshape.1 = f32[4,84,1024] reshape(%all-reduce)
ROOT %dynamic-slice = f32[4,84,128] dynamic-slice(%reshape.1, %zero, %zero, %offset),
dynamic_slice_sizes={4,84,128}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
8,
1,
true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(m::ReduceScatter(m::Parameter(0)))));
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, AllReplicasDim2) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={}, to_apply=%sum
%table = s32[8]{0} constant({0,1,2,3,4,5,6,7})
%rid = u32[] replica-id()
%rid_s32 = s32[] convert(%rid)
%slice_size = s32[] constant(16)
%offset = s32[] multiply(%rid_s32, %slice_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[32,8,16] dynamic-slice(%all-reduce, %zero, %zero, %offset),
dynamic_slice_sizes={32,8,16}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
8,
1,
true));
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
const auto *rs = Cast<HloReduceScatterInstruction>(
module->entry_computation()->root_instruction());
EXPECT_EQ(rs->scatter_dimension(), 2) << rs->ToString();
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, AllReplicasWrongOffsets) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={}, to_apply=%sum
%table = s32[8]{0} constant({0,1,2,3,4,5,6,8})
%rid = u32[] replica-id()
%id = s32[1] dynamic-slice(%table, %rid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%id)
%slice_size = s32[] constant(4)
%offset = s32[] multiply(%reshape, %slice_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce, %offset, %zero, %zero),
dynamic_slice_sizes={4,8,128}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
8,
1,
false));
}
TEST_F(GpuReduceScatterCreatorTest, AllReplicasIotaTable) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={}, to_apply=%sum
%table = s32[8]{0} iota(), iota_dimension=0
%rid = u32[] replica-id()
%id = s32[1] dynamic-slice(%table, %rid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%id)
%slice_size = s32[] constant(4)
%offset = s32[] multiply(%reshape, %slice_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce, %offset, %zero, %zero),
dynamic_slice_sizes={4,8,128}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
8,
2,
true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, SubgroupedReplicas) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={{1,3,2,0},{4,5,6,7}}, to_apply=%sum
%gtable = s32[8]{0} constant({3,0,2,1,0,1,2,3})
%rid = u32[] replica-id()
%id = s32[1] dynamic-slice(%gtable, %rid), dynamic_slice_sizes={1}
%reshape.0 = s32[] reshape(%id)
%table = s32[4]{0} constant({0,8,16,24})
%offset = s32[1] dynamic-slice(%table, %reshape.0), dynamic_slice_sizes={1}
%reshape.1 = s32[] reshape(%offset)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[8,8,128] dynamic-slice(%all-reduce, %reshape.1, %zero, %zero),
dynamic_slice_sizes={8,8,128}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
8,
2,
true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, AllPartitions) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={{0},{1}}, to_apply=%sum, channel_id=1
%table = s32[8]{0} constant({0,1,2,3,4,5,6,7})
%pid = u32[] partition-id()
%id = s32[1] dynamic-slice(%table, %pid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%id)
%slice_size = s32[] constant(4)
%offset = s32[] multiply(%reshape, %slice_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce, %offset, %zero, %zero),
dynamic_slice_sizes={4,8,128}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
2,
8,
true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, AllReduceFollowedByAllReduce) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce.scattered = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={{0,1,2,3,4,5,6,7},{8,9,10,11,12,13,14,15}}, to_apply=%sum, use_global_device_ids=true, channel_id=1
%table = s32[8]{0} constant({0,1,2,3,4,5,6,7})
%pid = u32[] partition-id()
%id = s32[1] dynamic-slice(%table, %pid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%id)
%slice_size = s32[] constant(4)
%offset = s32[] multiply(%reshape, %slice_size)
%zero = s32[] constant(0)
%dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce.scattered, %offset, %zero, %zero),
dynamic_slice_sizes={4,8,128}
ROOT %all-reduce.sync = f32[4,8,128]{2,1,0} all-reduce(%dynamic-slice),
replica_groups={{0,8},{1,9},{2,10},{3,11},{4,12},{5,13},{6,14},{7,15}}, to_apply=%sum, use_global_device_ids=true, channel_id=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
2,
8,
true));
EXPECT_EQ(AllReduceCount(module), 1);
EXPECT_EQ(ReduceScatterCount(module), 1);
}
TEST_F(GpuReduceScatterCreatorTest, SubgroupsGlobals) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={{1,3,2,0},{4,5,6,7}}, to_apply=%sum, channel_id=1, use_global_device_ids=true
%pid = u32[] partition-id()
%rid = u32[] replica-id()
%pcount = u32[] constant(4)
%ridxp = u32[] multiply(%rid, %pcount)
%gid = u32[] add(%ridxp, %pid)
%gtable = s32[8]{0} constant({3,0,2,1,0,1,2,3})
%id = s32[1] dynamic-slice(%gtable, %gid), dynamic_slice_sizes={1}
%reshape.0 = s32[] reshape(%id)
%table = s32[4]{0} constant({0,8,16,24})
%offset = s32[1] dynamic-slice(%table, %reshape.0), dynamic_slice_sizes={1}
%reshape.1 = s32[] reshape(%offset)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[8,8,128] dynamic-slice(%all-reduce, %reshape.1, %zero, %zero),
dynamic_slice_sizes={8,8,128}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
2,
4,
true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, SubgroupsGlobalsOrthogonalReplicas) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={{1,3,2,0},{5,7,6,4}}, to_apply=%sum, channel_id=1, use_global_device_ids=true
%pid = u32[] partition-id()
%pid_table = s32[4]{0} constant({3,0,2,1})
%offset = s32[1] dynamic-slice(%pid_table, %pid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%offset)
%shard_size = s32[] constant(8)
%mul = s32[] multiply(%reshape, %shard_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[8,8,128] dynamic-slice(%all-reduce, %mul, %zero, %zero),
dynamic_slice_sizes={8,8,128}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
2,
4,
true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, SubgroupsGlobalsNonOrthogonalReplicas) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={{1,3,2,0},{7,5,6,4}}, to_apply=%sum, channel_id=1, use_global_device_ids=true
%pid = u32[] partition-id()
%pid_table = s32[4]{0} constant({3,0,2,1})
%offset = s32[1] dynamic-slice(%pid_table, %pid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%offset)
%shard_size = s32[] constant(8)
%mul = s32[] multiply(%reshape, %shard_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[8,8,128] dynamic-slice(%all-reduce, %mul, %zero, %zero),
dynamic_slice_sizes={8,8,128}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
2,
4,
false));
}
TEST_F(GpuReduceScatterCreatorTest, NonUniformSplit) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[1,7]{1,0} parameter(0)
%all-reduce = f32[1,7]{1,0} all-reduce(%param),
replica_groups={{0,1},{2,3},{4,5},{6,7}}, to_apply=%sum, channel_id=1, use_global_device_ids=true
%pid = u32[] partition-id()
%pid_table = s32[8]{0} constant({0, 1, 0, 1, 0, 1, 0, 1})
%offset = s32[1] dynamic-slice(%pid_table, %pid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%offset)
%shard_size = s32[] constant(3)
%mul = s32[] multiply(%reshape, %shard_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[1,3] dynamic-slice(%all-reduce, %zero, %mul),
dynamic_slice_sizes={1,3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
1,
8,
true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Slice(m::Parameter(0)))));
}
}
}
} |
2,037 | cpp | tensorflow/tensorflow | priority_fusion | third_party/xla/xla/service/gpu/transforms/priority_fusion.cc | third_party/xla/xla/service/gpu/transforms/priority_fusion_test.cc | #ifndef XLA_SERVICE_GPU_PRIORITY_FUSION_H_
#define XLA_SERVICE_GPU_PRIORITY_FUSION_H_
#include <stdint.h>
#include <memory>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/fusion_queue.h"
#include "xla/service/gpu/fusion_process_dump.pb.h"
#include "xla/service/gpu/model/fusion_analysis_cache.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/service/instruction_fusion.h"
#include "xla/stream_executor/device_description.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace gpu {
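// Fuses producers into consumers in order of estimated profitability, as
// computed by the GPU cost model, instead of by simple traversal order.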
class GpuPriorityFusion : public InstructionFusion {
public:
GpuPriorityFusion(tsl::thread::ThreadPool* thread_pool,
const se::DeviceDescription& device,
GpuHloCostAnalysis::Options cost_analysis_options)
: InstructionFusion(GpuPriorityFusion::IsExpensive),
thread_pool_(thread_pool),
device_info_(device),
cost_analysis_options_(std::move(cost_analysis_options)),
fusion_analysis_cache_(device_info_) {}
absl::string_view name() const override { return "priority-fusion"; }
static bool IsExpensive(const HloInstruction& instruction);
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
protected:
std::unique_ptr<FusionQueue> GetFusionQueue(
HloComputation* computation) override;
FusionDecision ShouldFuse(HloInstruction* consumer,
int64_t operand_index) override;
HloInstruction::FusionKind ChooseKind(
const HloInstruction* producer, const HloInstruction* consumer) override;
private:
HloInstruction* FuseInstruction(HloInstruction* fusion_instruction,
HloInstruction* producer) override;
bool ConsumeFuel(HloInstruction* producer, HloInstruction* consumer);
tsl::thread::ThreadPool* thread_pool_;
se::DeviceDescription device_info_;
GpuHloCostAnalysis::Options cost_analysis_options_;
std::unique_ptr<FusionProcessDumpProto> fusion_process_dump_;
HloFusionAnalysisCache fusion_analysis_cache_;
mlir::MLIRContext mlir_context_;
};
}
}
#endif
#include "xla/service/gpu/priority_fusion.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "llvm/ADT/STLExtras.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/dump.h"
#include "xla/service/fusion_queue.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/fusion_process_dump.pb.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/fusion_analysis_cache.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/gpu_performance_model.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "xla/service/gpu/model/symbolic_tile_analysis.h"
#include "xla/service/hlo_graph_dumper.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/blocking_counter.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace gpu {
namespace {
bool ElementIsF32OrF16(const Shape& shape) {
PrimitiveType type = shape.element_type();
return type == F32 || type == F16;
}
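// Candidates considered by this pass: elementwise instructions, non-custom
// fusions, and a fixed allowlist of shape-manipulating and reduction ops.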
bool IsFusible(const HloInstruction& instr) {
if (!instr.IsFusible()) {
return false;
}
if (instr.IsElementwise()) {
return true;
}
switch (instr.opcode()) {
case HloOpcode::kFusion:
return instr.fusion_kind() != HloInstruction::FusionKind::kCustom;
case HloOpcode::kCopy:
case HloOpcode::kIota:
case HloOpcode::kConstant:
case HloOpcode::kReduce:
case HloOpcode::kBitcast:
case HloOpcode::kBroadcast:
case HloOpcode::kConcatenate:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kGather:
case HloOpcode::kPad:
case HloOpcode::kReduceWindow:
case HloOpcode::kReshape:
case HloOpcode::kReverse:
case HloOpcode::kScatter:
case HloOpcode::kSlice:
case HloOpcode::kTranspose:
return true;
default:
return false;
}
}
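// Priority queue that drives fusion for a single computation. A producer's
// priority is the run time the cost model expects to save by fusing it into
// all of its users; producers are retired in decreasing priority order, and
// the priorities of affected instructions are recomputed after every fusion.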
class GpuPriorityFusionQueue {
using Priority = int64_t;
using CanFuseCallback = std::function<FusionDecision(
      HloInstruction*, int64_t)>;
public:
GpuPriorityFusionQueue(
HloComputation* computation,
const GpuHloCostAnalysis::Options& cost_analysis_options,
const se::DeviceDescription* device_info,
FusionProcessDumpProto* fusion_process_dump,
tsl::thread::ThreadPool* thread_pool, mlir::MLIRContext* mlir_context,
HloFusionAnalysisCache& fusion_analysis_cache,
bool triton_softmax_priority_fusion_enabled)
: computation_(computation),
cost_analysis_(cost_analysis_options, device_info),
fusion_process_dump_(fusion_process_dump),
thread_pool_(thread_pool),
mlir_context_(mlir_context),
fusion_analysis_cache_(fusion_analysis_cache),
triton_softmax_priority_fusion_enabled_(
triton_softmax_priority_fusion_enabled) {
VLOG(2) << "Running full HLO cost analysis for " << computation_->name();
TF_CHECK_OK(computation_->Accept(&cost_analysis_));
dump_fusion_visualization_ = computation->parent()
->config()
.debug_options()
.xla_dump_fusion_visualization();
std::vector<HloInstruction*> instructions;
for (auto* instruction : computation->MakeInstructionPostOrder()) {
if (instruction->opcode() == HloOpcode::kParameter ||
instruction->user_count() == 0 || !instruction->IsFusible() ||
instruction->opcode() == HloOpcode::kTuple ||
instruction->opcode() == HloOpcode::kGetTupleElement) {
continue;
}
instructions.push_back(instruction);
}
ComputeAndSetPriorities(instructions);
}
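  // Recomputes priorities for the given instructions and (re)inserts them
  // into the queue; instructions whose priority is negative (fusion is
  // predicted to be unprofitable) are dropped from the queue.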
void ComputeAndSetPriorities(
const std::vector<HloInstruction*>& instructions) {
std::vector<Priority> priorities = ComputePriorities(instructions);
for (auto [instruction, priority] : llvm::zip(instructions, priorities)) {
auto key = std::make_pair(priority, instruction->unique_id());
auto reverse_it = reverse_map_.find(instruction);
if (reverse_it != reverse_map_.end()) {
const PriorityQueue::iterator& queue_it = reverse_it->second;
if (key == queue_it->first) {
continue;
}
producer_priority_queue_.erase(queue_it);
reverse_map_.erase(reverse_it);
}
if (priority < 0) {
continue;
}
auto emplace_result = producer_priority_queue_.emplace(key, instruction);
reverse_map_.emplace(instruction, emplace_result.first);
}
}
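  // Computes priorities for a batch of instructions, scheduling the work on
  // the thread pool when one is available.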
std::vector<Priority> ComputePriorities(
const std::vector<HloInstruction*>& instructions) {
auto schedule_or_run = [this](std::function<void()> fn) {
if (thread_pool_) {
thread_pool_->Schedule(std::move(fn));
} else {
fn();
}
};
tsl::BlockingCounter counter(instructions.size());
std::vector<Priority> priorities(instructions.size());
for (size_t i = 0; i < instructions.size(); ++i) {
schedule_or_run([&, i] {
priorities[i] = CalculateProducerPriority(instructions[i]);
counter.DecrementCount();
});
}
counter.Wait();
return priorities;
}
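  // Pops the highest-priority producer that still has fusible consumers;
  // returns false once the queue is exhausted. For bitcast producers, users
  // that cannot fuse are filtered out up front.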
bool DequeueNextProducer() {
current_producer_ = nullptr;
current_consumers_.clear();
while (!producer_priority_queue_.empty() && current_consumers_.empty()) {
auto next_it = std::prev(producer_priority_queue_.end());
current_producer_ = next_it->second;
producer_priority_queue_.erase(next_it);
reverse_map_.erase(current_producer_);
current_consumers_ = current_producer_->users();
if (current_producer_->opcode() == HloOpcode::kBitcast) {
llvm::erase_if(current_consumers_, [&](HloInstruction* consumer) {
return !CanFuseCached(current_producer_, consumer);
});
}
}
return !current_consumers_.empty();
}
void UpdatePriorities() {
for (auto instruction : to_update_priority_) {
TF_CHECK_OK(cost_analysis_.RevisitInstruction(instruction));
}
ComputeAndSetPriorities(std::vector<HloInstruction*>{
to_update_priority_.begin(), to_update_priority_.end()});
to_update_priority_.clear();
}
void PreFusion(HloInstruction* producer, HloInstruction* consumer) {
if (dump_fusion_visualization_) {
RegisterFusionState(
*computation_,
absl::StrCat("About to fuse |", producer->name(), "| into |",
consumer->name(), "| inside PriorityFusion"),
*consumer, producer);
}
InvalidateCaches(producer);
InvalidateCaches(consumer);
}
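  // Drops every cached fusion decision and analysis touching this
  // instruction so it is recomputed after the surrounding HLO changes.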
void InvalidateCaches(HloInstruction* instruction) {
can_fuse_cache_.erase(instruction);
for (const HloInstruction* operand : instruction->operands()) {
auto it = can_fuse_cache_.find(operand);
if (it != can_fuse_cache_.end()) {
it->second.erase(instruction);
}
}
gpu_performance_model_cache_.Invalidate(*instruction);
fusion_analysis_cache_.Invalidate(*instruction);
}
void OnFusingInstruction(HloInstruction* fusion,
HloInstruction* original_producer,
HloInstruction* original_consumer) {
if (fusion_process_dump_) {
auto* fusion_step =
fusion_process_dump_->add_fusion_steps()->mutable_fusion();
fusion_step->set_fusion_name(std::string(fusion->name()));
fusion_step->set_producer_name(std::string(original_producer->name()));
fusion_step->set_consumer_name(std::string(original_consumer->name()));
}
if (dump_fusion_visualization_) {
RegisterFusionState(
*computation_,
absl::StrCat("Fused |", original_producer->name(), "| into |",
fusion->name(), "| inside PriorityFusion"),
*fusion);
}
if (fusion != original_consumer) {
RemoveInstruction(original_consumer);
}
if (original_producer->user_count() == 0) {
original_producer->DetachFromOperandsAndUsers();
}
for (HloInstruction* operand : fusion->operands()) {
if (operand == original_producer ||
operand->opcode() == HloOpcode::kConstant ||
operand->opcode() == HloOpcode::kGetTupleElement) {
continue;
}
if (!operand->IsFusible()) {
continue;
}
to_update_priority_.insert(operand);
}
to_update_priority_.insert(fusion);
}
void RemoveInstruction(HloInstruction* instruction) {
to_update_priority_.erase(instruction);
fusion_analysis_cache_.Invalidate(*instruction);
auto reverse_it = reverse_map_.find(instruction);
if (reverse_it == reverse_map_.end()) {
return;
}
producer_priority_queue_.erase(reverse_it->second);
reverse_map_.erase(reverse_it);
}
HloInstruction* current_producer() { return current_producer_; }
const std::vector<HloInstruction*>& current_consumers() {
return current_consumers_;
}
private:
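  // The priority of a producer is the estimated run time saved by fusing it
  // into all of its users, in nanoseconds. Bitcasts get maximum priority so
  // they are fused first; constants and producers that cannot fuse with
  // every non-bitcast user get minimum priority.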
Priority CalculateProducerPriority(HloInstruction* producer) {
if (producer->opcode() == HloOpcode::kBitcast) {
return std::numeric_limits<Priority>::max();
}
if (producer->opcode() == HloOpcode::kConstant) {
return std::numeric_limits<Priority>::min();
}
if (auto fusion_decision = CanFuseWithAllNonBitcastUsers(producer);
!fusion_decision) {
if (fusion_process_dump_) {
absl::MutexLock lock(&fusion_process_dump_mutex_);
auto* step = fusion_process_dump_->add_fusion_steps()
->mutable_producer_ineligible();
step->set_producer_name(std::string(producer->name()));
step->set_reason(fusion_decision.Explain());
}
return std::numeric_limits<Priority>::min();
}
GpuPerformanceModel::RunTimes run_times =
GpuPerformanceModel::EstimateRunTimesForPriorityFusion(
producer, &cost_analysis_,
GpuPerformanceModelOptions::PriorityFusion(
&fusion_analysis_cache_, &gpu_performance_model_cache_),
producer->users());
if (fusion_process_dump_) {
absl::MutexLock lock(&fusion_process_dump_mutex_);
auto* step =
fusion_process_dump_->add_fusion_steps()->mutable_update_priority();
step->set_producer_name(std::string(producer->name()));
for (auto* consumer : producer->users()) {
step->add_consumer_names(std::string(consumer->name()));
}
step->set_us_fused(absl::ToDoubleMicroseconds(run_times.time_fused));
step->set_us_unfused(absl::ToDoubleMicroseconds(run_times.time_unfused));
}
return absl::ToInt64Nanoseconds(run_times.time_unfused -
run_times.time_fused);
}
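  // Fusion involving a generic Triton fusion is only allowed when the
  // merged producer/consumer fusion can be tiled by SymbolicTileAnalysis.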
FusionDecision CanFuseTriton(HloInstruction* producer,
HloInstruction* consumer) {
if (!triton_softmax_priority_fusion_enabled_) {
return "triton softmax fusion is not enabled";
}
if (IsGenericTritonFusion(*producer)) {
if (!IsFusible(*consumer)) {
return "the consumer is not fusible";
}
} else {
if (!IsFusible(*producer)) {
return "the producer is not fusible";
}
}
auto fusion = HloFusionAdaptor::ForProducerConsumer(producer, consumer);
SymbolicTileAnalysisOrError symbolic_tile_analysis_or =
SymbolicTileAnalysis::AnalyzeFusion(*fusion, mlir_context_);
if (const auto* fusion_decision =
std::get_if<FusionDecision>(&symbolic_tile_analysis_or)) {
return {
absl::StrCat("Fusion can not be tiled with SymbolicTileAnalysis: ",
fusion_decision->Explain())};
}
return {};
}
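  // Legality and profitability filter for a producer/consumer pair: rejects
  // non-fusible ops, pairs of significant reductions, fusions that would
  // turn a reduce fusion into a loop fusion, fusions over budget or with
  // too much code duplication, and fusion into the root instruction.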
FusionDecision CanFuse(HloInstruction* producer, HloInstruction* consumer) {
if (IsGenericTritonFusion(*producer) || IsGenericTritonFusion(*consumer)) {
return CanFuseTriton(producer, consumer);
}
if (!IsFusible(*producer)) {
return "the producer is not fusible";
}
if (!IsFusible(*consumer)) {
return "the consumer is not fusible";
}
if (consumer->opcode() == HloOpcode::kBitcast) {
return "not fusing into a single bitcast as consumer";
}
if (auto can_fuse = CanEmitInputFusedScatter(*producer, *consumer);
!can_fuse) {
return can_fuse;
}
auto contains_significant_reduce = [&](const HloInstruction* instr) {
auto fusion = HloFusionAdaptor::ForInstruction(instr);
return HloAnyOf(fusion->GetRoots(), *fusion, [](auto node) {
if (!(node.opcode() == HloOpcode::kReduce && node.shape().IsArray())) {
return false;
}
int64_t reduction_size =
ShapeUtil::ElementsIn(node.instruction().operand(0)->shape()) /
ShapeUtil::ElementsIn(node.shape());
return reduction_size >= 16;
});
};
if (contains_significant_reduce(producer) &&
contains_significant_reduce(consumer)) {
return "both the producer and the consumer contain a reduce";
}
const auto& analysis = fusion_analysis_cache_.Get(*producer);
if (analysis.GetEmitterFusionKind() ==
HloFusionAnalysis::EmitterFusionKind::kReduction) {
const auto& analysis_fused =
fusion_analysis_cache_.Get(*producer, *consumer);
if (analysis_fused.GetEmitterFusionKind() ==
HloFusionAnalysis::EmitterFusionKind::kLoop) {
return "fusion into output of a reduce fusion would create a loop "
"fusion";
}
}
    if (auto fits_budget = FusionFitsInBudget(
            *consumer, *producer, *cost_analysis_.device_info_,
            /*is_consumer_producer_fusion=*/true);
!fits_budget) {
return fits_budget;
}
if (cost_analysis_.ProducerConsumerMergedTooLarge(*producer, *consumer)) {
return "the fusion would result in an overly large code duplication";
}
if (producer == producer->parent()->root_instruction()) {
return "not fusing into the output of the root instruction";
}
return InstructionFusion::ShouldFuseInPlaceOp(producer, consumer);
}
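  // Memoized wrapper around CanFuse(). The cache is guarded by a mutex
  // because priorities are computed concurrently on the thread pool.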
FusionDecision CanFuseCached(HloInstruction* producer,
HloInstruction* consumer) {
{
absl::MutexLock lock(&can_fuse_cache_mutex_);
auto& producer_cache = can_fuse_cache_[producer];
auto it = producer_cache.find(consumer);
if (it != producer_cache.end()) {
return it->second;
}
}
auto fusion_decision = CanFuse(producer, consumer);
{
absl::MutexLock lock(&can_fuse_cache_mutex_);
can_fuse_cache_[producer][consumer] = fusion_decision;
}
return fusion_decision;
}
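  // A producer is only worth queueing if it can fuse with every non-bitcast
  // user; the first blocking decision is returned otherwise.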
FusionDecision CanFuseWithAllNonBitcastUsers(HloInstruction* producer) {
if (producer->users().empty()) {
return "No users to fuse";
}
FusionDecision result;
bool has_non_bitcast_user = false;
for (const auto& user : producer->users()) {
if (user->opcode() == HloOpcode::kBitcast) {
continue;
}
has_non_bitcast_user = true;
if (auto fusion_decision = CanFuseCached(producer, user);
!fusion_decision) {
VLOG(10) << "Cannot fuse " << producer->name() << " with "
<< user->name() << ", because: " << fusion_decision.Explain();
return fusion_decision;
}
}
if (!has_non_bitcast_user) {
return "not fusing because there are only bitcast users";
}
return {};
}
HloComputation* computation_;
GpuHloCostAnalysis cost_analysis_;
using PriorityQueue = std::map<std::pair<Priority, int>, HloInstruction*>;
PriorityQueue producer_priority_queue_;
absl::flat_hash_map<HloInstruction*, PriorityQueue::iterator> reverse_map_;
HloInstruction* current_producer_;
std::vector<HloInstruction*> current_consumers_;
absl::flat_hash_set<HloInstruction*> to_update_priority_;
FusionProcessDumpProto* fusion_process_dump_;
absl::Mutex fusion_process_dump_mutex_;
tsl::thread::ThreadPool* thread_pool_;
mlir::MLIRContext* mlir_context_;
HloFusionAnalysisCache& fusion_analysis_cache_;
absl::flat_hash_map<
const HloInstruction*,
absl::flat_hash_map<const HloInstruction*, FusionDecision>>
can_fuse_cache_;
absl::Mutex can_fuse_cache_mutex_;
GpuPerformanceModelCache gpu_performance_model_cache_;
bool triton_softmax_priority_fusion_enabled_;
bool dump_fusion_visualization_;
};
}  // namespace
bool GpuPriorityFusion::IsExpensive(
const HloInstruction& instruction) {
switch (instruction.opcode()) {
case HloOpcode::kDivide:
case HloOpcode::kSqrt:
case HloOpcode::kRsqrt:
case HloOpcode::kExp:
if (ElementIsF32OrF16(instruction.shape())) {
return false;
}
break;
case HloOpcode::kFusion:
return false;
default:
break;
}
return InstructionFusion::IsExpensive(instruction);
}
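// Small constants (at most one element) are cheap to duplicate into their
// users, so Run() fuses them in a separate sweep after the main queue is
// drained.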
bool IsSmallConstant(const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kConstant && instr->shape().IsArray() &&
ShapeUtil::ElementsIn(instr->shape()) <= 1;
}
bool GpuPriorityFusion::ConsumeFuel(HloInstruction* producer,
HloInstruction* consumer) {
return xla::ConsumeFuel(name(), [&] {
return absl::StrFormat("Not fusing producer %s with consumer %s",
producer->name(), consumer->name());
});
}
absl::StatusOr<bool> GpuPriorityFusion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool dump_enabled =
DumpingEnabledForHloPass(name(), module->config().debug_options());
if (dump_enabled) {
fusion_process_dump_ = std::make_unique<FusionProcessDumpProto>();
*fusion_process_dump_->mutable_gpu_device_info() =
device_info_.ToGpuProto();
}
auto fusible_computations =
GetFusibleComputations(*module, execution_threads);
for (auto* computation : fusible_computations) {
for (auto* instruction : computation->instructions()) {
module->SetAndUniquifyInstrName(instruction,
absl::StrCat(instruction->name(), ".0"));
}
}
if (dump_enabled) {
fusion_process_dump_->set_hlo_module_before_fusion(
module->ToString(HloPrintOptions::ShortParsable()));
}
bool triton_softmax_priority_fusion_enabled =
module->config()
.debug_options()
.xla_gpu_enable_triton_softmax_priority_fusion();
  bool changed = false;
for (auto* computation : fusible_computations) {
CHECK(!computation->IsFusionComputation());
auto fusion_queue = std::make_unique<GpuPriorityFusionQueue>(
computation, cost_analysis_options_, &device_info_,
fusion_process_dump_.get(), thread_pool_, &mlir_context_,
fusion_analysis_cache_, triton_softmax_priority_fusion_enabled);
while (fusion_queue->DequeueNextProducer()) {
auto producer = fusion_queue->current_producer();
for (auto* consumer : fusion_queue->current_consumers()) {
if (consumer->opcode() == HloOpcode::kBitcast) {
continue;
}
if (!ConsumeFuel(producer, consumer)) continue;
VLOG(5) << "next: " << consumer->name() << "(" << consumer << ") + "
<< producer->name() << "(" << producer << ")";
fusion_queue->PreFusion(producer, consumer);
auto fusion_instruction = Fuse(producer, consumer, computation);
fusion_queue->OnFusingInstruction(fusion_instruction, producer,
consumer);
changed = true;
}
if (producer->user_count() == 0) {
fusion_queue->RemoveInstruction(producer);
TF_RETURN_IF_ERROR(computation->RemoveInstruction(producer));
}
fusion_queue->UpdatePriorities();
}
std::vector<HloInstruction*> constants;
for (auto* instruction : computation->instructions()) {
if (IsSmallConstant(instruction)) {
constants.push_back(instruction);
}
}
for (auto* constant : constants) {
auto users = constant->users();
for (auto* user : users) {
        if (IsFusible(*user) && CanEmitInputFusedScatter(*constant, *user)) {
          // Sketch of the truncated tail, assuming the standard fusion step:
          // fuse the small constant into the fusible user and record the
          // change.
          Fuse(constant, user, computation);
          changed = true;
        }
      }
    }
  }
  return changed;
}
 | #include "xla/service/gpu/priority_fusion.h"
#include <stdint.h>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace m = ::xla::match;
using ::testing::UnorderedElementsAre;
using ::tsl::testing::IsOk;
using ::tsl::testing::IsOkAndHolds;
namespace xla {
namespace gpu {
class PriorityFusionTest : public HloTestBase {
HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
public:
std::vector<HloFusionAnalysis::EmitterFusionKind> RunAndGetFusionKinds(
absl::string_view hlo) {
auto module = ParseAndReturnVerifiedModule(hlo).value();
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->RemoveUnusedComputations(), IsOk());
std::vector<HloFusionAnalysis::EmitterFusionKind> kinds;
for (auto computation : module->computations()) {
if (!computation->FusionInstruction()) continue;
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto analysis = HloFusionAnalysis::Create(
Cast<HloFusionInstruction>(computation->FusionInstruction()),
&device_info);
kinds.push_back(analysis.GetEmitterFusionKind());
}
return kinds;
}
  GpuPriorityFusion priority_fusion_{
      /*thread_pool=*/nullptr, TestGpuDeviceInfo::RTXA6000DeviceInfo(),
      GpuHloCostAnalysis::Options{ShapeSizeBytesFunction(),
                                  /*per_second_rates=*/{},
                                  /*count_multiple_input_accesses=*/true}};
};
TEST_F(PriorityFusionTest, FuseWithSharedArgument) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY main {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
%subtract = f32[] subtract(%p0, %p1)
%compare = pred[] compare(%subtract, %subtract), direction=NE
%add = f32[] add(%p0, %p1)
%abs = f32[] abs(%subtract)
ROOT %select = f32[] select(%compare, %add, %abs)
})")
.value();
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(true));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Fusion()));
EXPECT_EQ(root->fusion_kind(), HloInstruction::FusionKind::kLoop);
}
TEST_F(PriorityFusionTest, FusionFusionWithDuplication) {
absl::string_view kHlo = R"(
HloModule test_module
square {
p = f32[16384]{0} parameter(0)
ROOT m = f32[16384]{0} multiply(p, p)
}
exp {
p = f32[16384]{0} parameter(0)
ROOT e = f32[16384]{0} exponential(p)
}
log {
p = f32[16384]{0} parameter(0)
ROOT l = f32[16384]{0} log(p)
}
ENTRY main {
p = f32[16384]{0} parameter(0)
s = f32[16384]{0} fusion(p), kind=kLoop, calls=square
e = f32[16384]{0} fusion(s), kind=kLoop, calls=exp
l = f32[16384]{0} fusion(s), kind=kInput, calls=log
ROOT t = (f32[16384], f32[16384]) tuple(l, e)
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ENTRY
CHECK-NEXT: %[[PARAM:.*]] = f32[16384]{0} parameter(0)
CHECK-NEXT: %[[FUSION_0:.*]] = f32[16384]{0} fusion(%[[PARAM]])
CHECK-NEXT: %[[FUSION_1:.*]] = f32[16384]{0} fusion(%[[PARAM]])
CHECK-NEXT: ROOT {{.*}} tuple(%[[FUSION_0]], %[[FUSION_1]])
)");
}
TEST_F(PriorityFusionTest, FuseBroadcastIntoBitcastConsumers) {
absl::string_view kHlo = R"(
HloModule test_module
ENTRY main {
param_0 = f32[96]{0} parameter(0)
broadcast = f32[8,96,128,7]{3,2,1,0} broadcast(param_0), dimensions={1}
bitcast.6079.2 = f32[8,24,4,128,7]{4,3,2,1,0} bitcast(broadcast)
ROOT transpose.1990.2 = f32[8,24,128,7,4]{4,3,2,1,0} transpose(bitcast.6079.2), dimensions={0,1,3,4,2}
}
)";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ENTRY
CHECK-NEXT: %[[PARAM:.*]] = f32[96]{0} parameter(0)
CHECK-NEXT: ROOT %{{.*}} fusion(%[[PARAM]])
)");
}
TEST_F(PriorityFusionTest, FuseWideningConvertIntoConsumers) {
absl::string_view kHlo = R"(
HloModule test_module
ENTRY main {
p = f16[512]{0} parameter(0)
a = f16[512]{0} add(p, p)
c = f32[512]{0} convert(a)
s = f32[512]{0} multiply(c, c)
bc = s32[512]{0} bitcast(c)
ROOT t = (f32[512], s32[512]) tuple(s, bc)
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ENTRY
CHECK-NEXT: %[[PARAM:.*]] = f16[512]{0} parameter(0)
CHECK-NEXT: %[[FUSION_F32:.*]] = f32[512]{0} fusion(%[[PARAM]])
CHECK-NEXT: %[[CONVERT_FUSION:.*]] = f32[512]{0} fusion(%[[PARAM]])
CHECK-NEXT: %[[BITCAST:.*]] = s32[512]{0} bitcast(%[[CONVERT_FUSION]])
CHECK-NEXT: ROOT %{{.*}} = (f32[512]{0}, s32[512]{0}) tuple(%[[FUSION_F32]], %[[BITCAST]])
)");
}
TEST_F(PriorityFusionTest, FuseConvertIntoReduce) {
absl::string_view kHlo = R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add.13235 = f32[] add(p0, p1)
}
ENTRY main {
param_0_0.79 = bf16[1024,8192]{1,0} parameter(0)
param_1_0.79 = bf16[1024,8192]{1,0} parameter(1)
param_2.483 = f32[8192]{0} parameter(2)
param_4.2892 = bf16[1024,8192]{1,0} parameter(3)
convert.21854 = f32[1024,8192]{1,0} convert(param_0_0.79)
convert.21855 = f32[1024,8192]{1,0} convert(param_1_0.79)
constant_7773 = f32[] constant(0)
broadcast.14555 = f32[1024,8192]{1,0} broadcast(param_2.483), dimensions={1}
multiply.6906 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21854)
reduce.4813 = f32[1024]{0} reduce(multiply.6906, constant_7773), dimensions={1}, to_apply=add
convert.13970 = bf16[1024]{0} convert(reduce.4813)
convert.21534 = f32[1024,8192]{1,0} convert(param_4.2892)
multiply.6910.clone.1 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21534)
reduce.4811.clone.1 = f32[1024]{0} reduce(multiply.6910.clone.1, constant_7773), dimensions={1}, to_apply=add
convert.13967.clone.1 = bf16[1024]{0} convert(reduce.4811.clone.1)
multiply.6908.clone.1 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21855)
reduce.4812.clone.1 = f32[1024]{0} reduce(multiply.6908.clone.1, constant_7773), dimensions={1}, to_apply=add
convert.13969.clone.1 = bf16[1024]{0} convert(reduce.4812.clone.1)
ROOT fusion.241 = (bf16[1024]{0}, bf16[1024]{0}, bf16[1024]{0}) tuple(convert.13970, convert.13967.clone.1, convert.13969.clone.1)
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK-COUNT-3: ROOT {{.*}} convert(
CHECK: ENTRY %main
CHECK-COUNT-3: fusion
)");
}
TEST_F(PriorityFusionTest, ReductionEpilogueFusionRegressionTest) {
absl::string_view kHlo = R"(
HloModule test_module
add {
rhs.407 = f32[] parameter(1)
lhs.407 = f32[] parameter(0)
ROOT add.24451 = f32[] add(lhs.407, rhs.407)
}
ENTRY main {
param_1.15162 = f32[2752]{0} parameter(1)
convert.44829 = bf16[2752]{0} convert(param_1.15162)
bitcast.24686 = bf16[1,1,2752]{2,1,0} bitcast(convert.44829)
convert.44468 = f32[1,1,2752]{2,1,0} convert(bitcast.24686)
constant_13722 = bf16[] constant(1)
convert.17451 = f32[] convert(constant_13722)
broadcast.17565 = f32[1,1,2752]{2,1,0} broadcast(convert.17451), dimensions={}
negate.167 = f32[1,1,2752]{2,1,0} negate(convert.44468)
exponential.569 = f32[1,1,2752]{2,1,0} exponential(negate.167)
add.1850 = f32[1,1,2752]{2,1,0} add(broadcast.17565, exponential.569)
divide.1376 = f32[1,1,2752]{2,1,0} divide(broadcast.17565, add.1850)
multiply.9709 = f32[1,1,2752]{2,1,0} multiply(convert.44468, divide.1376)
param_0.15005 = f32[2752]{0} parameter(0)
convert.44826 = bf16[2752]{0} convert(param_0.15005)
bitcast.24683 = bf16[1,1,2752]{2,1,0} bitcast(convert.44826)
convert.44467 = f32[1,1,2752]{2,1,0} convert(bitcast.24683)
multiply.9708 = f32[1,1,2752]{2,1,0} multiply(multiply.9709, convert.44467)
convert.16959 = bf16[1,1,2752]{2,1,0} convert(multiply.9708)
fusion.3203 = bf16[2752]{0} bitcast(convert.16959)
convert.15093 = f32[2752]{0} convert(fusion.3203)
broadcast.13841 = f32[8192,2752]{1,0} broadcast(convert.15093), dimensions={1}
param_0.15525 = bf16[8192,2752]{1,0} parameter(2)
convert.13738 = f32[8192,2752]{1,0} convert(param_0.15525)
multiply.6422 = f32[8192,2752]{1,0} multiply(broadcast.13841, convert.13738)
constant_14382 = f32[] constant(0)
fusion.339 = f32[8192]{0} reduce(multiply.6422, constant_14382), dimensions={1}, to_apply=add
convert.44633 = bf16[8192]{0} convert(fusion.339)
ROOT bitcast.24487 = bf16[1,1,8192]{2,1,0} bitcast(convert.44633)
}
)";
EXPECT_THAT(
RunAndGetFusionKinds(kHlo),
UnorderedElementsAre(HloFusionAnalysis::EmitterFusionKind::kLoop,
HloFusionAnalysis::EmitterFusionKind::kReduction));
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ENTRY
CHECK: ROOT {{.*}} bitcast({{.*}}fusion{{.*}})
)");
}
TEST_F(PriorityFusionTest, DoNotChangeReductionFusionToLoopFusion) {
auto module = *ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
rhs.407 = f32[] parameter(1)
lhs.407 = f32[] parameter(0)
ROOT add.24451 = f32[] add(lhs.407, rhs.407)
}
fused_computation {
p0 = f32[16,64]{1,0} parameter(0)
zero = f32[] constant(0.0)
ROOT reduce = f32[16]{0} reduce(p0, zero), dimensions={1}, to_apply=add
}
ENTRY main {
param0 = f32[16,64]{1,0} parameter(0)
fusion = f32[16]{0} fusion(param0), kind=kLoop, calls=fused_computation
ROOT slice = f32[8]{0} slice(fusion), slice={[0:8]}
})");
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(false));
}
TEST_F(PriorityFusionTest, DoNotFuseTransposeIntoReduce) {
absl::string_view kHlo = R"(
HloModule test_module
add {
Arg_1.1046 = f32[] parameter(1)
Arg_0.1045 = f32[] parameter(0)
ROOT add.3303 = f32[] add(Arg_0.1045, Arg_1.1046)
}
ENTRY main {
param_0.17323 = pred[2048,2048]{1,0} parameter(0)
broadcast.22829 = pred[1,12,2048,2048]{3,2,1,0} broadcast(param_0.17323), dimensions={2,3}
param_1.19761 = bf16[2048,24576]{1,0} parameter(1)
convert.29880.clone.1 = f32[2048,24576]{1,0} convert(param_1.19761)
constant_10033_clone_1 = bf16[] constant(0.02002)
convert.30056.clone.1 = f32[] convert(constant_10033_clone_1)
broadcast.18898.clone.1 = f32[2048,24576]{1,0} broadcast(convert.30056.clone.1), dimensions={}
multiply.13451.clone.1 = f32[2048,24576]{1,0} multiply(convert.29880.clone.1, broadcast.18898.clone.1)
tanh.798.clone.1 = f32[2048,24576]{1,0} tanh(multiply.13451.clone.1)
constant_10244_clone_1 = bf16[] constant(50)
convert.30039.clone.1 = f32[] convert(constant_10244_clone_1)
broadcast.18310.clone.1 = f32[2048,24576]{1,0} broadcast(convert.30039.clone.1), dimensions={}
multiply.12550.clone.1 = f32[2048,24576]{1,0} multiply(tanh.798.clone.1, broadcast.18310.clone.1)
convert.29370.clone.1 = bf16[2048,24576]{1,0} convert(multiply.12550.clone.1)
bitcast.22330 = bf16[1,2048,2048,12]{3,2,1,0} bitcast(convert.29370.clone.1)
transpose.6582 = bf16[1,12,2048,2048]{3,2,1,0} transpose(bitcast.22330), dimensions={0,3,2,1}
convert.33705 = f32[1,12,2048,2048]{3,2,1,0} convert(transpose.6582)
constant_10212 = f32[] constant(-2.38197633e+38)
broadcast.22828 = f32[1,12,2048,2048]{3,2,1,0} broadcast(constant_10212), dimensions={}
select.589 = f32[1,12,2048,2048]{3,2,1,0} select(broadcast.22829, convert.33705, broadcast.22828)
bitcast.22075 = f32[12,2048,2048]{2,1,0} bitcast(select.589)
constant_10192 = f32[] constant(-inf)
reduce.1614 = f32[12,2048]{1,0} reduce(bitcast.22075, constant_10192), dimensions={2}, to_apply=add
predarg = pred[1,1,2048,2048]{3,2,1,0} parameter(2)
bitcast.11069 = pred[2048,2048]{1,0} bitcast(predarg)
broadcast.22825 = pred[1,12,2048,2048]{3,2,1,0} broadcast(bitcast.11069), dimensions={2,3}
bitcast.22331 = bf16[1,2048,2048,12]{3,2,1,0} bitcast(convert.29370.clone.1)
transpose.6580 = bf16[1,12,2048,2048]{3,2,1,0} transpose(bitcast.22331), dimensions={0,3,2,1}
convert.33703 = f32[1,12,2048,2048]{3,2,1,0} convert(transpose.6580)
constant_10213 = f32[] constant(-2.38197633e+38)
broadcast.22824 = f32[1,12,2048,2048]{3,2,1,0} broadcast(constant_10213), dimensions={}
select.587 = f32[1,12,2048,2048]{3,2,1,0} select(broadcast.22825, convert.33703, broadcast.22824)
broadcast.22819 = f32[1,12,2048,2048]{3,2,1,0} broadcast(reduce.1614), dimensions={1,2}
subtract.1129 = f32[1,12,2048,2048]{3,2,1,0} subtract(select.587, broadcast.22819)
exponential.418 = f32[1,12,2048,2048]{3,2,1,0} exponential(subtract.1129)
bitcast.22074 = f32[12,2048,2048]{2,1,0} bitcast(exponential.418)
constant_10490 = f32[] constant(0)
reduce.1613 = f32[12,2048]{1,0} reduce(bitcast.22074, constant_10490), dimensions={2}, to_apply=add
constant_468 = f32[] constant(-2.38197633e+38)
broadcast.22833 = pred[1,12,2048,2048]{3,2,1,0} broadcast(bitcast.11069), dimensions={2,3}
bitcast.22332 = bf16[1,2048,2048,12]{3,2,1,0} bitcast(convert.29370.clone.1)
transpose.6584 = bf16[1,12,2048,2048]{3,2,1,0} transpose(bitcast.22332), dimensions={0,3,2,1}
convert.33707 = f32[1,12,2048,2048]{3,2,1,0} convert(transpose.6584)
broadcast.22832 = f32[1,12,2048,2048]{3,2,1,0} broadcast(constant_468), dimensions={}
select.591 = f32[1,12,2048,2048]{3,2,1,0} select(broadcast.22833, convert.33707, broadcast.22832)
broadcast.22821 = f32[1,12,2048,2048]{3,2,1,0} broadcast(reduce.1614), dimensions={1,2}
subtract.1131 = f32[1,12,2048,2048]{3,2,1,0} subtract(select.591, broadcast.22821)
exponential.420 = f32[1,12,2048,2048]{3,2,1,0} exponential(subtract.1131)
broadcast.18351 = f32[1,12,2048,2048]{3,2,1,0} broadcast(reduce.1613), dimensions={1,2}
divide.340 = f32[1,12,2048,2048]{3,2,1,0} divide(exponential.420, broadcast.18351)
ROOT convert.29418 = bf16[1,12,2048,2048]{3,2,1,0} convert(divide.340)
})";
using Kind = HloFusionAnalysis::EmitterFusionKind;
EXPECT_THAT(
RunAndGetFusionKinds(kHlo),
UnorderedElementsAre(Kind::kLoop, Kind::kLoop, Kind::kLoop,
Kind::kReduction, Kind::kReduction, Kind::kTranspose,
Kind::kTranspose, Kind::kTranspose));
}
TEST_F(PriorityFusionTest, DoNotFuseReduceIntoReduce) {
absl::string_view kHlo = R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add.13235 = f32[] add(p0, p1)
}
ENTRY main {
p0 = f32[8,4,128,226]{3,2,1,0} parameter(0)
c0 = f32[] constant(0)
r0 = f32[8,4,128]{2,1,0} reduce(p0, c0), dimensions={3}, to_apply=add
ROOT r1 = f32[8,4]{1,0} reduce(r0, c0), dimensions={2}, to_apply=add
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ROOT {{.*}} reduce(
CHECK: ROOT {{.*}} reduce(
)");
}
TEST_F(PriorityFusionTest, ConvertFusedIntoReduce) {
absl::string_view kHlo = R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add.13235 = f32[] add(p0, p1)
}
ENTRY main {
param_0_0.79 = bf16[1024,8192]{1,0} parameter(0)
param_1_0.79 = bf16[1024,8192]{1,0} parameter(1)
param_2.483 = f32[8192]{0} parameter(2)
param_4.2892 = bf16[1024,8192]{1,0} parameter(3)
convert.21854 = f32[1024,8192]{1,0} convert(param_0_0.79)
convert.21855 = f32[1024,8192]{1,0} convert(param_1_0.79)
constant_7773 = f32[] constant(0)
broadcast.14555 = f32[1024,8192]{1,0} broadcast(param_2.483), dimensions={1}
multiply.6906 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21854)
reduce.4813 = f32[1024]{0} reduce(multiply.6906, constant_7773), dimensions={1}, to_apply=add
convert.13970 = bf16[1024]{0} convert(reduce.4813)
convert.21534 = f32[1024,8192]{1,0} convert(param_4.2892)
multiply.6910.clone.1 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21534)
reduce.4811.clone.1 = f32[1024]{0} reduce(multiply.6910.clone.1, constant_7773), dimensions={1}, to_apply=add
convert.13967.clone.1 = bf16[1024]{0} convert(reduce.4811.clone.1)
multiply.6908.clone.1 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21855)
reduce.4812.clone.1 = f32[1024]{0} reduce(multiply.6908.clone.1, constant_7773), dimensions={1}, to_apply=add
convert.13969.clone.1 = bf16[1024]{0} convert(reduce.4812.clone.1)
ROOT fusion.241 = (bf16[1024]{0}, bf16[1024]{0}, bf16[1024]{0}) tuple(convert.13970, convert.13967.clone.1, convert.13969.clone.1)
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK-COUNT-3: ROOT {{.*}} convert(
CHECK: ENTRY %main
CHECK-COUNT-3: fusion(
CHECK-NOT: fusion(
)");
}
TEST_F(PriorityFusionTest, DoNotFuseDynamicUpdateSliceIntoReduce) {
GTEST_SKIP() << "b/294198633";
absl::string_view kHlo = R"(
HloModule test_module
add {
Arg_1.1046 = f32[] parameter(1)
Arg_0.1045 = f32[] parameter(0)
ROOT add.3303 = f32[] add(Arg_0.1045, Arg_1.1046)
}
ENTRY main {
param_0.10549 = f32[4,2112]{1,0} parameter(0)
param_5.2561 = pred[] parameter(5)
broadcast.19725 = pred[4,1]{1,0} broadcast(param_5.2561), dimensions={}
param_1.11587 = pred[4]{0} parameter(1)
constant_5837 = f32[] constant(1)
broadcast.19723 = f32[4]{0} broadcast(constant_5837), dimensions={}
param_2.5952 = f32[4,8000]{1,0} parameter(2)
param_3.4004 = f32[4]{0} parameter(3)
broadcast.19718 = f32[4,8000]{1,0} broadcast(param_3.4004), dimensions={0}
subtract.1112 = f32[4,8000]{1,0} subtract(param_2.5952, broadcast.19718)
exponential.418 = f32[4,8000]{1,0} exponential(subtract.1112)
constant_6254 = f32[] constant(0)
reduce.1154 = f32[4]{0} reduce(exponential.418, constant_6254), dimensions={1}, to_apply=add
log.38 = f32[4]{0} log(reduce.1154)
broadcast.19717 = f32[4,8000]{1,0} broadcast(log.38), dimensions={0}
subtract.1111 = f32[4,8000]{1,0} subtract(subtract.1112, broadcast.19717)
iota.170 = s32[4,1]{1,0} iota(), iota_dimension=0
constant_6281 = s32[] constant(0)
broadcast.19735 = s32[4]{0} broadcast(constant_6281), dimensions={}
param_4.3400 = s32[4,8000]{1,0} parameter(4)
slice.3186 = s32[4,40]{1,0} slice(param_4.3400), slice={[0:4], [0:40]}
iota.168 = s32[4,1]{1,0} iota(), iota_dimension=0
param_7.1596 = s32[4]{0} parameter(7)
compare.341 = pred[4]{0} compare(param_7.1596, broadcast.19735), direction=LT
constant_5833 = s32[] constant(40)
broadcast.19731 = s32[4]{0} broadcast(constant_5833), dimensions={}
add.8348 = s32[4]{0} add(param_7.1596, broadcast.19731)
select.418 = s32[4]{0} select(compare.341, add.8348, param_7.1596)
bitcast.20942 = s32[4,1]{1,0} bitcast(select.418)
concatenate.1337 = s32[4,2]{1,0} concatenate(iota.168, bitcast.20942), dimensions={1}
gather.43 = s32[4,1,1]{2,1,0} gather(slice.3186, concatenate.1337), offset_dims={1,2}, collapsed_slice_dims={}, start_index_map={0,1}, index_vector_dim=1, slice_sizes={1,1}
bitcast.20941 = s32[4]{0} bitcast(gather.43)
select.398 = s32[4]{0} select(param_1.11587, broadcast.19735, bitcast.20941)
compare.334 = pred[4]{0} compare(select.398, broadcast.19735), direction=LT
constant_6260 = s32[] constant(8000)
broadcast.19720 = s32[4]{0} broadcast(constant_6260), dimensions={}
add.8336 = s32[4]{0} add(select.398, broadcast.19720)
select.396 = s32[4]{0} select(compare.334, add.8336, select.398)
bitcast.20830 = s32[4,1]{1,0} bitcast(select.396)
concatenate.1308 = s32[4,2]{1,0} concatenate(iota.170, bitcast.20830), dimensions={1}
gather.41 = f32[4,1,1]{2,1,0} gather(subtract.1111, concatenate.1308), offset_dims={1,2}, collapsed_slice_dims={}, start_index_map={0,1}, index_vector_dim=1, slice_sizes={1,1}
bitcast.20824 = f32[4]{0} bitcast(gather.41)
select.389 = f32[4]{0} select(param_1.11587, broadcast.19723, bitcast.20824)
bitcast.20823 = f32[4,1]{1,0} bitcast(select.389)
param_6.1719 = s32[] parameter(6)
constant_6323 = s32[] constant(2048)
add.8549 = s32[] add(param_6.1719, constant_6323)
compare.388 = pred[] compare(add.8549, constant_6281), direction=LT
constant_5436 = s32[] constant(4160)
add.8339 = s32[] add(param_6.1719, constant_5436)
select.409 = s32[] select(compare.388, add.8339, add.8549)
dynamic-slice.36 = f32[4,1]{1,0} dynamic-slice(param_0.10549, constant_6281, select.409), dynamic_slice_sizes={4,1}
select.388 = f32[4,1]{1,0} select(broadcast.19725, bitcast.20823, dynamic-slice.36)
ROOT dynamic-update-slice.307 = f32[4,2112]{1,0} dynamic-update-slice(param_0.10549, select.388, constant_6281, select.409)
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ROOT {{.*}} dynamic-update-slice(
CHECK: %[[REDUCE:.*]] = {{.*}} reduce(
CHECK: ROOT {{.*}} log(%[[REDUCE]])
CHECK: ENTRY
CHECK-COUNT-2: fusion(
)");
}
TEST_F(PriorityFusionTest, DontFuseIntoFirstOperandOfScatter) {
auto module = *ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY FuseIntoScatter {
p0 = s32[3,3] parameter(0)
operand = s32[3,3] add(p0, p0)
p1 = s32[2] parameter(1)
indices = s32[2] add(p1, p1)
p2 = s32[2,3] parameter(2)
updates = s32[2,3] add(p2, p2)
scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
ROOT add = s32[3,3] add(scatter, scatter)
})");
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(true));
HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Add(m::Fusion(&fusion), m::Fusion())));
EXPECT_EQ(fusion->fusion_kind(), HloInstruction::FusionKind::kInput);
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Scatter(m::Parameter(), m::Add(), m::Add())));
}
TEST_F(PriorityFusionTest, DontFuseConstantIntoFirstOperandOfScatter) {
auto module = *ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY FuseIntoScatter {
operand = s32[1] constant({0})
indices = s32[24,1] parameter(0)
constant = s32[] constant(1)
updates = s32[24,1] broadcast(constant)
ROOT scatter = s32[1] scatter(operand, indices, updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
})");
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(true));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_THAT(root, GmockMatch(m::Fusion(m::Constant(), m::Parameter())));
EXPECT_EQ(root->fusion_kind(), HloInstruction::FusionKind::kInput);
EXPECT_THAT(root->fused_expression_root(),
GmockMatch(m::Scatter(m::Parameter(), m::Parameter(),
m::Broadcast(m::Constant()))));
}
TEST_F(PriorityFusionTest, DoNotFuseReduceIntoReduceEvenIfOccupancyIsHigh) {
constexpr absl::string_view kHlo = R"(
HloModule test_module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY main {
p0 = f32[4,3584,128,168]{3,2,1,0} parameter(0)
c = f32[] constant(0)
r1 = f32[4,3584,128]{2,1,0} reduce(p0, c), dimensions={3}, to_apply=add
ROOT r2 = f32[4,3584]{1,0} reduce(r1, c), dimensions={2}, to_apply=add
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ROOT {{.*}} reduce(
CHECK: ROOT {{.*}} reduce(
)");
}
TEST_F(PriorityFusionTest, FuseReductionEpilogueWithMultipleUsers) {
constexpr absl::string_view kHlo = R"(
HloModule test_module
add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
fused_computation {
p0 = f32[64,16384]{1,0} parameter(0)
c0 = f32[] constant(0)
ROOT reduce.858 = f32[64]{0} reduce(p0, c0), dimensions={1}, to_apply=add
}
ENTRY main {
p0 = f32[64,16384]{1,0} parameter(0)
fusion = f32[64]{0} fusion(p0), kind=kInput, calls=fused_computation
log = f32[64]{0} log(fusion)
negate = f32[64]{0} custom-call(log), custom_call_target="negate"
ROOT add = f32[64]{0} add(negate, log)
}
)";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ENTRY
CHECK: %[[PARAM:.*]] = {{.*}} parameter(0)
CHECK: %[[FUSION:.*]] = {{.*}} fusion(%[[PARAM]])
CHECK: custom-call(%[[FUSION]])
)");
}
TEST_F(PriorityFusionTest, EpilogueFusion) {
absl::string_view kHlo = R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add.13235 = f32[] add(p0, p1)
}
fused_computation.1 {
p0 = f32[8,4,128,226]{3,2,1,0} parameter(0)
c0 = f32[] constant(0)
ROOT r0 = f32[8,4,128]{2,1,0} reduce(p0, c0), dimensions={3}, to_apply=add
}
fused_computation.2 {
p0 = f32[8,4,128]{2,1,0} parameter(0)
r1 = f32[8,4,128]{2,1,0} log(p0)
ROOT r2 = f32[8,4,128]{2,1,0} log(r1)
}
ENTRY main {
p0 = f32[8,4,128,226]{3,2,1,0} parameter(0)
f1 = f32[8,4,128]{2,1,0} fusion(p0), kind=kInput, calls=%fused_computation.1
ROOT fusion = f32[8,4,128]{2,1,0} fusion(f1), kind=kLoop, calls=%fused_computation.2
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ROOT {{.*}} = f32[8,4,128]{2,1,0} fusion(%p{{.*}}), kind=kInput, calls=%fused_computation)");
}
TEST_F(PriorityFusionTest, EpilogueFusionFails) {
auto module = *ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add.13235 = f32[] add(p0, p1)
}
fused_computation.1 {
p0 = f32[28672,4096]{1,0} parameter(0)
c0 = f32[] constant(0)
ROOT r = f32[28672]{0} reduce(p0, c0), dimensions={1}, to_apply=add
}
fused_computation.2 {
p0 = f32[28672]{0} parameter(0)
p1 = f32[28672]{0} parameter(1)
ROOT a = f32[28672]{0} add(p0, p1)
}
ENTRY main {
p0 = f32[28672,4096]{1,0} parameter(0)
p1 = f32[28672]{0} parameter(1)
f = f32[28672]{0} fusion(p0), kind=kInput, calls=%fused_computation.1
ROOT fusion = f32[28672]{0} fusion(f,p1), kind=kLoop, calls=%fused_computation.2
})");
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(false));
}
TEST_F(PriorityFusionTest, DoNotFuseIntoRoot) {
auto module = *ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY %main (p.0: u32[2], p.1: u32[]) -> u32[2] {
%p.0 = u32[2]{0} parameter(0)
%p.1 = u32[] parameter(1)
ROOT %broadcast = u32[2]{0} broadcast(u32[] %p.1), dimensions={}, sharding={replicated}
%add = u32[2]{0} add(u32[2]{0} %p.0, u32[2]{0} %broadcast)
%tuple.1 = (u32[2]{0}) tuple(u32[2]{0} %add)
%token.0 = token[] after-all()
%outfeed.6 = token[] outfeed((u32[2]{0}) %tuple.1, token[] %token.0), outfeed_shape=(u32[2]{0}), sharding={maximal device=0}
})");
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(false));
}
TEST_F(PriorityFusionTest, DontFuseConcat) {
auto module = *ParseAndReturnVerifiedModule(R"(
HloModule module
%maximum (param_0: f32[], param_1: f32[]) -> f32[] {
%param_0 = f32[] parameter(0)
%param_1 = f32[] parameter(1)
ROOT %maximum = f32[] maximum(f32[] %param_0, f32[] %param_1)
}
%fused_concat (param_0: f32[1,4,401,8,8], param_1: f32[1,1,4,1023,8], param_2: bf16[1,4,1023,8,8]) -> f32[1,4,1424,8,8] {
%param_2 = bf16[1,4,1023,8,8]{4,3,2,1,0} parameter(2)
%convert = f32[1,4,1023,8,8]{4,3,2,1,0} convert(bf16[1,4,1023,8,8]{4,3,2,1,0} %param_2)
%param_1 = f32[1,1,4,1023,8]{4,3,2,1,0} parameter(1)
%bitcast = f32[4,1023,8]{2,1,0} bitcast(f32[1,1,4,1023,8]{4,3,2,1,0} %param_1)
%broadcast = f32[1,4,1023,8,8]{4,3,2,1,0} broadcast(f32[4,1023,8]{2,1,0} %bitcast), dimensions={1,2,4}
%add = f32[1,4,1023,8,8]{4,3,2,1,0} add(f32[1,4,1023,8,8]{4,3,2,1,0} %convert, f32[1,4,1023,8,8]{4,3,2,1,0} %broadcast)
%param_0 = f32[1,4,401,8,8]{4,3,2,1,0} parameter(0)
ROOT %concatenate = f32[1,4,1424,8,8]{4,3,2,1,0} concatenate(f32[1,4,1023,8,8]{4,3,2,1,0} %add, f32[1,4,401,8,8]{4,3,2,1,0} %param_0), dimensions={2}
}
%fused_reduce (param_0: f32[], param_1: f32[1,4,1424,8,8]) -> f32[4,8,8] {
%param_1 = f32[1,4,1424,8,8]{4,3,2,1,0} parameter(1)
%bitcast = f32[4,1424,8,8]{3,2,1,0} bitcast(f32[1,4,1424,8,8]{4,3,2,1,0} %param_1)
%param_0 = f32[] parameter(0)
ROOT %reduce = f32[4,8,8]{2,1,0} reduce(f32[4,1424,8,8]{3,2,1,0} %bitcast, f32[] %param_0), dimensions={1}, to_apply=%maximum
}
%fused_broadcast (param_0: f32[1,4,1424,8,8], param_1: f32[4,8,8]) -> f32[1,4,1424,8,8] {
%param_0 = f32[1,4,1424,8,8]{4,3,2,1,0} parameter(0)
%param_1 = f32[4,8,8]{2,1,0} parameter(1)
%broadcast = f32[1,4,1424,8,8]{4,3,2,1,0} broadcast(f32[4,8,8]{2,1,0} %param_1), dimensions={1,3,4}
ROOT %subtract = f32[1,4,1424,8,8]{4,3,2,1,0} subtract(f32[1,4,1424,8,8]{4,3,2,1,0} %param_0, f32[1,4,1424,8,8]{4,3,2,1,0} %broadcast)
}
ENTRY fusion {
%param_0 = f32[1,4,401,8,8]{4,3,2,1,0} parameter(0)
%param_1 = f32[1,1,4,1023,8]{4,3,2,1,0} parameter(1)
%param_2 = bf16[1,4,1023,8,8]{4,3,2,1,0} parameter(2)
%concat = f32[1,4,1424,8,8]{4,3,2,1,0} fusion(%param_0, %param_1, %param_2), kind=kLoop, calls=fused_concat
%param_3 = f32[] parameter(3)
%reduce = f32[4,8,8]{2,1,0} fusion(%param_3, %concat), kind=kLoop, calls=fused_reduce
%param_4 = f32[4,8 |
2,038 | cpp | tensorflow/tensorflow | command_buffer_scheduling | third_party/xla/xla/service/gpu/transforms/command_buffer_scheduling.cc | third_party/xla/xla/service/gpu/transforms/command_buffer_scheduling_test.cc | #ifndef XLA_SERVICE_GPU_COMMAND_BUFFER_SCHEDULING_H_
#define XLA_SERVICE_GPU_COMMAND_BUFFER_SCHEDULING_H_
#include <cstdint>
#include <memory>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
namespace xla::gpu {
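// Lowers eligible sequences of scheduled HLO instructions to command
// buffers: maximal runs of supported commands are outlined into a separate
// computation that is invoked through a call instruction and later executed
// as a single command buffer (e.g. a CUDA graph).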
class CommandBufferScheduling : public HloModulePass {
public:
struct CommandBufferConfig {
absl::flat_hash_set<DebugOptions::CommandBufferCmdType> enabled_commands;
const se::DeviceDescription& device_description;
};
CommandBufferScheduling(const se::DeviceDescription& device_description,
int32_t gpu_toolkit_version,
int32_t gpu_driver_version);
absl::string_view name() const override {
return "command-buffer-scheduling";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
static std::vector<HloInstructionSequence> CollectCommandBufferSequences(
HloInstructionSequence schedule, const CommandBufferConfig& config,
int32_t min_num_commands = 1);
static absl::Status MoveParametersAndConstantsToFront(
HloComputation* computation);
struct CommandBuffer {
std::vector<HloInstruction*> arguments;
std::vector<HloInstruction*> results;
std::unique_ptr<HloComputation> computation;
absl::flat_hash_map<HloInstruction*, HloInstruction*> inst_mapping;
};
static absl::StatusOr<CommandBuffer> PrepareCommandBuffer(
const HloInstructionSequence& seq);
static absl::StatusOr<HloComputation*> RewriteCommandBuffer(
HloComputation* parent, const HloInstructionSequence& seq,
CommandBuffer command_buffer);
private:
se::DeviceDescription device_description_;
int32_t gpu_toolkit_version_;
int32_t gpu_driver_version_;
};
}  // namespace xla::gpu
#endif  // XLA_SERVICE_GPU_COMMAND_BUFFER_SCHEDULING_H_
#include "xla/service/gpu/command_buffer_scheduling.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/ffi/ffi_api.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/variant_visitor.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
using CommandBuffer = CommandBufferScheduling::CommandBuffer;
using CommandBufferConfig = CommandBufferScheduling::CommandBufferConfig;
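// Predicates classifying instructions: no-ops may appear anywhere inside a
// command buffer sequence, while "commands" (fusions, library calls,
// conditionals, collectives, ...) must have their command buffer lowering
// enabled in the config.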
static bool IsCommand(const HloComputation* computation,
const CommandBufferConfig& config);
static bool IsConstant(const HloInstruction* hlo) {
return hlo->opcode() == HloOpcode::kConstant;
}
static bool IsParameter(const HloInstruction* hlo) {
return hlo->opcode() == HloOpcode::kParameter;
}
static bool IsNoOp(const HloInstruction* hlo) {
return HloPredicateIsOp<HloOpcode::kBitcast, HloOpcode::kTuple,
HloOpcode::kGetTupleElement>(hlo);
}
template <HloOpcode op>
static bool IsCommand(const HloInstruction*, const CommandBufferConfig&);
template <>
bool IsCommand<HloOpcode::kWhile>(const HloInstruction* hlo,
const CommandBufferConfig& config) {
return config.enabled_commands.contains(DebugOptions::CONDITIONALS) &&
IsCommand(hlo->while_body(), config) &&
IsCommand(hlo->while_condition(), config);
}
template <>
bool IsCommand<HloOpcode::kConditional>(const HloInstruction* hlo,
const CommandBufferConfig& config) {
return config.enabled_commands.contains(DebugOptions::CONDITIONALS) &&
absl::c_all_of(hlo->branch_computations(),
[&](const HloComputation* comp) {
return IsCommand(comp, config);
});
}
static bool IsCommand(const HloCustomCallInstruction* hlo,
const CommandBufferConfig& config) {
if (config.enabled_commands.contains(DebugOptions::CUBLAS) &&
IsLegacyCublasMatmul(*hlo)) {
return true;
}
if (config.enabled_commands.contains(DebugOptions::CUBLASLT) &&
(IsCublasLtMatmul(*hlo) || IsCublasLtMatmulF8(*hlo))) {
return true;
}
if (!config.enabled_commands.contains(DebugOptions::CUSTOM_CALL)) {
return false;
}
if (hlo->custom_call_target() == "triton_kernel_call" &&
!absl::StrContains(hlo->metadata().op_name(), "Autotuner")) {
return true;
}
auto registration = ffi::FindHandler(hlo->custom_call_target(), "gpu");
return registration.ok()
? ffi::IsCommandBufferCompatible(registration->traits)
: false;
}
static bool IsCommand(const HloInstruction* hlo,
const CommandBufferConfig& config) {
if (auto* fusion = DynCast<HloFusionInstruction>(hlo)) {
auto gpu_config = fusion->backend_config<GpuBackendConfig>();
const FusionBackendConfig& backend_config =
gpu_config->fusion_backend_config();
if (backend_config.kind() == kCuDnnFusionKind) {
return config.enabled_commands.contains(DebugOptions::CUDNN);
}
const auto& custom_config = backend_config.custom_fusion_config();
if (custom_config.name() == "address_computation") {
auto fusion_analysis =
HloFusionAnalysis::Create(fusion, &config.device_description);
const HloFusionAdaptor& adaptor = fusion_analysis.fusion();
auto custom_call_adaptor = HloFindIf(
adaptor.GetRoots(), adaptor,
[](auto node) { return node.opcode() == HloOpcode::kCustomCall; });
const auto* custom_call = static_cast<const HloCustomCallInstruction*>(
&custom_call_adaptor->instruction());
return IsCommand(custom_call, config);
}
if (custom_config.name() == "dynamic_address_computation") {
return false;
}
return config.enabled_commands.contains(DebugOptions::FUSION);
}
  if (DynCast<HloSortInstruction>(hlo) != nullptr)
    return config.enabled_commands.contains(DebugOptions::FUSION);
if (hlo->opcode() == HloOpcode::kPartitionId ||
hlo->opcode() == HloOpcode::kReplicaId) {
return config.enabled_commands.contains(DebugOptions::FUSION);
}
if (auto* custom_call = DynCast<HloCustomCallInstruction>(hlo))
return IsCommand(custom_call, config);
if (hlo->opcode() == HloOpcode::kWhile)
return IsCommand<HloOpcode::kWhile>(hlo, config);
if (hlo->opcode() == HloOpcode::kConditional)
return IsCommand<HloOpcode::kConditional>(hlo, config);
return false;
}
static bool IsAsyncStartCommand(const HloInstruction* hlo,
const CommandBufferConfig& config) {
if (hlo->opcode() == HloOpcode::kAllReduceStart ||
hlo->opcode() == HloOpcode::kAllGatherStart) {
return config.enabled_commands.contains(DebugOptions::COLLECTIVES);
}
if (hlo->opcode() == HloOpcode::kAsyncStart) {
if (hlo->async_wrapped_opcode() == HloOpcode::kReduceScatter) {
return config.enabled_commands.contains(DebugOptions::COLLECTIVES);
}
}
return false;
}
static bool IsAsyncDoneCommand(const HloInstruction* hlo,
const CommandBufferConfig& config) {
if (hlo->opcode() == HloOpcode::kAllReduceDone ||
hlo->opcode() == HloOpcode::kAllGatherDone) {
return config.enabled_commands.contains(DebugOptions::COLLECTIVES);
}
if (hlo->opcode() == HloOpcode::kAsyncDone) {
if (hlo->async_wrapped_opcode() == HloOpcode::kReduceScatter) {
return config.enabled_commands.contains(DebugOptions::COLLECTIVES);
}
}
return false;
}
static HloInstruction* FindAsyncDoneCommand(const HloInstruction* start) {
if (start->opcode() == HloOpcode::kAllReduceStart ||
start->opcode() == HloOpcode::kAllGatherStart) {
    CHECK_EQ(start->users().size(), 1);
return start->users().front();
} else if (start->opcode() == HloOpcode::kAsyncStart) {
return start->async_chain_done();
}
return nullptr;
}
static bool IsCommand(const HloComputation* computation,
const CommandBufferConfig& config) {
return absl::c_all_of(
computation->instructions(), [&](const HloInstruction* inst) {
return IsNoOp(inst) || IsConstant(inst) || IsParameter(inst) ||
IsCommand(inst, config) || IsAsyncStartCommand(inst, config) ||
IsAsyncDoneCommand(inst, config);
});
}
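// Trailing no-ops contribute nothing to a command buffer, so they are
// peeled off the end of a sequence before it is outlined.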
static void RemoveTrailingNoOps(HloInstructionSequence& seq) {
std::vector<HloInstruction*> instructions = seq.instructions();
for (int i = instructions.size() - 1; i >= 0; i--) {
if (HloInstruction* inst = instructions[i]; IsNoOp(inst)) {
seq.remove_instruction(inst);
} else {
break;
}
}
}
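// Splits the schedule into maximal runs of commands plus interleaved
// no-ops. An async start/done region is accepted only if everything between
// the two instructions is itself a command; runs with fewer than
// `min_num_commands` commands are discarded.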
std::vector<HloInstructionSequence>
CommandBufferScheduling::CollectCommandBufferSequences(
const HloInstructionSequence schedule, const CommandBufferConfig& config,
int32_t min_num_commands) {
std::vector<HloInstructionSequence> sequences;
HloInstructionSequence current_seq;
int64_t num_commands_in_current_seq = 0;
auto collect_current_seq = [&]() {
if (num_commands_in_current_seq >= std::max(1, min_num_commands)) {
RemoveTrailingNoOps(current_seq);
sequences.push_back(std::move(current_seq));
}
current_seq = HloInstructionSequence();
num_commands_in_current_seq = 0;
};
auto& instructions = schedule.instructions();
auto collect_async_region = [&](const HloInstruction* start) {
auto get_index = [&](const HloInstruction* inst) -> size_t {
auto it = std::find(instructions.begin(), instructions.end(), inst);
return std::distance(instructions.begin(), it);
};
HloInstructionSequence seq;
size_t done_index = get_index(FindAsyncDoneCommand(start));
for (size_t i = get_index(start); i <= done_index; i++) {
HloInstruction* inst = instructions.at(i);
if (IsAsyncStartCommand(inst, config)) {
const HloInstruction* done = FindAsyncDoneCommand(inst);
done_index = std::max(done_index, get_index(done));
}
seq.push_back(inst);
}
return seq;
};
auto check_async_region = [&](const HloInstructionSequence& seq) {
if (!absl::c_all_of(seq.instructions(), [&](HloInstruction* inst) {
return IsNoOp(inst) || IsCommand(inst, config) ||
IsAsyncStartCommand(inst, config) ||
IsAsyncDoneCommand(inst, config);
})) {
return false;
}
absl::flat_hash_set<HloInstruction*> done_instructions;
for (const HloInstruction* inst : seq.instructions()) {
if (IsAsyncStartCommand(inst, config)) {
done_instructions.insert(FindAsyncDoneCommand(inst));
}
if (IsAsyncDoneCommand(inst, config)) {
if (!done_instructions.contains(inst)) {
return false;
}
}
}
return true;
};
for (size_t i = 0; i < instructions.size(); i++) {
HloInstruction* inst = instructions.at(i);
if (IsNoOp(inst) && num_commands_in_current_seq) {
current_seq.push_back(inst);
continue;
}
if (IsCommand(inst, config)) {
num_commands_in_current_seq++;
current_seq.push_back(inst);
continue;
}
if (IsAsyncStartCommand(inst, config)) {
HloInstructionSequence seq = collect_async_region(inst);
if (check_async_region(seq)) {
num_commands_in_current_seq += seq.instructions().size();
for (HloInstruction* inst : seq.instructions()) {
current_seq.push_back(inst);
}
i += seq.instructions().size() - 1;
continue;
}
}
collect_current_seq();
}
collect_current_seq();
return sequences;
}
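// Hoists parameters and constants to the front of the schedule so they
// never interrupt a command buffer sequence; their control dependencies are
// rewired from the hoisted instruction onto its users.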
absl::Status CommandBufferScheduling::MoveParametersAndConstantsToFront(
HloComputation* computation) {
HloInstructionSequence new_sequence;
HloSchedule& schedule = computation->parent()->schedule();
HloInstructionSequence& sequence = schedule.GetOrCreateSequence(computation);
for (HloInstruction* inst : sequence.instructions()) {
if (IsParameter(inst) || IsConstant(inst)) {
new_sequence.push_back(inst);
for (HloInstruction* control_predecessor : inst->control_predecessors()) {
for (HloInstruction* user : inst->users()) {
TF_RETURN_IF_ERROR(control_predecessor->AddControlDependencyTo(user));
}
}
TF_RETURN_IF_ERROR(inst->DropAllControlDeps());
}
}
for (HloInstruction* inst : sequence.instructions()) {
if (!IsParameter(inst) && !IsConstant(inst)) {
new_sequence.push_back(inst);
}
}
schedule.set_sequence(computation, new_sequence);
return absl::OkStatus();
}
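// Builds a computation out of `seq`: values defined outside the sequence
// become parameters, each instruction is cloned with its operands remapped
// into the new computation, and values that are roots or have users outside
// the sequence become results (wrapped in a tuple when there is more than
// one).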
absl::StatusOr<CommandBuffer> CommandBufferScheduling::PrepareCommandBuffer(
const HloInstructionSequence& seq) {
auto builder = HloComputation::Builder("command_buffer");
absl::Span<HloInstruction* const> instructions =
absl::MakeSpan(seq.instructions());
absl::flat_hash_set<HloInstruction*> in_command_buffer(instructions.begin(),
instructions.end());
absl::flat_hash_map<HloInstruction*, HloParameterInstruction*> parameters;
absl::flat_hash_map<HloInstruction*, HloInstruction*> inst_mapping;
auto mapped_operands = [&](HloInstruction* instr) {
absl::InlinedVector<HloInstruction*, 4> operands;
for (HloInstruction* operand : instr->operands()) {
if (auto it = inst_mapping.find(operand); it != inst_mapping.end())
operands.push_back(it->second);
}
return operands;
};
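  // Create a parameter for every external operand of the sequence; each
  // captured value is passed into the command buffer exactly once.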
for (HloInstruction* inst : instructions) {
for (HloInstruction* operand : inst->operands()) {
if (parameters.contains(operand)) continue;
if (in_command_buffer.contains(operand)) continue;
int64_t parameter_id = parameters.size();
auto* parameter = Cast<HloParameterInstruction>(builder.AddInstruction(
HloInstruction::CreateParameter(parameter_id, operand->shape(),
absl::StrCat("p", parameter_id))));
inst_mapping[operand] = parameters[operand] = parameter;
}
}
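  // Clone the sequence into the builder. Computations reached through async
  // ops are unmarked as async first so the clones call them directly.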
for (HloInstruction* inst : seq.instructions()) {
HloCloneContext ctx(inst->GetModule());
for (HloComputation* called_computation : inst->called_computations()) {
if (called_computation->IsAsyncComputation()) {
called_computation->RemoveAsyncStart();
}
ctx.MapComputation(called_computation, called_computation);
}
inst_mapping[inst] = builder.AddInstruction(
inst->CloneWithNewOperands(inst->shape(), mapped_operands(inst), &ctx));
}
std::vector<HloInstruction*> arguments(parameters.size());
for (auto& [argument, parameter] : parameters) {
arguments[parameter->parameter_number()] = argument;
}
std::vector<HloInstruction*> results;
std::vector<HloInstruction*> returned;
auto has_external_users = [&](HloInstruction* inst) {
return inst->IsRoot() || absl::c_any_of(inst->users(), [&](auto* user) {
return !in_command_buffer.contains(user);
});
};
for (HloInstruction* inst : instructions) {
if (has_external_users(inst)) {
results.push_back(inst);
returned.push_back(inst_mapping[inst]);
}
}
if (returned.size() > 1) {
builder.AddInstruction(HloInstruction::CreateTuple(returned));
}
return CommandBuffer{std::move(arguments), std::move(results),
builder.Build(), std::move(inst_mapping)};
}
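// Replaces `seq` in `parent` with a call to the command buffer computation:
// results are rewired through get-tuple-elements when there is more than one,
// control dependencies are forwarded across the call boundary, and the
// original instructions are removed in reverse order.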
absl::StatusOr<HloComputation*> CommandBufferScheduling::RewriteCommandBuffer(
HloComputation* parent, const HloInstructionSequence& seq,
CommandBuffer command_buffer) {
if (command_buffer.results.empty())
return absl::InternalError("command buffer results must not be empty");
Shape cmd_buffer_result_shape;
bool has_single_result = command_buffer.results.size() == 1;
if (has_single_result) {
cmd_buffer_result_shape = command_buffer.results[0]->shape();
} else {
absl::InlinedVector<Shape, 4> shapes;
shapes.reserve(command_buffer.results.size());
for (auto* res : command_buffer.results) shapes.push_back(res->shape());
cmd_buffer_result_shape = ShapeUtil::MakeTupleShape(shapes);
}
HloComputation* computation =
parent->parent()->AddComputationAndUnifyNamesAndIds(
std::move(command_buffer.computation),
false);
HloInstruction* call = parent->AddInstruction(HloInstruction::CreateCall(
cmd_buffer_result_shape, command_buffer.arguments, computation));
if (has_single_result) {
TF_RETURN_IF_ERROR(command_buffer.results[0]->ReplaceAllUsesWith(call));
} else {
for (int i = 0; i < command_buffer.results.size(); i++) {
TF_RETURN_IF_ERROR(
command_buffer.results[i]->ReplaceAllUsesWith(parent->AddInstruction(
HloInstruction::CreateGetTupleElement(call, i))));
}
}
HloSchedule& schedule = parent->parent()->schedule();
HloInstructionSequence& sequence = schedule.GetOrCreateSequence(parent);
sequence.replace_instruction(seq.instructions().back(), call);
HloInstructionSequence cmd_buffer_schedule;
for (auto* argument : command_buffer.arguments) {
cmd_buffer_schedule.push_back(command_buffer.inst_mapping[argument]);
}
for (auto* inst : seq.instructions()) {
cmd_buffer_schedule.push_back(command_buffer.inst_mapping[inst]);
}
if (!has_single_result) {
cmd_buffer_schedule.push_back(computation->root_instruction());
}
schedule.set_sequence(computation, cmd_buffer_schedule);
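  // Forward control dependencies: edges between two captured instructions are
  // recreated inside the command buffer, while edges that cross the boundary
  // are attached to the call instruction instead.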
auto& inst_mapping = command_buffer.inst_mapping;
for (HloInstruction* inst : seq.instructions()) {
HloInstruction* cmd_inst = inst_mapping[inst];
for (HloInstruction* predecessor : inst->control_predecessors()) {
if (auto it = inst_mapping.find(predecessor); it != inst_mapping.end()) {
HloInstruction* cmd_predecessor = it->second;
if (IsParameter(cmd_predecessor)) {
TF_RETURN_IF_ERROR(predecessor->AddControlDependencyTo(call));
} else {
TF_RETURN_IF_ERROR(cmd_predecessor->AddControlDependencyTo(cmd_inst));
}
} else {
TF_RETURN_IF_ERROR(predecessor->AddControlDependencyTo(call));
}
}
for (HloInstruction* successor : inst->control_successors()) {
if (auto it = inst_mapping.find(successor); it != inst_mapping.end()) {
HloInstruction* cmd_successor = it->second;
TF_RETURN_IF_ERROR(cmd_inst->AddControlDependencyTo(cmd_successor));
} else {
TF_RETURN_IF_ERROR(call->AddControlDependencyTo(successor));
}
}
TF_RETURN_IF_ERROR(inst->DropAllControlDeps());
}
for (int32_t i = seq.instructions().size() - 1; i >= 0; i--) {
TF_RETURN_IF_ERROR(parent->RemoveInstruction(seq.instructions()[i]));
}
return computation;
}
CommandBufferScheduling::CommandBufferScheduling(
const se::DeviceDescription& device_description,
int32_t gpu_toolkit_version, int32_t gpu_driver_version)
: device_description_(device_description),
      gpu_toolkit_version_(gpu_toolkit_version),
      gpu_driver_version_(gpu_driver_version) {} | #include "xla/service/gpu/command_buffer_scheduling.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/hlo_parser.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
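// Fixture enabling command buffer capture for FUSION, CONDITIONALS,
// COLLECTIVES and CUDNN commands, with the minimum graph size lowered to 2 so
// that two-command sequences are already outlined.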
class CommandBufferSchedulingTest : public HloTestBase {
public:
static constexpr int32_t kCudaVersion = 12030;
const se::DeviceDescription& device_desc() {
return backend().default_stream_executor()->GetDeviceDescription();
}
DebugOptions GetDebugOptionsForTest() override {
auto debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.add_xla_gpu_enable_command_buffer(DebugOptions::FUSION);
debug_options.add_xla_gpu_enable_command_buffer(DebugOptions::CONDITIONALS);
debug_options.add_xla_gpu_enable_command_buffer(DebugOptions::COLLECTIVES);
debug_options.add_xla_gpu_enable_command_buffer(DebugOptions::CUDNN);
debug_options.set_xla_gpu_graph_min_graph_size(2);
return debug_options;
}
};
using CommandBuffer = CommandBufferScheduling::CommandBuffer;
TEST_F(CommandBufferSchedulingTest, SingleCommandBuffer) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%fused_computation (param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.1 (param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
ENTRY %main (a: s32[], b: s32[]) -> s32[] {
%a = s32[] parameter(0)
%b = s32[] parameter(1)
%fusion = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation
%fusion.1 = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation.1
ROOT %custom-call = s32[] custom-call(s32[] %fusion, s32[] %fusion.1), custom_call_target="some target"
})";
const char* expected = R"(
RunAndFilecheckHloRewrite(
hlo, CommandBufferScheduling(device_desc(), kCudaVersion, kCudaVersion),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, MultipleCommandBuffers) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%fused_computation(param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.1(param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.2(param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.3(param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
ENTRY %main (a: s32[], b: s32[], c: (s32[], s32[])) -> s32[] {
%a = s32[] parameter(0)
%b = s32[] parameter(1)
%c = (s32[], s32[]) parameter(2)
%fusion = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation
%d = s32[] get-tuple-element((s32[], s32[]) %c), index=0
%fusion.1 = s32[] fusion(s32[] %fusion, s32[] %d), kind=kLoop, calls=%fused_computation.1
%e = s32[] get-tuple-element((s32[], s32[]) %c), index=1
%custom-call = s32[] custom-call(s32[] %fusion.1, s32[] %e), custom_call_target="some target"
%fusion.2 = s32[] fusion(s32[] %custom-call, s32[] %a), kind=kLoop, calls=%fused_computation.2
%fusion.3 = s32[] fusion(s32[] %custom-call, s32[] %fusion.2), kind=kLoop, calls=%fused_computation.3
ROOT %custom-call.1 = s32[] custom-call(s32[] %fusion.3), custom_call_target="some target"
})";
const char* expected = R"(
RunAndFilecheckHloRewrite(
hlo, CommandBufferScheduling(device_desc(), kCudaVersion, kCudaVersion),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, AllReduceStartFollowedByDone) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%add (p0: s32[4], p1: s32[4]) -> s32[4] {
%p0 = s32[4] parameter(0)
%p1 = s32[4] parameter(1)
ROOT %add = s32[4] add(s32[4] %p0, s32[4] %p1)
}
ENTRY %main (a: s32[4]) -> s32[4] {
%a = s32[4] parameter(0)
%start = s32[4]{0} all-reduce-start(s32[4]{0} %a),
replica_groups={{0,1}}, to_apply=%add,
backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}
ROOT %done = s32[4]{0} all-reduce-done(s32[4]{0} %start)
})";
const char* expected = R"(
CHECK: %command_buffer ([[P0:.+]]: s32[4]) -> s32[4] {
CHECK: %[[P0]] = s32[4]{0} parameter(0)
CHECK: %[[START:.+]] = s32[4]{0} all-reduce-start(%[[P0]])
CHECK: ROOT %[[DONE:.+]] = s32[4]{0} all-reduce-done(%[[START]])
CHECK: }
CHECK: ENTRY %main (a: s32[4]) -> s32[4] {
CHECK: %[[A:.+]] = s32[4]{0} parameter(0)
CHECK: ROOT %[[CALL:.+]] = s32[4]{0} call(%[[A]]),
CHECK: to_apply=%command_buffer
CHECK: })";
RunAndFilecheckHloRewrite(
hlo, CommandBufferScheduling(device_desc(), kCudaVersion, kCudaVersion),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, AllGatherStartFollowedByDone) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
ENTRY %main (a: s32[2]) -> s32[4] {
%a = s32[2] parameter(0)
%start = (s32[2]{0}, s32[4]{0}) all-gather-start(%a),
channel_id=555, replica_groups={{0,1}}, dimensions={0},
backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}
ROOT %done = s32[4]{0} all-gather-done(%start)
})";
const char* expected = R"(
CHECK: %command_buffer ([[P0:.+]]: s32[2]) -> s32[4] {
CHECK: %[[P0]] = s32[2]{0} parameter(0)
CHECK: %[[START:.+]] = {{.*}} all-gather-start(%[[P0]])
CHECK: ROOT %[[DONE:.+]] = s32[4]{0} all-gather-done(%[[START]])
CHECK: }
CHECK: ENTRY %main (a: s32[2]) -> s32[4] {
CHECK: %[[A:.+]] = s32[2]{0} parameter(0)
CHECK: ROOT %[[CALL:.+]] = s32[4]{0} call(%[[A]]),
CHECK: to_apply=%command_buffer
CHECK: })";
RunAndFilecheckHloRewrite(
hlo, CommandBufferScheduling(device_desc(), kCudaVersion, kCudaVersion),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, ReduceScatterStartFollowedByDone) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%add (p0: s32[], p1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
ENTRY %main (a: s32[4]) -> s32[2] {
%a = s32[4] parameter(0)
%start = ((s32[4]{0}), s32[2]{0}) reduce-scatter-start(%a),
channel_id=555, replica_groups={{0,1}}, dimensions={0}, to_apply=add,
backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}
ROOT %done = s32[2]{0} reduce-scatter-done(%start)
})";
const char* expected = R"(
CHECK: %command_buffer ([[P0:.+]]: s32[4]) -> s32[2] {
CHECK: %[[P0]] = s32[4]{0} parameter(0)
CHECK: %[[START:.+]] = {{.*}} reduce-scatter-start(%[[P0]])
CHECK: ROOT %[[DONE:.+]] = s32[2]{0} reduce-scatter-done(%[[START]])
CHECK: }
CHECK: ENTRY %main (a: s32[4]) -> s32[2] {
CHECK: %[[A:.+]] = s32[4]{0} parameter(0)
CHECK: ROOT %[[CALL:.+]] = s32[2]{0} call(%[[A]]),
CHECK: to_apply=%command_buffer
CHECK: })";
RunAndFilecheckHloRewrite(
hlo, CommandBufferScheduling(device_desc(), kCudaVersion, kCudaVersion),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, AllReduceStartFollowedByBitcast) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%add (p0: s32[4], p1: s32[4]) -> s32[4] {
%p0 = s32[4] parameter(0)
%p1 = s32[4] parameter(1)
ROOT %add = s32[4] add(s32[4] %p0, s32[4] %p1)
}
ENTRY %main (a: s32[4]) -> s32[4] {
%a = s32[4] parameter(0)
%start = s32[4]{0} all-reduce-start(s32[4]{0} %a),
replica_groups={{0,1}}, to_apply=%add,
backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}
%bitcast = s32[4] bitcast(s32[4]{0} %a)
ROOT %done = s32[4]{0} all-reduce-done(s32[4]{0} %start)
})";
const char* expected = R"(
CHECK: %command_buffer ([[P0:.+]]: s32[4]) -> s32[4] {
CHECK: %[[P0]] = s32[4]{0} parameter(0)
CHECK: %[[START:.+]] = s32[4]{0} all-reduce-start(%[[P0]])
CHECK: %[[BITCAST:.+]] = s32[4]{0} bitcast(%[[P0]])
CHECK: ROOT %[[DONE:.+]] = s32[4]{0} all-reduce-done(%[[START]])
CHECK: }
CHECK: ENTRY %main (a: s32[4]) -> s32[4] {
CHECK: %[[A:.+]] = s32[4]{0} parameter(0)
CHECK: ROOT %[[CALL:.+]] = s32[4]{0} call(%[[A]]),
CHECK: to_apply=%command_buffer
CHECK: })";
RunAndFilecheckHloRewrite(
hlo, CommandBufferScheduling(device_desc(), kCudaVersion, kCudaVersion),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, AllReduceStartFollowedAllReduceStart) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%add (p0: s32[4], p1: s32[4]) -> s32[4] {
%p0 = s32[4] parameter(0)
%p1 = s32[4] parameter(1)
ROOT %add = s32[4] add(s32[4] %p0, s32[4] %p1)
}
ENTRY %main (a: s32[4]) -> s32[4] {
%a = s32[4] parameter(0)
%start1 = s32[4]{0} all-reduce-start(s32[4]{0} %a),
replica_groups={{0,1}}, to_apply=%add,
backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}
%start2 = s32[4]{0} all-reduce-start(s32[4]{0} %a),
replica_groups={{0,1}}, to_apply=%add,
backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}
%done1 = s32[4]{0} all-reduce-done(s32[4]{0} %start1)
ROOT %done2 = s32[4]{0} all-reduce-done(s32[4]{0} %start2)
})";
const char* expected = R"(
CHECK: %command_buffer ([[P0:.+]]: s32[4]) -> s32[4] {
CHECK: %[[P0]] = s32[4]{0} parameter(0)
CHECK: %[[START1:.+]] = s32[4]{0} all-reduce-start(%[[P0]])
CHECK: %[[START2:.+]] = s32[4]{0} all-reduce-start(%[[P0]])
CHECK: %[[DONE1:.+]] = s32[4]{0} all-reduce-done(%[[START1]])
CHECK: ROOT %[[DONE2:.+]] = s32[4]{0} all-reduce-done(%[[START2]])
CHECK: }
CHECK: ENTRY %main (a: s32[4]) -> s32[4] {
CHECK: %[[A:.+]] = s32[4]{0} parameter(0)
CHECK: ROOT %[[CALL:.+]] = s32[4]{0} call(%[[A]]),
CHECK: to_apply=%command_buffer
CHECK: })";
RunAndFilecheckHloRewrite(
hlo, CommandBufferScheduling(device_desc(), kCudaVersion, kCudaVersion),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, DoNotCaptureUnmatchedAsyncDone) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%fused_computation(param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.1(param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%add (p0: s32[4], p1: s32[4]) -> s32[4] {
%p0 = s32[4] parameter(0)
%p1 = s32[4] parameter(1)
ROOT %add = s32[4] add(s32[4] %p0, s32[4] %p1)
}
ENTRY %main (a: s32[4], b:s32[]) -> s32[] {
%a = s32[4] parameter(0)
%b = s32[] parameter(1)
%start1 = s32[4]{0} all-reduce-start(s32[4]{0} %a),
replica_groups={{0,1}}, to_apply=%add,
backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}
%c = s32[] custom-call(), custom_call_target="target"
%start2 = s32[4]{0} all-reduce-start(s32[4]{0} %a),
replica_groups={{0,1}}, to_apply=%add,
backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}
%done1 = s32[4]{0} all-reduce-done(s32[4]{0} %start1)
%done2 = s32[4]{0} all-reduce-done(s32[4]{0} %start2)
%fusion = s32[] fusion(s32[] %b, s32[] %c), kind=kLoop, calls=%fused_computation
ROOT %fusion.1 = s32[] fusion(s32[] %b, s32[] %c), kind=kLoop, calls=%fused_computation.1
})";
const char* expected = R"(
CHECK: %command_buffer ([[P0:.+]]: s32[], [[P1:.+]]: s32[]) -> s32[] {
CHECK: %[[P0]] = s32[] parameter(0)
CHECK: %[[P1]] = s32[] parameter(1)
CHECK: %fusion.2 = s32[] fusion(%[[P0]], %[[P1]]), kind=kLoop, calls=%fused_computation
CHECK: ROOT %fusion.3 = s32[] fusion(%[[P0]], %[[P1]]), kind=kLoop, calls=%fused_computation.1
CHECK: }
CHECK: ENTRY %main (a: s32[4], b: s32[]) -> s32[] {
CHECK: %[[A:.+]] = s32[4]{0} parameter(0)
CHECK: %[[B:.+]] = s32[] parameter(1)
CHECK: %[[START1:.+]] = s32[4]{0} all-reduce-start(%[[A]])
CHECK: %[[C:.+]] = s32[] custom-call()
CHECK: %[[START2:.+]] = s32[4]{0} all-reduce-start(%[[A]])
CHECK: %[[DONE1:.+]] = s32[4]{0} all-reduce-done(%[[START1]])
CHECK: %[[DONE2:.+]] = s32[4]{0} all-reduce-done(%[[START2]])
CHECK: %call = s32[] call(%b, %c), to_apply=%command_buffer
CHECK: })";
RunAndFilecheckHloRewrite(
hlo, CommandBufferScheduling(device_desc(), kCudaVersion, kCudaVersion),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, CollectCommandBufferSequence) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%fused_computation(param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.1(param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.2(param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.3(param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
ENTRY %main (a: s32[], b: s32[], c: (s32[], s32[])) -> s32[] {
%a = s32[] parameter(0)
%b = s32[] parameter(1)
%c = (s32[], s32[]) parameter(2)
%fusion = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation
%d = s32[] get-tuple-element((s32[], s32[]) %c), index=0
%fusion.1 = s32[] fusion(s32[] %fusion, s32[] %d), kind=kLoop, calls=%fused_computation.1
%e = s32[] get-tuple-element((s32[], s32[]) %c), index=1
%custom-call = s32[] custom-call(s32[] %fusion.1, s32[] %e), custom_call_target="some target"
%fusion.2 = s32[] fusion(s32[] %custom-call, s32[] %a), kind=kLoop, calls=%fused_computation.2
ROOT %fusion.3 = s32[] fusion(s32[] %custom-call, s32[] %fusion.2), kind=kLoop, calls=%fused_computation.3
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstructionSequence seq;
for (HloInstruction* x : module->entry_computation()->instructions()) {
seq.push_back(x);
}
EXPECT_EQ(seq.size(), 10);
CommandBufferScheduling::CommandBufferConfig config{{DebugOptions::FUSION},
device_desc()};
std::vector<HloInstructionSequence> command_buffer_sequences =
CommandBufferScheduling::CollectCommandBufferSequences(seq, config);
EXPECT_EQ(command_buffer_sequences.size(), 2);
std::vector<HloInstruction*> seq_0 =
command_buffer_sequences[0].instructions();
EXPECT_EQ(seq_0.size(), 3);
EXPECT_EQ(seq_0[0]->opcode(), HloOpcode::kFusion);
EXPECT_EQ(seq_0[1]->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(seq_0[2]->opcode(), HloOpcode::kFusion);
std::vector<HloInstruction*> seq_1 =
command_buffer_sequences[1].instructions();
EXPECT_EQ(seq_1.size(), 2);
EXPECT_EQ(seq_1[0]->opcode(), HloOpcode::kFusion);
EXPECT_EQ(seq_1[1]->opcode(), HloOpcode::kFusion);
}
TEST_F(CommandBufferSchedulingTest, MoveParametersToFront) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%fused_computation (param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.1 (param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
ENTRY %main (a: s32[], b: s32[], c: s32[]) -> s32[] {
%a = s32[] parameter(0)
%b = s32[] parameter(1)
%fusion = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation
%c = s32[] parameter(2)
ROOT %fusion.1 = s32[] fusion(s32[] %a, s32[] %c), kind=kLoop, calls=%fused_computation.1
})";
const char* expected = R"(
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo));
TF_ASSERT_OK(CommandBufferScheduling::MoveParametersAndConstantsToFront(
module->entry_computation()));
TF_ASSERT_OK_AND_ASSIGN(
bool filecheck_matches,
RunFileCheck(
module->ToString(HloPrintOptions{}.set_print_operand_shape(false)),
expected));
EXPECT_TRUE(filecheck_matches);
}
TEST_F(CommandBufferSchedulingTest, PrepareCommandBuffer) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%fused_computation(param_0: s32[], param_1: s32[]) -> (s32[], s32[]) {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %tuple = (s32[], s32[]) tuple(s32[] %p0, s32[] %p1)
}
%fused_computation.1(param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
ENTRY %main (a: s32[], b: s32[]) -> s32[] {
%a = s32[] parameter(0)
%b = s32[] custom-call(), custom_call_target="target"
%fusion = (s32[], s32[]) fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation
%d = s32[] get-tuple-element((s32[], s32[]) %fusion), index=0
%fusion.1 = s32[] fusion(s32[] %a, s32[] %d), kind=kLoop, calls=%fused_computation.1
ROOT %custom-call = s32[] custom-call(s32[] %fusion.1, s32[] %d), custom_call_target="some target"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(hlo));
EXPECT_EQ(module->entry_computation()->instruction_count(), 6);
std::vector<HloInstruction*> instructions;
HloInstructionSequence seq;
for (HloInstruction* inst : module->entry_computation()->instructions()) {
if (inst->opcode() == HloOpcode::kFusion ||
inst->opcode() == HloOpcode::kGetTupleElement) {
seq.push_back(inst);
}
instructions.push_back(inst);
}
TF_ASSERT_OK_AND_ASSIGN(CommandBuffer command_buffer,
CommandBufferScheduling::PrepareCommandBuffer(seq));
HloComputation* computation = module->AddComputationAndUnifyNamesAndIds(
std::move(command_buffer.computation), false);
const char* expected = R"(
TF_ASSERT_OK_AND_ASSIGN(
bool filecheck_matches,
RunFileCheck(computation->ToString(
HloPrintOptions{}.set_print_operand_shape(false)),
expected));
EXPECT_TRUE(filecheck_matches);
auto& arguments = command_buffer.arguments;
ASSERT_EQ(arguments.size(), 2);
EXPECT_EQ(arguments[0], instructions[0]);
EXPECT_EQ(arguments[1], instructions[1]);
auto& results = command_buffer.results;
ASSERT_EQ(results.size(), 2);
EXPECT_EQ(results[0], instructions[3]);
EXPECT_EQ(results[1], instructions[4]);
}
TEST_F(CommandBufferSchedulingTest, ForwardControlDependencies) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%fused_computation (param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.1 (param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.2 (param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
ENTRY %main (a: s32[], b: s32[]) -> s32[] {
%a = s32[] parameter(0)
%b = s32[] parameter(1)
%custom-call = s32[] custom-call(), custom_call_target="some target"
%fusion = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation, control-predecessors={%custom-call}
%fusion.1 = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation.1, control-predecessors={%fusion}
%custom-call.1 = s32[] custom-call(), custom_call_target="some target"
%fusion.2 = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation.2, control-predecessors={%fusion.1}
ROOT %custom-call.2 = s32[] custom-call(s32[] %fusion.1, s32[] %fusion.2), custom_call_target="some target"
})";
const char* expected = R"(
CHECK: %command_buffer ([[P0:.+]]: s32[], [[P1:.+]]: s32[]) -> s32[] {
CHECK: %[[P0]] = s32[] parameter(0)
CHECK: %[[P1]] = s32[] parameter(1)
CHECK: %[[F0:.+]] = s32[] fusion(%[[P0]], %[[P1]])
CHECK: ROOT {{.*}} = s32[] fusion(%[[P0]], %[[P1]]), {{.*}} control-predecessors={%[[F0]]}
CHECK: }
CHECK: ENTRY %main (a: s32[], b: s32[]) -> s32[] {
CHECK: %a = s32[] parameter(0)
CHECK: %b = s32[] parameter(1)
CHECK: %custom-call = s32[] custom-call(), custom_call_target="some target"
CHECK: %call = s32[] call(%a, %b), to_apply=%command_buffer, control-predecessors={%custom-call}
CHECK: %custom-call.1 = s32[] custom-call(), custom_call_target="some target"
CHECK: %[[F3:.+]] = s32[] fusion(%a, %b), kind=kLoop, calls=%fused_computation.2, control-predecessors={%call}
CHECK: ROOT %custom-call.2 = s32[] custom-call(%call, %[[F3]]), custom_call_target="some target"
CHECK: })";
RunAndFilecheckHloRewrite(
hlo, CommandBufferScheduling(device_desc(), kCudaVersion, kCudaVersion),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, ForwardControlDependenciesToParams) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%fused_computation.0 (p0: s32[], p1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.1 (p0: s32[], p1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
ENTRY %main (a: s32[], b: s32[]) -> s32[] {
%a = s32[] parameter(0)
%b = s32[] parameter(1)
%custom-call = s32[] custom-call(), custom_call_target="some target"
%fusion = s32[] fusion(s32[] %custom-call, s32[] %a), kind=kLoop, calls=%fused_computation.0, control-predecessors={%custom-call}
ROOT %fusion.1 = s32[] fusion(s32[] %fusion, s32[] %b), kind=kLoop, calls=%fused_computation.1
})";
const char* expected = R"(
CHECK: ENTRY %main (a: s32[], b: s32[]) -> s32[] {
CHECK: %a = s32[] parameter(0)
CHECK: %b = s32[] parameter(1)
CHECK: %[[CUSTOM_CALL:.+]] = s32[] custom-call(), custom_call_target="some target"
CHECK: ROOT {{.*}} call(%[[CUSTOM_CALL]], %a, %b), to_apply=%command_buffer, control-predecessors={%[[CUSTOM_CALL]]}
CHECK: })";
RunAndFilecheckHloRewrite(
hlo, CommandBufferScheduling(device_desc(), kCudaVersion, kCudaVersion),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, WhileNotCommand) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%fused_computation (param_0: f32[1]) -> f32[1] {
%param_0 = f32[1]{0} parameter(0)
ROOT %copy.5 = f32[1]{0} copy(f32[1]{0} %param_0)
}
%fused_computation.1 (param_0.1: f32[1], param_1: f32[1]) -> f32[1] {
%param_0.1 = f32[1]{0} parameter(0)
%param_1 = f32[1]{0} parameter(1)
ROOT %add.2 = f32[1]{0} add(f32[1]{0} %param_0.1, f32[1]{0} %param_1)
}
%fused_computation.2 (param_0.2: f32[1], param_1.1: f32[1]) -> pred[1] {
%param_0.2 = f32[1]{0} parameter(0)
%param_1.1 = f32[1]{0} parameter(1)
ROOT %compare.3 = pred[1]{0} compare(f32[1]{0} %param_0.2, f32[1]{0} %param_1.1), direction=LT
}
%fused_computation.3 (param_0.1: f32[1], param_1: f32[1]) -> f32[1] {
%param_0.1 = f32[1]{0} parameter(0)
%param_1 = f32[1]{0} parameter(1)
ROOT %add.2 = f32[1]{0} add(f32[1]{0} %param_0.1, f32[1]{0} %param_1)
}
%body (Arg_.3: f32[1]) -> f32[1] {
%constant_4 = f32[1]{0} constant({1})
%Arg_.3 = f32[1]{0} parameter(0)
%custom-call = s32[] custom-call(), custom_call_target="some target"
%add = f32[1]{0} fusion(f32[1]{0} %Arg_.3, f32[1]{0} %constant_4), kind=kLoop, calls=%fused_computation.1, control-predecessors={%custom-call}
ROOT %wrapped_add.1 = f32[1]{0} fusion(f32[1]{0} %add, f32[1]{0} %constant_4), kind=kLoop, calls=%fused_computation.3, control-predecessors={%custom-call}
}
%cond (Arg_.11: f32[1]) -> pred[] {
%constant = f32[1]{0} constant({100})
%Arg_.11 = f32[1]{0} parameter(0)
%wrapped_compare.2 = pred[1]{0} fusion(f32[1]{0} %Arg_.11, f32[1]{0} %constant), kind=kLoop, calls=%fused_computation.2
ROOT %bitcast = pred[] bitcast(pred[1]{0} %wrapped_compare.2)
}
ENTRY %main.18 (Arg_0.1: f32[1]) -> f32[] {
%Arg_0.1 = f32[1]{0} parameter(0), sharding={replicated}
%wrapped_copy.4 = f32[1]{0} fusion(f32[1]{0} %Arg_0.1), kind=kLoop, calls=%fused_computation
%while.16 = f32[1]{0} while(f32[1]{0} %wrapped_copy.4), condition=%cond, body=%body
ROOT %bitcast.1 = f32[] bitcast(f32[1]{0} %while.16)
})";
const char* expected = R"(
CHECK: %command_buffer ([[P0:.+]]: f32[1], [[P1:.+]]: f32[1]) -> f32[1] {
CHECK: %[[P0]] = f32[1]{0} parameter(0)
CHECK: %[[P1]] = f32[1]{0} parameter(1)
CHECK: %[[ADD:.*]] = f32[1]{0} fusion(%[[P0]], %[[P1]]), kind=kLoop
CHECK: ROOT {{.*}} = f32[1]{0} fusion(%[[ADD]], %[[P1]]), kind=kLoop
CHECK: }
CHECK: %[[BODY:[a-z_0-9.]+]] ([[P0:.+]]: f32[1]) -> f32[1] {
CHECK: %[[C1:.*]] = f32[1]{0} constant({1})
CHECK: %[[P0]] = f32[1]{0} parameter(0)
CHECK: })";
RunAndFilecheckHloRewrite(
hlo, CommandBufferScheduling(device_desc(), kCudaVersion, kCudaVersion),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
}
} |
2,039 | cpp | tensorflow/tensorflow | cudnn_pad_for_convolutions | third_party/xla/xla/service/gpu/transforms/cudnn_pad_for_convolutions.cc | third_party/xla/xla/service/gpu/transforms/cudnn_pad_for_convolutions_test.cc | #ifndef XLA_SERVICE_GPU_CUDNN_PAD_FOR_CONVOLUTIONS_H_
#define XLA_SERVICE_GPU_CUDNN_PAD_FOR_CONVOLUTIONS_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
class CudnnPadForConvolutions : public HloModulePass {
public:
explicit CudnnPadForConvolutions(se::CudaComputeCapability compute_capability)
: compute_capability_(compute_capability) {}
absl::string_view name() const override {
return "cudnn_pad_for_convolutions";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const se::CudaComputeCapability compute_capability_;
};
}
}
#endif  // XLA_SERVICE_GPU_CUDNN_PAD_FOR_CONVOLUTIONS_H_
#include "xla/service/gpu/cudnn_pad_for_convolutions.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/functional/bind_front.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/cudnn_support_utils.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
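// Pads `instr` up to `new_shape` with zeros (high padding only); returns the
// instruction unchanged when no dimension needs to grow.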
static HloInstruction* PadInstruction(HloInstruction* instr,
const Shape& new_shape) {
HloComputation* comp = instr->parent();
const Shape& shape = instr->shape();
PaddingConfig pad_config = MakeNoPaddingConfig(shape.rank());
bool added_padding = false;
for (int64_t dim = 0; dim < shape.rank(); ++dim) {
if (shape.dimensions(dim) == new_shape.dimensions(dim)) {
continue;
}
CHECK_GT(new_shape.dimensions(dim), shape.dimensions(dim));
pad_config.mutable_dimensions(dim)->set_edge_padding_high(
new_shape.dimensions(dim) - shape.dimensions(dim));
added_padding = true;
}
if (!added_padding) {
return instr;
}
auto* zero = comp->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(shape.element_type())));
return comp->AddInstruction(
HloInstruction::CreatePad(new_shape, instr, zero, pad_config),
&instr->metadata());
}
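// Replaces `conv` with a clone that consumes padded operands and, when the
// result shape grew, slices the new result back down to the original shape.
// Must run while the conv still reports zero scratch bytes, i.e. before
// algorithm picking.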
static absl::Status PadConv(HloCustomCallInstruction* conv,
absl::Span<const Shape> new_input_shapes,
const Shape& new_result_shape) {
CHECK_EQ(0, conv->shape().tuple_shapes(1).dimensions(0))
<< "conv must use 0 scratch bytes, i.e. this pass must be run "
"before CudnnConvAlgorithmPicker.";
std::vector<HloInstruction*> new_operands;
new_operands.reserve(conv->operand_count());
for (int i = 0; i < conv->operand_count(); ++i) {
new_operands.push_back(
PadInstruction(conv->mutable_operand(i), new_input_shapes[i]));
}
const Shape& result_shape = conv->shape().tuple_shapes(0);
bool changed = false;
for (int i = 0; i < conv->operand_count(); ++i) {
changed |= (new_operands[i] != conv->mutable_operand(i));
}
CHECK(changed) << "We should have had to pad at least one input operand.";
auto add = [&](std::unique_ptr<HloInstruction> new_instr) {
return conv->parent()->AddInstruction(std::move(new_instr));
};
Shape new_conv_shape = ShapeUtil::MakeTupleShape(
{new_result_shape, ShapeUtil::MakeShape(U8, {0})});
auto* new_conv =
add(conv->CloneWithNewOperands(new_conv_shape, new_operands));
new_conv->SetAndSanitizeName(conv->name());
VLOG(2) << "Padded features of " << conv->ToString() << ", replaced with "
<< new_conv->ToString();
if (!ShapeUtil::Equal(result_shape, new_result_shape)) {
std::vector<int64_t> start_indices(result_shape.dimensions_size(), 0);
std::vector<int64_t> end_indices(result_shape.dimensions().begin(),
result_shape.dimensions().end());
std::vector<int64_t> strides(result_shape.dimensions_size(), 1);
auto* new_conv_result = add(
HloInstruction::CreateGetTupleElement(new_result_shape, new_conv, 0));
auto* empty_temp_buffer =
add(HloInstruction::CreateConstant(LiteralUtil::CreateR1<uint8_t>({})));
auto* sliced_result = add(HloInstruction::CreateSlice(
result_shape, new_conv_result, start_indices, end_indices, strides));
new_conv =
add(HloInstruction::CreateTuple({sliced_result, empty_temp_buffer}));
}
return conv->parent()->ReplaceInstruction(conv, new_conv);
}
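// Returns every cuDNN convolution custom-call in `comp`.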
static std::vector<HloCustomCallInstruction*> GetRelevantConvs(
HloComputation* comp) {
std::vector<HloCustomCallInstruction*> convs;
for (HloInstruction* instr : comp->instructions()) {
if (IsCustomCallToDnnConvolution(*instr)) {
convs.push_back(Cast<HloCustomCallInstruction>(instr));
}
}
return convs;
}
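// Runs `resolve_pad_shapes` to compute padded operand/result shapes for
// `conv` and applies the padding when it reports padding is worthwhile.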
static absl::StatusOr<bool> ResolveAndPad(
HloCustomCallInstruction* conv,
std::function<absl::StatusOr<bool>(HloCustomCallInstruction* conv,
std::vector<Shape>* new_input_shapes,
Shape* new_result_shape)>
resolve_pad_shapes) {
std::vector<Shape> new_input_shapes;
Shape new_result_shape;
TF_ASSIGN_OR_RETURN(bool result, resolve_pad_shapes(conv, &new_input_shapes,
&new_result_shape));
if (result) {
TF_RETURN_IF_ERROR(PadConv(conv, new_input_shapes, new_result_shape));
return true;
}
return false;
}
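// Computes padded shapes that let cuDNN use tensor cores for f16
// convolutions: feature dimensions are rounded up to multiples of 8, except
// that 3 input features feeding 32 or 64 output features are padded to 4.
// Padding is rejected when it would grow any buffer past the 1.35x bound.
// Grouped convolutions and forward-activation convs are left alone.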
static absl::StatusOr<bool> TryResolvePaddedShapesForTensorCore(
HloCustomCallInstruction* conv, std::vector<Shape>* new_input_shapes_ptr,
Shape* new_result_shape_ptr) {
TF_ASSIGN_OR_RETURN(auto kind, GetCudnnConvKind(conv));
const auto& dnums = conv->convolution_dimension_numbers();
auto* lhs = conv->mutable_operand(0);
auto* rhs = conv->mutable_operand(1);
const Shape& result_shape = conv->shape().tuple_shapes(0);
if (result_shape.element_type() != PrimitiveType::F16) {
return false;
}
if (conv->feature_group_count() > 1 || conv->batch_group_count() > 1) {
VLOG(2) << "Do not pad grouped convolution.";
return false;
}
if (kind == CudnnConvKind::kForwardActivation) {
return false;
}
Shape new_lhs_shape = lhs->shape();
Shape new_rhs_shape = rhs->shape();
Shape& new_result_shape = *new_result_shape_ptr;
new_result_shape = conv->shape().tuple_shapes(0);
Shape* new_input_shape;
Shape* new_filter_shape;
Shape* new_output_shape;
std::tie(new_input_shape, new_filter_shape, new_output_shape) = [&] {
switch (kind) {
case CudnnConvKind::kForward:
case CudnnConvKind::kForwardActivation:
case CudnnConvKind::kForwardGraph:
return std::make_tuple(&new_lhs_shape, &new_rhs_shape,
&new_result_shape);
case CudnnConvKind::kBackwardInput:
return std::make_tuple(&new_result_shape, &new_rhs_shape,
&new_lhs_shape);
case CudnnConvKind::kBackwardFilter:
return std::make_tuple(&new_lhs_shape, &new_result_shape,
&new_rhs_shape);
}
}();
auto input_features =
new_input_shape->dimensions(dnums.input_feature_dimension());
auto output_features =
new_output_shape->dimensions(dnums.output_feature_dimension());
if (input_features == 3 && (output_features == 32 || output_features == 64)) {
new_input_shape->set_dimensions(dnums.input_feature_dimension(), 4);
new_filter_shape->set_dimensions(dnums.kernel_input_feature_dimension(), 4);
} else {
auto pad_dim = [](Shape* s, int64_t dim) {
s->set_dimensions(dim, RoundUpTo<int64_t>(s->dimensions(dim), 8));
};
pad_dim(new_input_shape, dnums.input_feature_dimension());
pad_dim(new_filter_shape, dnums.kernel_input_feature_dimension());
pad_dim(new_filter_shape, dnums.kernel_output_feature_dimension());
pad_dim(new_output_shape, dnums.output_feature_dimension());
static constexpr double kMaxBytesTouchedBound = 1.35;
auto check_size_increase = [&](const Shape& old_shape,
const Shape& new_shape) {
int64_t old_bytes = ShapeUtil::ByteSizeOf(old_shape);
int64_t new_bytes = ShapeUtil::ByteSizeOf(new_shape);
if (new_bytes <= old_bytes * kMaxBytesTouchedBound) {
return true;
}
VLOG(3)
<< "Not padding convolution; doing so would change input / result "
"shape from "
<< ShapeUtil::HumanString(old_shape) << " to "
<< ShapeUtil::HumanString(new_shape) << ", a size increase of "
<< new_bytes / static_cast<double>(old_bytes) << "x > "
<< kMaxBytesTouchedBound << "x: " << conv->ToString();
return false;
};
if (!check_size_increase(lhs->shape(), new_lhs_shape) ||
!check_size_increase(rhs->shape(), new_rhs_shape) ||
!check_size_increase(result_shape, new_result_shape)) {
return false;
}
}
if (ShapeUtil::Equal(lhs->shape(), new_lhs_shape) &&
ShapeUtil::Equal(rhs->shape(), new_rhs_shape)) {
VLOG(3) << "No need to pad features of " << conv->ToString();
return false;
}
new_input_shapes_ptr->push_back(new_lhs_shape);
new_input_shapes_ptr->push_back(new_rhs_shape);
return true;
}
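// Computes padded shapes that let cuDNN use its vectorized integer
// convolution kernels: feature dimensions are rounded up to multiples of
// `pad_to` (divided by any vector size already present in the layout).
// Padding is rejected when it would grow the input or result to 2x or more
// of the bytes touched, or when cuDNN cannot run the padded conv.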
absl::StatusOr<bool> TryResolvePaddedShapesForIntegerConvolution(
int pad_to, const se::CudaComputeCapability& compute_capability,
HloCustomCallInstruction* conv, std::vector<Shape>* new_input_shapes_ptr,
Shape* new_result_shape_ptr) {
TF_ASSIGN_OR_RETURN(auto kind, GetCudnnConvKind(conv));
const Shape& input_shape = conv->operand(0)->shape();
const Shape& kernel_shape = conv->operand(1)->shape();
const Shape& result_shape = conv->shape().tuple_shapes(0);
if (!primitive_util::IsIntegralType(input_shape.element_type())) {
return false;
}
if (kind != CudnnConvKind::kForward &&
kind != CudnnConvKind::kForwardActivation) {
return false;
}
const auto& dnums = conv->convolution_dimension_numbers();
std::vector<Shape>& new_input_shapes = *new_input_shapes_ptr;
for (auto operand : conv->operands()) {
new_input_shapes.push_back(operand->shape());
}
Shape& new_result_shape = *new_result_shape_ptr;
new_result_shape = conv->shape().tuple_shapes(0);
std::optional<int64_t> input_vect_dim;
std::optional<int64_t> kernel_vect_dim;
std::optional<int64_t> result_vect_dim;
std::tie(input_vect_dim, kernel_vect_dim, result_vect_dim) =
FindVectorizedFeatureDims(dnums, input_shape, kernel_shape, result_shape);
int64_t input_vect_size =
input_vect_dim.has_value() ? input_shape.dimensions(*input_vect_dim) : 1;
int64_t kernel_vect_size = kernel_vect_dim.has_value()
? kernel_shape.dimensions(*kernel_vect_dim)
: 1;
int64_t result_vect_size = result_vect_dim.has_value()
? result_shape.dimensions(*result_vect_dim)
: 1;
if (pad_to % input_vect_size != 0 || pad_to % kernel_vect_size != 0 ||
pad_to % result_vect_size != 0) {
return false;
}
TF_ASSIGN_OR_RETURN(bool cudnn_supports,
CudnnSupportsOptimizedIntegerConvolution(
compute_capability, *conv, pad_to));
if (!cudnn_supports) {
return false;
}
{
auto pad_dim = [&](Shape* s, int64_t dim, int64_t cur_vect_size) {
CHECK_EQ(pad_to % cur_vect_size, 0);
s->set_dimensions(
dim, RoundUpTo<int64_t>(s->dimensions(dim), pad_to / cur_vect_size));
};
switch (kind) {
case CudnnConvKind::kForward:
CHECK_EQ(new_input_shapes.size(), 2);
pad_dim(new_input_shapes.data(), dnums.input_feature_dimension(),
input_vect_size);
pad_dim(&new_input_shapes[1], dnums.kernel_input_feature_dimension(),
kernel_vect_size);
pad_dim(&new_input_shapes[1], dnums.kernel_output_feature_dimension(),
1);
pad_dim(&new_result_shape, dnums.output_feature_dimension(),
result_vect_size);
break;
case CudnnConvKind::kForwardActivation:
CHECK(new_input_shapes.size() == 3 || new_input_shapes.size() == 4);
pad_dim(new_input_shapes.data(), dnums.input_feature_dimension(),
input_vect_size);
pad_dim(&new_input_shapes[1], dnums.kernel_input_feature_dimension(),
kernel_vect_size);
pad_dim(&new_input_shapes[1], dnums.kernel_output_feature_dimension(),
1);
pad_dim(&new_input_shapes[2], 0, 1);
if (new_input_shapes.size() == 4) {
pad_dim(&new_input_shapes[3], dnums.output_feature_dimension(),
result_vect_size);
}
pad_dim(&new_result_shape, dnums.output_feature_dimension(),
result_vect_size);
break;
default:
CHECK(false);
}
static constexpr double kMaxBytesTouchedBound = 2;
auto check_size_increase = [&](const Shape& old_shape,
const Shape& new_shape) {
int64_t old_bytes = ShapeUtil::ByteSizeOf(old_shape);
int64_t new_bytes = ShapeUtil::ByteSizeOf(new_shape);
if (new_bytes < old_bytes * kMaxBytesTouchedBound) {
return true;
}
VLOG(3)
<< "Not padding convolution; doing so would change input / result "
"shape from "
<< ShapeUtil::HumanString(old_shape) << " to "
<< ShapeUtil::HumanString(new_shape) << ", a size increase of "
<< new_bytes / static_cast<double>(old_bytes)
<< "x >= " << kMaxBytesTouchedBound << "x: " << conv->ToString();
return false;
};
if (!check_size_increase(conv->operand(0)->shape(), new_input_shapes[0]) ||
!check_size_increase(result_shape, new_result_shape)) {
return false;
}
}
bool changed = false;
for (int64_t i = 0; i < conv->operand_count(); ++i) {
changed |=
!ShapeUtil::Equal(conv->operand(i)->shape(), new_input_shapes[i]);
}
if (!changed) {
VLOG(3) << "No need to pad features of " << conv->ToString();
}
return changed;
}
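// Tries int8x32 padding on compute capability 7.5+, falls back to int8x4,
// and finally applies f16 tensor-core padding on Volta and newer.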
absl::StatusOr<bool> CudnnPadForConvolutions::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
for (HloCustomCallInstruction* conv : GetRelevantConvs(comp)) {
bool local_changed = false;
if (compute_capability_.IsAtLeast(7, 5)) {
TF_ASSIGN_OR_RETURN(
local_changed,
ResolveAndPad(conv, absl::bind_front(
TryResolvePaddedShapesForIntegerConvolution,
32, compute_capability_)));
}
if (!local_changed) {
TF_ASSIGN_OR_RETURN(
local_changed,
ResolveAndPad(conv, absl::bind_front(
TryResolvePaddedShapesForIntegerConvolution,
4, compute_capability_)));
}
changed |= local_changed;
}
if (compute_capability_.IsAtLeast(se::CudaComputeCapability::VOLTA)) {
for (HloCustomCallInstruction* conv : GetRelevantConvs(comp)) {
TF_ASSIGN_OR_RETURN(
bool local_changed,
ResolveAndPad(conv, TryResolvePaddedShapesForTensorCore));
changed |= local_changed;
}
}
}
return changed;
}
}
} | #include "xla/service/gpu/cudnn_pad_for_convolutions.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
namespace m = xla::match;
class CudnnPadForConvolutionsTest : public HloTestBase {};
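// The tests construct the pass with explicit compute capabilities: {7, 0}
// (Volta: f16 tensor-core padding, int8 padded to multiples of 4) and
// {7, 5} (Turing: additionally pads int8 to multiples of 32).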
TEST_F(CudnnPadForConvolutionsTest, DoNotPadF16ForwardConvWhenGrouped) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = f16[704,48,1,49]{3,2,1,0} parameter(0)
filter = f16[44,768,1,50]{3,2,1,0} parameter(1)
ROOT result = (f16[1,128,48,768]{3,2,1,0}, u8[0]{0})
custom-call(input, filter)
, window={size=1x50 pad=0_0x64_64}
, dim_labels=fb01_io01->01bf
, feature_group_count=16
, custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_FALSE(CudnnPadForConvolutions({7, 5}).Run(module.get()).value());
}
TEST_F(CudnnPadForConvolutionsTest, PadF16ForwardConvInputChannels) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = f16[10,20,30,41] parameter(0)
filter = f16[2,2,41,40] parameter(1)
ROOT result = (f16[10,20,30,40], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
SCOPED_TRACE(module->ToString());
EXPECT_THAT(
root,
GmockMatch(m::CustomCall(
{kCudnnConvForwardCallTarget},
m::Pad(m::Parameter(0), m::Op()).WithShape(F16, {10, 20, 30, 48}),
m::Pad(m::Parameter(1), m::Op()).WithShape(F16, {2, 2, 48, 40}))));
}
TEST_F(CudnnPadForConvolutionsTest, PadF16BackwardInputConvOutputChannels) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
output = f16[10,20,30,41] parameter(0)
filter = f16[2,2,40,41] parameter(1)
ROOT result = (f16[10,20,30,40], u8[0]) custom-call(output, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBackwardInput"
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
GmockMatch(m::CustomCall(
{kCudnnConvBackwardInputCallTarget},
m::Pad(m::Parameter(0), m::Op()).WithShape(F16, {10, 20, 30, 48}),
m::Pad(m::Parameter(1), m::Op()).WithShape(F16, {2, 2, 40, 48}))));
}
TEST_F(CudnnPadForConvolutionsTest, PadF16ForwardConvOutputChannels) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = f16[10,20,30,40] parameter(0)
filter = f16[2,2,40,41] parameter(1)
ROOT result = (f16[10,20,30,41], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Tuple(
m::Slice(m::GetTupleElement(m::CustomCall(
{kCudnnConvForwardCallTarget}, m::Parameter(0),
m::Pad(m::Parameter(1), m::Op())))),
m::Op())));
}
TEST_F(CudnnPadForConvolutionsTest, PadF16BackwardInputConvInputChannels) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
output = f16[10,20,30,40] parameter(0)
filter = f16[2,2,41,40] parameter(1)
result = (f16[10,20,30,41], u8[0]) custom-call(output, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBackwardInput"
ROOT gte = f16[10,20,30,41] get-tuple-element(result), index=0
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
GmockMatch(m::GetTupleElement(m::Tuple(
m::Slice(m::GetTupleElement(m::CustomCall(
{kCudnnConvBackwardInputCallTarget}, m::Parameter(0),
m::Pad(m::Parameter(1), m::Op())))),
m::Op()))));
}
TEST_F(CudnnPadForConvolutionsTest, PadF16BackwardFilterConvInputChannels) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = f16[10,20,30,41] parameter(0)
output = f16[10,20,30,40] parameter(1)
result = (f16[2,2,41,40], u8[0]) custom-call(input, output),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBackwardFilter"
ROOT gte = f16[2,2,41,40] get-tuple-element(result), index=0
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
GmockMatch(m::GetTupleElement(m::Tuple(
m::Slice(m::GetTupleElement(m::CustomCall(
{kCudnnConvBackwardFilterCallTarget},
m::Pad(m::Parameter(0), m::Op()), m::Parameter(1)))),
m::Op()))));
}
TEST_F(CudnnPadForConvolutionsTest, PadF16BackwardFilterConvOutputChannels) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = f16[10,20,30,40] parameter(0)
output = f16[10,20,30,41] parameter(1)
result = (f16[2,2,40,41], u8[0]) custom-call(input, output),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBackwardFilter"
ROOT gte = f16[2,2,40,41] get-tuple-element(result), index=0
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
GmockMatch(m::GetTupleElement(m::Tuple(
m::Slice(m::GetTupleElement(m::CustomCall(
{kCudnnConvBackwardFilterCallTarget}, m::Parameter(0),
m::Pad(m::Parameter(1), m::Op())))),
m::Op()))));
}
TEST_F(CudnnPadForConvolutionsTest, PadInputFeatures3To4) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = f16[10,20,30,3] parameter(0)
filter = f16[2,2,3,32] parameter(1)
ROOT result = (f16[10,20,30,32], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
SCOPED_TRACE(module->ToString());
EXPECT_THAT(
root,
GmockMatch(m::CustomCall(
{kCudnnConvForwardCallTarget},
m::Pad(m::Parameter(0), m::Op()).WithShape(F16, {10, 20, 30, 4}),
m::Pad(m::Parameter(1), m::Op()).WithShape(F16, {2, 2, 4, 32}))));
}
TEST_F(CudnnPadForConvolutionsTest, PadIntForwardConvInputChannels) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,41] parameter(0)
filter = s8[2,2,41,40] parameter(1)
ROOT result = (f32[10,20,30,40], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
SCOPED_TRACE(module->ToString());
EXPECT_THAT(
root,
GmockMatch(m::CustomCall(
{kCudnnConvForwardCallTarget},
m::Pad(m::Parameter(0), m::Op()).WithShape(S8, {10, 20, 30, 44}),
m::Pad(m::Parameter(1), m::Op()).WithShape(S8, {2, 2, 44, 40}))));
}
TEST_F(CudnnPadForConvolutionsTest, PadIntForwardConvOutputChannels) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,40] parameter(0)
filter = s8[2,2,40,41] parameter(1)
ROOT result = (f32[10,20,30,41], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Tuple(
m::Slice(m::GetTupleElement(m::CustomCall(
{kCudnnConvForwardCallTarget}, m::Parameter(0),
m::Pad(m::Parameter(1), m::Op())))),
m::Op())));
}
TEST_F(CudnnPadForConvolutionsTest, PadInt8To32OnSm75) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,40] parameter(0)
filter = s8[2,2,40,41] parameter(1)
ROOT result = (s8[10,20,30,41], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 5}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
GmockMatch(m::Tuple(
m::Slice(m::GetTupleElement(m::CustomCall(
{kCudnnConvForwardCallTarget},
m::Pad(m::Parameter(0), m::Op()).WithShape(S8, {10, 20, 30, 64}),
m::Pad(m::Parameter(1), m::Op()).WithShape(S8, {2, 2, 64, 64})))),
m::Op())));
}
TEST_F(CudnnPadForConvolutionsTest, NoPadInt8To32OnSm70) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,40] parameter(0)
filter = s8[2,2,40,41] parameter(1)
ROOT result = (s8[10,20,30,41], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
GmockMatch(m::Tuple(
m::Slice(m::GetTupleElement(m::CustomCall(
{kCudnnConvForwardCallTarget}, m::Parameter(0),
m::Pad(m::Parameter(1), m::Op()).WithShape(S8, {2, 2, 40, 44})))),
m::Op())));
}
TEST_F(CudnnPadForConvolutionsTest, NoPadInt8To32FloatOutputSm75) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,38] parameter(0)
filter = s8[2,2,38,41] parameter(1)
ROOT result = (f32[10,20,30,41], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 5}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
GmockMatch(m::Tuple(
m::Slice(m::GetTupleElement(m::CustomCall(
{kCudnnConvForwardCallTarget},
m::Pad(m::Parameter(0), m::Op()).WithShape(S8, {10, 20, 30, 40}),
m::Pad(m::Parameter(1), m::Op()).WithShape(S8, {2, 2, 40, 44})))),
m::Op())));
}
TEST_F(CudnnPadForConvolutionsTest, NoPadInt8UnsupportedFilterTypeOutputSm75) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,38] parameter(0)
filter = f32[2,2,38,41] parameter(1)
ROOT result = (s8[10,20,30,41], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_FALSE(CudnnPadForConvolutions({7, 5}).Run(module.get()).value());
}
TEST_F(CudnnPadForConvolutionsTest, NoPadToInt8x32ExcessiveBlowup) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[128,4,48,48] parameter(0)
filter = s8[64,4,3,3] parameter(1)
ROOT result = (f32[128,64,48,48], u8[0]) custom-call(input, filter),
window={size=3x3}, dim_labels=bf01_io01->bf01,
custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_FALSE(CudnnPadForConvolutions({7, 5}).Run(module.get()).value());
}
TEST_F(CudnnPadForConvolutionsTest, PadInt8x4To32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,41,4] parameter(0)
filter = s8[2,2,41,4,168] parameter(1)
ROOT result = (s8[10,20,30,42,4], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f?_01i?o->b01f?,
custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 5}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
GmockMatch(m::Tuple(
m::Slice(m::GetTupleElement(
m::CustomCall({kCudnnConvForwardCallTarget},
m::Pad(m::Parameter(0), m::Op())
.WithShape(S8, {10, 20, 30, 48, 4}),
m::Pad(m::Parameter(1), m::Op())
.WithShape(S8, {2, 2, 48, 4, 192})))
.WithShape(S8, {10, 20, 30, 48, 4})),
m::Op())));
}
TEST_F(CudnnPadForConvolutionsTest, PadInt8x4To32BiasActivation) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,41,4] parameter(0)
filter = s8[2,2,41,4,168] parameter(1)
bias = f32[10] parameter(2)
side_input = s8[10,20,30,42,4] parameter(3)
ROOT result = (s8[10,20,30,42,4], u8[0]) custom-call(input, filter, bias, side_input),
window={size=2x2}, dim_labels=b01f?_01i?o->b01f?,
custom_call_target="__cudnn$convBiasActivationForward"
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 5}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
GmockMatch(m::Tuple(
m::Slice(
m::GetTupleElement(
m::CustomCall(
{kCudnnConvBiasActivationForwardCallTarget},
m::Pad(m::Parameter(0), m::Op())
.WithShape(S8, {10, 20, 30, 48, 4}),
m::Pad(m::Parameter(1), m::Op())
.WithShape(S8, {2, 2, 48, 4, 192}),
m::Pad(m::Parameter(2), m::Op()).WithShape(F32, {32}),
m::Pad(m::Parameter(3), m::Op())
.WithShape(S8, {10, 20, 30, 48, 4})))
.WithShape(S8, {10, 20, 30, 48, 4})),
m::Op())));
}
TEST_F(CudnnPadForConvolutionsTest,
PadIntFusedForwardConvInputAndOutputChannels) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule Test
ENTRY %Test (input: s8[1,3,3,2], filter: s8[3,3,2,5], side_input: s8[1,3,3,5], bias: s8[5]) -> f32[1,3,3,5] {
    %input = s8[1,3,3,2]{3,2,1,0} parameter(0)
%filter = s8[3,3,2,5]{3,2,1,0} parameter(1)
%bias = s8[5]{0} parameter(3)
%convert = f32[5]{0} convert(s8[5]{0} %bias)
%side_input = f32[1,3,3,5]{3,2,1,0} parameter(2)
    %custom-call.1 = (f32[1,3,3,5]{3,2,1,0}, u8[0]{0}) custom-call(s8[1,3,3,2]{3,2,1,0} %input, s8[3,3,2,5]{3,2,1,0} %filter, f32[5]{0} %convert, f32[1,3,3,5]{3,2,1,0} %side_input), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convBiasActivationForward", backend_config="{\"activationMode\":\"2\",\"convResultScale\":1,\"sideInputScale\":1}"
ROOT %get-tuple-element.1 = f32[1,3,3,5]{3,2,1,0} get-tuple-element((f32[1,3,3,5]{3,2,1,0}, u8[0]{0}) %custom-call.1), index=0
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::GetTupleElement(m::Tuple(
m::Slice(m::GetTupleElement(m::CustomCall(
{kCudnnConvBiasActivationForwardCallTarget},
m::Pad(m::Parameter(0), m::Op()),
m::Pad(m::Parameter(1), m::Op()),
m::Pad(m::Convert(m::Parameter(3)), m::Op()),
m::Pad(m::Parameter(2), m::Op())))),
m::Op()))));
}
}
}
} |
2,040 | cpp | tensorflow/tensorflow | gpu_hlo_schedule | third_party/xla/xla/service/gpu/gpu_hlo_schedule.cc | third_party/xla/xla/service/gpu/gpu_hlo_schedule_test.cc | #ifndef XLA_SERVICE_GPU_GPU_HLO_SCHEDULE_H_
#define XLA_SERVICE_GPU_GPU_HLO_SCHEDULE_H_
#include <cstdint>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_description.h"
#include "tsl/profiler/protobuf/profiled_instructions.pb.h"
namespace xla {
namespace gpu {
absl::Status IsProfileApplicable(
const HloModule* module,
const tensorflow::profiler::ProfiledInstructionsProto& profile);
struct ScheduleMetadata {
int64_t scheduler_mem_limit;
};
absl::StatusOr<ScheduleMetadata> ScheduleGpuModule(
HloModule* module, int64_t pointer_size,
const se::DeviceDescription& gpu_device_info);
HloInstructionSequence PostProcessSchedule(const HloInstructionSequence& input);
constexpr absl::string_view kFingerprintBeforeLHS = "fingerprint_before_lhs";
}
}
#endif
#include "xla/service/gpu/gpu_hlo_schedule.h"
#include <cstddef>
#include <cstdint>
#include <deque>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/buffer_value.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_latency_hiding_scheduler.h"
#include "xla/service/gpu/gpu_schedule_postprocessing.h"
#include "xla/service/gpu/model/analytical_latency_estimator.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/latency_hiding_scheduler.h"
#include "xla/service/p2p_schedule_preparation.h"
#include "xla/service/profile_guided_latency_estimator.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
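// Returns true if the instruction should be scheduled as early as possible:
// asynchronous collective start ops (unless marked synchronous) and custom
// calls annotated with SCHEDULE_EARLIEST.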
bool ShouldScheduleAsEarlyAsPossible(const HloInstruction& instr) {
switch (instr.opcode()) {
case HloOpcode::kAllReduceStart:
case HloOpcode::kCollectivePermuteStart:
return !IsSyncCollective(&instr);
case HloOpcode::kCustomCall:
return static_cast<const HloCustomCallInstruction&>(instr)
.custom_call_schedule() ==
CustomCallSchedule::SCHEDULE_EARLIEST;
default:
return false;
}
}
bool ShouldScheduleSuccessor(const HloInstruction& successor,
                             const HloPredicate& is_scheduled) {
  return ShouldScheduleAsEarlyAsPossible(successor) &&
         absl::c_all_of(successor.operands(), is_scheduled) &&
         absl::c_all_of(successor.control_predecessors(), is_scheduled);
}
bool ShouldScheduleAsLateAsPossible(const HloInstruction& instr) {
switch (instr.opcode()) {
case HloOpcode::kAllReduceDone:
case HloOpcode::kCollectivePermuteDone:
return ShouldScheduleAsEarlyAsPossible(*instr.operand(0));
case HloOpcode::kCustomCall:
return static_cast<const HloCustomCallInstruction&>(instr)
.custom_call_schedule() == CustomCallSchedule::SCHEDULE_LATEST;
default:
return false;
}
}
bool ShouldSchedulePredecessor(const HloInstruction& predecessor,
const HloPredicate& is_scheduled) {
return ShouldScheduleAsLateAsPossible(predecessor) &&
absl::c_all_of(predecessor.users(), is_scheduled) &&
absl::c_all_of(predecessor.control_successors(), is_scheduled);
}
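// Post-processes the schedule in two passes: a forward pass that pulls
// "earliest" ops (e.g. async starts) up to right after their operands and
// control predecessors are scheduled, and a backward pass that pushes
// "latest" ops (e.g. async dones) down to right before their first user.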
HloInstructionSequence PostprocessorToScheduleAsEarlyOrLateAsPossible(
const HloInstructionSequence& input) {
std::vector<HloInstruction*> earliest_scheduled;
{
absl::flat_hash_set<HloInstruction*> scheduled;
auto is_scheduled = [&](const HloInstruction* instr) -> bool {
return scheduled.contains(instr);
};
auto add_to_schedule = [&](HloInstruction* instr) {
earliest_scheduled.push_back(instr);
scheduled.insert(instr);
};
for (HloInstruction* instr : input.instructions()) {
if (is_scheduled(instr)) continue;
add_to_schedule(instr);
for (HloInstruction* user : instr->users()) {
if (is_scheduled(user)) continue;
if (ShouldScheduleSuccessor(*user, is_scheduled)) {
add_to_schedule(user);
}
}
for (HloInstruction* successor : instr->control_successors()) {
if (is_scheduled(successor)) continue;
if (ShouldScheduleSuccessor(*successor, is_scheduled)) {
add_to_schedule(successor);
}
}
}
}
std::deque<HloInstruction*> latest_scheduled;
{
absl::flat_hash_set<HloInstruction*> scheduled;
auto is_scheduled = [&](const HloInstruction* instr) -> bool {
return scheduled.contains(instr);
};
auto add_to_schedule = [&](HloInstruction* instr) {
latest_scheduled.push_front(instr);
scheduled.insert(instr);
};
for (auto it = earliest_scheduled.rbegin(); it != earliest_scheduled.rend();
it++) {
if (is_scheduled(*it)) continue;
add_to_schedule(*it);
for (HloInstruction* operand : (*it)->operands()) {
if (is_scheduled(operand)) continue;
if (ShouldSchedulePredecessor(*operand, is_scheduled)) {
add_to_schedule(operand);
}
}
for (HloInstruction* predecessor : (*it)->control_predecessors()) {
if (is_scheduled(predecessor)) continue;
if (ShouldSchedulePredecessor(*predecessor, is_scheduled)) {
add_to_schedule(predecessor);
}
}
}
}
HloInstructionSequence result;
absl::c_for_each(latest_scheduled,
[&](HloInstruction* i) { result.push_back(i); });
CHECK(input.instructions().size() == result.size())
<< "schedule as early or late post-processing changed schedule size from "
<< input.instructions().size() << " to " << result.size();
return result;
}
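// Moves the start op of each synchronous collective to directly before its
// matching done op, so that no other work is scheduled between the pair.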
HloInstructionSequence PostprocessorToScheduleSyncCollectives(
const HloInstructionSequence& input) {
HloInstructionSequence result;
auto is_sync_start = [](const HloInstruction* instr) {
return hlo_query::IsAsyncCollectiveStartOp(instr,
true) &&
IsSyncCollective(instr);
};
for (HloInstruction* instr : input.instructions()) {
if (is_sync_start(instr)) continue;
if (hlo_query::IsAsyncCollectiveDoneOp(instr, true)) {
HloInstruction* start = instr->mutable_operand(0);
if (is_sync_start(start)) result.push_back(start);
}
result.push_back(instr);
}
CHECK(input.instructions().size() == result.size())
<< "sync collectives post-processing changed schedule size from "
<< input.instructions().size() << " to " << result.size();
return result;
}
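// Runs the default memory scheduler using ByteSizeOf as the buffer size
// function; PostProcessSchedule is applied to each computation's sequence.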
absl::StatusOr<HloSchedule> ScheduleGpuModuleWithMemoryScheduler(
const HloModule* module, int64_t pointer_size) {
return ScheduleModule(
module,
[pointer_size](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), pointer_size);
},
ComputationSchedulerToModuleScheduler(DefaultMemoryScheduler,
PostProcessSchedule));
}
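// Builds the latency-hiding scheduler config: one in-flight collective of
// each kind, aggressive scheduling policies, send/recv scheduling enabled,
// and the given memory limit.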
SchedulerConfig GetSchedulerConfig(int64_t memory_limit) {
SchedulerConfig config;
config.all_reduce_overlap_limit = 1;
config.collective_broadcast_overlap_limit = 1;
config.collective_permute_overlap_limit = 1;
config.use_real_cost_model = false;
config.aggressive_scheduling_policies = true;
config.schedule_send_recvs = true;
config.memory_limit = memory_limit;
return config;
}
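// Filters profile entries prefixed with "<fingerprint>::" down to this
// module's costs and, if rematerialization clones (".remat" suffixes) are
// present, merges each clone group into its base instruction by averaging
// cost_us.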
tensorflow::profiler::ProfiledInstructionsProto GetProfileForFingerprint(
tensorflow::profiler::ProfiledInstructionsProto& profile,
const std::string& fingerprint) {
tensorflow::profiler::ProfiledInstructionsProto result;
bool merge_remat_clones = false;
for (const auto& cost : profile.costs()) {
absl::string_view cost_name = cost.name();
std::string new_cost_name = cost.name();
absl::string_view cost_sep = "::";
if (absl::StrContains(cost_name, cost_sep)) {
std::vector<std::string> split_names =
absl::StrSplit(cost_name, cost_sep);
if (split_names.size() != 2 || split_names[0] != fingerprint) {
continue;
}
new_cost_name = split_names[1];
}
merge_remat_clones |= absl::StrContains(new_cost_name, ".remat");
auto* new_cost = result.add_costs();
new_cost->set_cost_us(cost.cost_us());
new_cost->set_name(new_cost_name);
}
if (!merge_remat_clones) {
return result;
}
auto strip_remat_suffix = [](absl::string_view name) -> absl::string_view {
absl::string_view suffix = ".remat";
size_t index = name.rfind(suffix);
if (index == std::string::npos) {
return name;
}
auto after_suffix = name.substr(index + suffix.size());
int64_t numeric_suffix;
if (after_suffix.empty() ||
absl::SimpleAtoi(after_suffix, &numeric_suffix)) {
return name.substr(0, index);
}
return name;
};
absl::flat_hash_map<absl::string_view, std::pair<double, int64_t>> costs;
for (const auto& cost : result.costs()) {
std::pair<double, int64_t>& data = costs[strip_remat_suffix(cost.name())];
data.first += cost.cost_us();
data.second++;
}
tensorflow::profiler::ProfiledInstructionsProto merged_result;
for (const auto& cost : costs) {
auto* new_cost = merged_result.add_costs();
double average = cost.second.first / cost.second.second;
new_cost->set_cost_us(average);
new_cost->set_name(std::string(cost.first));
}
return merged_result;
}
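// Reads a PGLE profile for the module, trying in order: the embedded
// fdo_profile (binary proto, then text proto) and the path given by
// xla_gpu_pgle_profile_file_or_directory_path. For a directory, it looks for
// <fingerprint>.pbtxt and <fingerprint>.pb.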
std::optional<tensorflow::profiler::ProfiledInstructionsProto> ReadPGLEProfile(
const HloModule* module, const std::string& fingerprint) {
tensorflow::profiler::ProfiledInstructionsProto profile;
absl::string_view fdo_profile = module->config().fdo_profile();
if (!fdo_profile.empty()) {
if (tsl::ParseProtoUnlimited(&profile, fdo_profile.data(),
fdo_profile.size())) {
LOG(INFO) << "Using PGLE profile for module from fdo_profile (binary)";
return GetProfileForFingerprint(profile, fingerprint);
}
profile.Clear();
if (tsl::protobuf::TextFormat::ParseFromString(std::string(fdo_profile),
&profile)) {
LOG(INFO) << "Using PGLE profile for module from fdo_profile (text)";
return GetProfileForFingerprint(profile, fingerprint);
}
LOG(ERROR) << "Unable to prase FDO profile: not a valid text or binary "
"ProfiledInstructionsProto";
}
const std::string& pgle_profile_file_or_dir_path =
module->config()
.debug_options()
.xla_gpu_pgle_profile_file_or_directory_path();
if (pgle_profile_file_or_dir_path.empty()) {
return std::nullopt;
}
tsl::Env* env = tsl::Env::Default();
auto read_text_or_binary_profile = [&profile, env, &fingerprint](
const std::string& text_path,
const std::string& binary_path)
-> std::optional<tensorflow::profiler::ProfiledInstructionsProto> {
if (env->FileExists(text_path).ok()) {
absl::Status s = tsl::ReadTextProto(env, text_path, &profile);
if (s.ok()) {
LOG(INFO) << "Using PGLE profile from " << text_path;
return GetProfileForFingerprint(profile, fingerprint);
} else {
LOG(ERROR) << "Unable to read PGLE text proto from " << text_path
<< ": " << s.message();
}
profile.Clear();
}
if (env->FileExists(binary_path).ok()) {
absl::Status s = tsl::ReadBinaryProto(env, binary_path, &profile);
if (s.ok()) {
LOG(INFO) << "Using PGLE profile from " << binary_path;
return GetProfileForFingerprint(profile, fingerprint);
} else {
LOG(ERROR) << "Unable to read PGLE binary proto from " << binary_path
<< ": " << s.message();
}
profile.Clear();
}
return std::nullopt;
};
if (env->IsDirectory(pgle_profile_file_or_dir_path).ok()) {
std::string pgle_profile_path_prefix =
pgle_profile_file_or_dir_path + "/" + fingerprint;
return read_text_or_binary_profile(pgle_profile_path_prefix + ".pbtxt",
pgle_profile_path_prefix + ".pb");
}
auto extension = tsl::io::Extension(pgle_profile_file_or_dir_path);
if (extension == "pbtxt") {
return read_text_or_binary_profile(pgle_profile_file_or_dir_path, "");
} else if (extension == "pb") {
return read_text_or_binary_profile("", pgle_profile_file_or_dir_path);
} else {
return read_text_or_binary_profile(pgle_profile_file_or_dir_path,
pgle_profile_file_or_dir_path);
}
}
}
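// Returns OkStatus if every cost and latency entry in the profile names an
// instruction that exists in the module; otherwise returns an
// InvalidArgument error listing the missing names.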
absl::Status IsProfileApplicable(
const HloModule* module,
const tensorflow::profiler::ProfiledInstructionsProto& profile) {
absl::flat_hash_set<absl::string_view> all_instruction_names;
for (HloComputation* comp : module->MakeNonfusionComputations()) {
for (HloInstruction* instr : comp->instructions()) {
all_instruction_names.insert(instr->name());
}
}
std::vector<std::string> missing_costs_names;
for (const auto& cost : profile.costs()) {
if (!all_instruction_names.contains(cost.name())) {
missing_costs_names.push_back(cost.name());
}
}
std::vector<std::string> missing_latency_names;
for (const auto& latency : profile.latencies()) {
if (!all_instruction_names.contains(latency.source())) {
missing_latency_names.push_back(latency.source());
}
if (!all_instruction_names.contains(latency.target())) {
missing_latency_names.push_back(latency.target());
}
}
if (!(missing_costs_names.empty() && missing_latency_names.empty())) {
return absl::InvalidArgumentError(
absl::StrFormat("\nMissing costs: %s;\nMissing latencies: %s",
absl::StrJoin(missing_costs_names, ", "),
absl::StrJoin(missing_latency_names, ", ")));
}
return absl::OkStatus();
}
static int64_t GetSchedulerMemoryLimit(
const HloModule* module, const se::DeviceDescription& gpu_device_info,
int pointer_size);
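// Entry point for GPU scheduling: prepares P2P schedules, runs the memory
// scheduler, stamps the pre-LHS module fingerprint as a frontend attribute,
// and (when enabled) re-schedules with the latency-hiding scheduler backed
// by a profile-guided, analytical, or default GPU latency estimator.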
absl::StatusOr<ScheduleMetadata> ScheduleGpuModule(
HloModule* module, int64_t pointer_size,
const se::DeviceDescription& gpu_device_info) {
int64_t memory_limit =
GetSchedulerMemoryLimit(module, gpu_device_info, pointer_size);
if (module->has_schedule()) {
return ScheduleMetadata{memory_limit};
}
HloPassPipeline prepare_pipeline("p2p-schedule-preparation");
prepare_pipeline.AddPass<P2PSchedulePreparation>();
TF_RETURN_IF_ERROR(prepare_pipeline.Run(module).status());
TF_ASSIGN_OR_RETURN(
HloSchedule schedule,
ScheduleGpuModuleWithMemoryScheduler(module, pointer_size));
TF_RETURN_IF_ERROR(module->set_schedule(std::move(schedule)));
std::string fingerprint = module->GetFingerprint128(
HloPrintOptions::Canonical().set_print_backend_config(true));
FrontendAttributes attributes;
(*attributes.mutable_map())[std::string(kFingerprintBeforeLHS)] = fingerprint;
module->add_frontend_attributes(attributes);
VLOG(1) << "Fingerprint before LHS for module " << module->name() << "("
<< module->unique_id() << ") = " << fingerprint;
const bool enable_latency_hiding_scheduler =
module->config()
.debug_options()
.xla_gpu_enable_latency_hiding_scheduler();
if (!enable_latency_hiding_scheduler) {
return ScheduleMetadata{memory_limit};
}
SchedulerConfig config = GetSchedulerConfig(memory_limit);
auto gpu_latency_estimator =
std::make_unique<GpuLatencyEstimator>(pointer_size);
std::unique_ptr<LatencyEstimator> latency_estimator;
std::optional<tensorflow::profiler::ProfiledInstructionsProto> profile =
ReadPGLEProfile(module, fingerprint);
const bool enable_analytical_latency_estimator =
module->config()
.debug_options()
.xla_gpu_enable_analytical_latency_estimator();
if (profile.has_value()) {
latency_estimator = std::make_unique<ProfileGuidedLatencyEstimator>(
config, std::move(gpu_latency_estimator), profile.value());
LOG(INFO)
<< "Found profile, using profile guided latency estimator. Profile:\n"
<< profile->DebugString();
absl::Status s = IsProfileApplicable(module, profile.value());
if (!s.ok()) {
LOG(INFO) << "PGLE profile may not applicable to the module, but will "
"still be used : "
<< s.message();
}
} else if (enable_analytical_latency_estimator) {
latency_estimator = std::make_unique<AnalyticalLatencyEstimator>(
config, std::move(gpu_latency_estimator), gpu_device_info,
[input_pointer_size = pointer_size](const Shape& shape) {
return GetSizeOfShape(shape, input_pointer_size);
},
module->entry_computation());
LOG(INFO) << "Using analytical latency estimator";
} else {
latency_estimator = std::move(gpu_latency_estimator);
}
auto async_tracker = [&]() -> std::unique_ptr<AsyncTracker> {
return module->config()
.debug_options()
.xla_gpu_lhs_enable_gpu_async_tracker()
? std::make_unique<GpuAsyncTracker>(config)
: std::make_unique<GpuAsyncTrackerBase>(config);
}();
auto shape_size_in_bytes = [pointer_size](const Shape& shape) {
return GetSizeOfShape(shape, pointer_size);
};
HloPassPipeline pipeline("latency-hiding-scheduler");
auto scheduler_core = std::make_unique<DefaultSchedulerCore>(
shape_size_in_bytes, async_tracker.get(), latency_estimator.get(),
config);
pipeline.AddPass<LatencyHidingScheduler>(
std::move(latency_estimator), std::move(async_tracker),
std::move(scheduler_core), shape_size_in_bytes);
TF_RETURN_IF_ERROR(pipeline.Run(module).status());
HloPassPipeline postprocessing_pipeline("gpu-schedule-postprocessing");
postprocessing_pipeline.AddPass<GpuSchedulePostprocessing>();
TF_RETURN_IF_ERROR(postprocessing_pipeline.Run(module).status());
return ScheduleMetadata{memory_limit};
}
HloInstructionSequence PostProcessSchedule(
const HloInstructionSequence& input) {
HloInstructionSequence result = PostprocessorToScheduleSyncCollectives(input);
return PostprocessorToScheduleAsEarlyOrLateAsPossible(result);
}
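// Estimates the memory available to the scheduler: the configured device
// memory size (or 80% of the physical device memory when unset), minus the
// entry computation's unaliased parameter and result sizes, scaled by the
// xla_gpu_memory_limit_slop_factor percentage.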
static int64_t GetSchedulerMemoryLimit(
const HloModule* module, const se::DeviceDescription& gpu_device_info,
int pointer_size) {
const int64_t base_limit =
module->config().device_memory_size() != 0
? module->config().device_memory_size()
: gpu_device_info.device_memory_size() * 80 / 100;
int64_t total_io_size = 0;
for (HloInstruction* param :
module->entry_computation()->parameter_instructions()) {
ShapeUtil::ForEachSubshape(
param->shape(),
[&](const Shape& subshape, const ShapeIndex& ) {
total_io_size += GetSizeOfShape(subshape, pointer_size);
});
}
ShapeUtil::ForEachSubshape(
module->result_shape(),
[&](const Shape& subshape, const ShapeIndex& ) {
total_io_size += GetSizeOfShape(subshape, pointer_size);
});
module->input_output_alias_config().ForEachAlias(
[&](const ShapeIndex& output_index,
const HloInputOutputAliasConfig::Alias&) {
const Shape& subshape =
ShapeUtil::GetSubshape(module->result_shape(), output_index);
total_io_size -= GetSizeOfShape(subshape, pointer_size);
});
int64_t limit =
(base_limit - total_io_size) *
module->config().debug_options().xla_gpu_memory_limit_slop_factor() / 100;
return limit;
}
}
} | #include "xla/service/gpu/gpu_hlo_schedule.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/backend.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_ordering.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/protobuf/profiled_instructions.pb.h"
namespace xla {
namespace gpu {
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
class GpuHloScheduleTest : public HloTestBase {
protected:
using HloVec = std::vector<HloInstruction*>;
Shape f32_2x2_ = ShapeUtil::MakeShape(F32, {2, 2});
SequentialHloOrdering BuildHloOrdering(HloModule* module) {
Backend& test_backend = backend();
const se::DeviceDescription& gpu_device_info =
test_backend.default_stream_executor()->GetDeviceDescription();
TF_CHECK_OK(ScheduleGpuModule(module, 8, gpu_device_info)
.status());
return SequentialHloOrdering{module->schedule()};
}
HloModuleConfig GetModuleConfig(bool enable_latency_hiding_scheduler,
bool enable_gpu_async_tracker = false,
absl::string_view fdo_profile = "") {
HloModuleConfig config;
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_latency_hiding_scheduler(
enable_latency_hiding_scheduler);
debug_options.set_xla_gpu_lhs_enable_gpu_async_tracker(
enable_gpu_async_tracker);
config.set_debug_options(debug_options);
*config.mutable_fdo_profile() = fdo_profile;
return config;
}
std::unique_ptr<HloModule> CreateNewVerifiedModule(
bool enable_latency_hiding_scheduler = false) {
return std::make_unique<HloModule>(
"test_module", GetModuleConfig(enable_latency_hiding_scheduler));
}
static bool HasValidFingerprint(HloModule* module) {
const FrontendAttributes& attrs = module->frontend_attributes();
auto it = attrs.map().find(kFingerprintBeforeLHS);
return it != attrs.map().end() && it->second.size() == 128 / 4;
}
};
TEST_F(GpuHloScheduleTest, SequentialMatMul) {
HloComputation::Builder builder("entry_computation");
HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
0, f32_2x2_, "x"));
HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
1, f32_2x2_, "y"));
HloInstruction* z = builder.AddInstruction(HloInstruction::CreateParameter(
2, f32_2x2_, "z"));
HloInstruction* dot1 =
builder.AddInstruction(CreateCanonicalDot(f32_2x2_, x, y));
HloInstruction* dot2 =
builder.AddInstruction(CreateCanonicalDot(f32_2x2_, dot1, z));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build(dot2));
SequentialHloOrdering order = BuildHloOrdering(module.get());
EXPECT_TRUE(order.ExecutesBefore(y, x));
EXPECT_TRUE(order.ExecutesBefore(y, dot1));
EXPECT_TRUE(order.ExecutesBefore(z, dot1));
EXPECT_TRUE(order.ExecutesBefore(z, dot2));
EXPECT_TRUE(order.ExecutesBefore(dot1, dot2));
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
TEST_F(GpuHloScheduleTest, SequentialAdd) {
HloComputation::Builder builder("entry_computation");
HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
0, f32_2x2_, "x"));
HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
1, f32_2x2_, "y"));
HloInstruction* z = builder.AddInstruction(HloInstruction::CreateParameter(
2, f32_2x2_, "z"));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, x, y));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, y, z));
HloInstruction* add3 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, add2));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build(add3));
SequentialHloOrdering order = BuildHloOrdering(module.get());
EXPECT_TRUE(order.ExecutesBefore(y, x));
EXPECT_TRUE(order.ExecutesBefore(y, add1));
EXPECT_TRUE(order.ExecutesBefore(z, add1));
EXPECT_TRUE(order.ExecutesBefore(z, add2));
EXPECT_TRUE(order.ExecutesBefore(add1, add2));
EXPECT_TRUE(order.ExecutesBefore(add2, add3));
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
TEST_F(GpuHloScheduleTest, AsyncCustomCall) {
HloComputation::Builder builder("entry_computation");
HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
0, f32_2x2_, "x"));
HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
1, f32_2x2_, "y"));
HloInstruction* z = builder.AddInstruction(HloInstruction::CreateParameter(
2, f32_2x2_, "z"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, x, y));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add0, y));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, z));
HloInstruction* nonblocking_call =
builder.AddInstruction(HloInstruction::CreateCustomCall(
f32_2x2_, {add0},
"nonblocking-call-start",
""));
static_cast<HloCustomCallInstruction*>(nonblocking_call)
->set_custom_call_schedule(SCHEDULE_EARLIEST);
TF_CHECK_OK(add1->AddControlDependencyTo(nonblocking_call));
HloInstruction* blocking_call =
builder.AddInstruction(HloInstruction::CreateCustomCall(
f32_2x2_, {nonblocking_call},
"blocking-call-done",
""));
static_cast<HloCustomCallInstruction*>(blocking_call)
->set_custom_call_schedule(SCHEDULE_LATEST);
HloInstruction* add3 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, add2));
HloInstruction* add4 = builder.AddInstruction(HloInstruction::CreateBinary(
f32_2x2_, HloOpcode::kAdd, add3, blocking_call));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build(add4));
SequentialHloOrdering order = BuildHloOrdering(module.get());
VLOG(2) << order.ToString();
EXPECT_TRUE(order.ExecutesBefore(add0, nonblocking_call));
EXPECT_TRUE(order.ExecutesBefore(add1, nonblocking_call));
EXPECT_TRUE(order.ExecutesBefore(nonblocking_call, add2));
EXPECT_TRUE(order.ExecutesBefore(nonblocking_call, add3));
EXPECT_TRUE(order.ExecutesBefore(nonblocking_call, add4));
EXPECT_TRUE(order.ExecutesBefore(add3, blocking_call));
EXPECT_TRUE(order.ExecutesBefore(blocking_call, add4));
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
TEST_F(GpuHloScheduleTest, AsyncCollectivePermute) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
HloComputation::Builder builder("entry_computation");
HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
0, f32_2x2_, "x"));
HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
1, f32_2x2_, "y"));
HloInstruction* z = builder.AddInstruction(HloInstruction::CreateParameter(
2, f32_2x2_, "z"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, x, y));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add0, y));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, z));
Shape u32_scalar = ShapeUtil::MakeShape(U32, {});
Shape collective_permute_start_shape =
ShapeUtil::MakeTupleShape({f32_2x2_, f32_2x2_});
HloInstruction* collective_permute_start =
builder.AddInstruction(HloInstruction::CreateCollectivePermuteStart(
collective_permute_start_shape, add0,
{{0, 1}}, std::nullopt));
TF_CHECK_OK(add1->AddControlDependencyTo(collective_permute_start));
HloInstruction* collective_permute_done = builder.AddInstruction(
HloInstruction::CreateUnary(f32_2x2_, HloOpcode::kCollectivePermuteDone,
collective_permute_start));
HloInstruction* add3 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, add2));
HloInstruction* add4 = builder.AddInstruction(HloInstruction::CreateBinary(
f32_2x2_, HloOpcode::kAdd, add3, collective_permute_done));
module->AddEntryComputation(builder.Build(add4));
SequentialHloOrdering order = BuildHloOrdering(module.get());
VLOG(2) << order.ToString();
EXPECT_TRUE(order.ExecutesBefore(add0, collective_permute_start));
EXPECT_TRUE(order.ExecutesBefore(add1, collective_permute_start));
EXPECT_TRUE(order.ExecutesBefore(collective_permute_start, add2));
EXPECT_TRUE(order.ExecutesBefore(collective_permute_start, add3));
EXPECT_TRUE(order.ExecutesBefore(collective_permute_start, add4));
EXPECT_TRUE(order.ExecutesBefore(add3, collective_permute_done));
EXPECT_TRUE(order.ExecutesBefore(collective_permute_done, add4));
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
TEST_F(GpuHloScheduleTest, LHSCostModel) {
const char* hlo_text = R"(
HloModule AsyncAR
apply_op {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT apply_op = f32[] add(x, y)
}
ENTRY ar {
p0 = f32[32] parameter(0)
p1 = f32[32, 32] parameter(1)
p2 = f32[32, 32] parameter(2)
p3 = f32[32] parameter(3)
dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
dot1 = f32[32,32]{1,0} custom-call(dot0, p2), custom_call_target="__cublas$gemm"
dot2 = f32[32,32]{1,0} custom-call(dot1, p2), custom_call_target="__cublas$gemm"
dot3 = f32[32,32]{1,0} custom-call(dot2, p2), custom_call_target="__cublas$gemm"
dot4 = f32[32,32]{1,0} custom-call(dot3, p2), custom_call_target="__cublas$gemm"
dot5 = f32[32,32]{1,0} custom-call(dot4, p2), custom_call_target="__cublas$gemm"
dot6 = f32[32,32]{1,0} custom-call(dot5, p2), custom_call_target="__cublas$gemm"
ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op
ar-done = f32[32] all-reduce-done(ar-start)
ar-start1 = f32[32] all-reduce-start(p3), to_apply=apply_op
ar-done1 = f32[32] all-reduce-done(ar-start1)
add0 = f32[32,32] add(dot0, dot1)
add1 = f32[32,32] add(add0, dot2)
add2 = f32[32,32] add(add1, dot3)
add3 = f32[32,32] add(add2, dot4)
add4 = f32[32,32] add(add3, dot5)
add5 = f32[32,32] add(add4, dot6)
ROOT t = (f32[32], f32[32], f32[32,32]) tuple(ar-done, ar-done1, add5)
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
hlo_text, GetModuleConfig(true)));
SequentialHloOrdering order = BuildHloOrdering(module.get());
HloComputation* entry = module->entry_computation();
std::vector<int64_t> count_between_pairs;
bool in_between = false;
for (const HloInstruction* inst :
order.SequentialOrder(*entry)->instructions()) {
if (inst->opcode() == HloOpcode::kAllReduceStart) {
in_between = true;
count_between_pairs.push_back(0);
} else if (inst->opcode() == HloOpcode::kAllReduceDone) {
in_between = false;
} else if (in_between && inst->opcode() == HloOpcode::kCustomCall) {
count_between_pairs.back()++;
}
}
EXPECT_EQ(count_between_pairs.size(), 2);
EXPECT_GT(count_between_pairs[0], 0);
EXPECT_GT(count_between_pairs[1], 0);
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
TEST_F(GpuHloScheduleTest, LHSCostModelCostlyAR) {
const char* hlo_text = R"(
HloModule AsyncAR
apply_op {
x = bf16[] parameter(0)
y = bf16[] parameter(1)
ROOT apply_op = bf16[] add(x, y)
}
ENTRY ar {
p0 = bf16[32505856] parameter(0)
p1 = f32[32, 32] parameter(1)
p2 = f32[32, 32] parameter(2)
dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
dot1 = f32[32,32]{1,0} custom-call(dot0, p2), custom_call_target="__cublas$gemm"
dot2 = f32[32,32]{1,0} custom-call(dot1, p2), custom_call_target="__cublas$gemm"
dot3 = f32[32,32]{1,0} custom-call(dot2, p2), custom_call_target="__cublas$gemm"
dot4 = f32[32,32]{1,0} custom-call(dot3, p2), custom_call_target="__cublas$gemm"
dot5 = f32[32,32]{1,0} custom-call(dot4, p2), custom_call_target="__cublas$gemm"
dot6 = f32[32,32]{1,0} custom-call(dot5, p2), custom_call_target="__cublas$gemm"
ar-start = bf16[32505856] all-reduce-start(p0), to_apply=apply_op
ar-done = bf16[32505856] all-reduce-done(ar-start)
ROOT t = (bf16[32505856], f32[32,32]) tuple(ar-done, dot6)
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
hlo_text, GetModuleConfig(true)));
SequentialHloOrdering order = BuildHloOrdering(module.get());
HloComputation* entry = module->entry_computation();
std::vector<int64_t> count_between_pairs;
bool in_between = false;
for (const HloInstruction* inst :
order.SequentialOrder(*entry)->instructions()) {
if (inst->opcode() == HloOpcode::kAllReduceStart) {
in_between = true;
count_between_pairs.push_back(0);
} else if (inst->opcode() == HloOpcode::kAllReduceDone) {
in_between = false;
} else if (in_between && inst->opcode() == HloOpcode::kCustomCall) {
count_between_pairs.back()++;
}
}
EXPECT_EQ(count_between_pairs.size(), 1);
EXPECT_EQ(count_between_pairs[0], 7);
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
TEST_F(GpuHloScheduleTest, ProfileGuidedCostModel) {
const char* hlo_text = R"(
HloModule AsyncAR
apply_op {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT apply_op = f32[] add(x, y)
}
ENTRY ar {
p0 = f32[32] parameter(0)
p1 = f32[32, 32] parameter(1)
p2 = f32[32, 32] parameter(2)
p3 = f32[32] parameter(3)
dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
dot1 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
add0 = f32[32,32] add(dot0, dot1)
ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op
ar-done = f32[32] all-reduce-done(ar-start)
ar-start1 = f32[32] all-reduce-start(p3), to_apply=apply_op
ar-done1 = f32[32] all-reduce-done(ar-start1)
ROOT t = (f32[32], f32[32], f32[32,32]) tuple(ar-done, ar-done1, add0)
})";
struct SubTest {
std::string profile;
std::string target_start, target_done;
};
std::vector<SubTest> subtests;
const std::string ar_long_latency_proto_text = R"pb(
costs { name: "dot0" cost_us: 100.0 }
costs { name: "dot1" cost_us: 100.0 }
costs { name: "add0" cost_us: 10.0 }
costs { name: "ar-start" cost_us: 1000.0 }
costs { name: "ar-start1" cost_us: 10.0 }
)pb";
subtests.push_back({ar_long_latency_proto_text, "ar-start", "ar-done"});
const std::string ar1_long_latency_proto_text = R"pb(
costs { name: "dot0" cost_us: 100.0 }
costs { name: "dot1" cost_us: 100.0 }
costs { name: "add0" cost_us: 10.0 }
costs { name: "ar-start" cost_us: 10.0 }
costs { name: "ar-start1" cost_us: 1000.0 }
)pb";
tensorflow::profiler::ProfiledInstructionsProto profile;
ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
ar1_long_latency_proto_text, &profile));
std::string ar1_long_latency_proto_binary = profile.SerializeAsString();
  subtests.push_back({ar1_long_latency_proto_binary, "ar-start1", "ar-done1"});
for (const SubTest& subtest : subtests) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
hlo_text, GetModuleConfig(true,
true,
subtest.profile)));
SequentialHloOrdering order = BuildHloOrdering(module.get());
HloComputation* entry = module->entry_computation();
bool between_target_collective_pair = false;
for (const HloInstruction* inst :
order.SequentialOrder(*entry)->instructions()) {
if (inst->name() == subtest.target_start) {
between_target_collective_pair = true;
} else if (inst->name() == subtest.target_done) {
between_target_collective_pair = false;
} else if (inst->opcode() == HloOpcode::kDot ||
inst->opcode() == HloOpcode::kAdd) {
EXPECT_TRUE(between_target_collective_pair);
}
}
}
}
TEST_F(GpuHloScheduleTest,
ProfileGuidedCostModelApplicabilityListsMissingCostsAndLatencies) {
const char* hlo_text = R"(
HloModule AsyncAR
apply_op {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT apply_op = f32[] add(x, y)
}
ENTRY ar {
p0 = f32[32] parameter(0)
p1 = f32[32, 32] parameter(1)
p2 = f32[32, 32] parameter(2)
p3 = f32[32] parameter(3)
dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op
ar-done = f32[32] all-reduce-done(ar-start)
ar-start1 = f32[32] all-reduce-start(p3), to_apply=apply_op
ar-done1 = f32[32] all-reduce-done(ar-start1)
ROOT t = (f32[32], f32[32], f32[32,32]) tuple(ar-done, ar-done1, dot0)
})";
const std::string ar_long_latency_proto_text = R"pb(
costs { name: "dot0" cost_us: 100.0 }
costs { name: "dot1" cost_us: 100.0 }
costs { name: "add0" cost_us: 10.0 }
costs { name: "ar-start" cost_us: 10.0 }
costs { name: "ar-start-2" cost_us: 10.0 }
)pb";
tensorflow::profiler::ProfiledInstructionsProto profile;
ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
ar_long_latency_proto_text, &profile));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(
hlo_text,
GetModuleConfig(true,
true,
ar_long_latency_proto_text)));
absl::Status result = IsProfileApplicable(module.get(), profile);
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(result.message(), HasSubstr("add0"));
EXPECT_THAT(result.message(), HasSubstr("dot1"));
EXPECT_THAT(result.message(), HasSubstr("ar-start-2"));
}
TEST_F(GpuHloScheduleTest, ProfileGuidedCostModelWithRematData) {
const char* hlo_text = R"(
HloModule AsyncAR
apply_op {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT apply_op = f32[] add(x, y)
}
ENTRY ar {
p0 = f32[32] parameter(0)
p1 = f32[32, 32] parameter(1)
p2 = f32[32, 32] parameter(2)
p3 = f32[32] parameter(3)
dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
dot1 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
add0 = f32[32,32] add(dot0, dot1)
ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op
ar-done = f32[32] all-reduce-done(ar-start)
ar-start1 = f32[32] all-reduce-start(p3), to_apply=apply_op
ar-done1 = f32[32] all-reduce-done(ar-start1)
ROOT t = (f32[32], f32[32], f32[32,32]) tuple(ar-done, ar-done1, add0)
})";
const std::string ar_long_latency_proto_text = R"pb(
costs { name: "dot0" cost_us: 100.0 }
costs { name: "dot1" cost_us: 100.0 }
costs { name: "add0" cost_us: 10.0 }
costs { name: "ar-start" cost_us: 1.0 }
costs { name: "ar-start1" cost_us: 1.0 }
costs { name: "ar-start.remat100" cost_us: 2000.0 }
)pb";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
hlo_text,
GetModuleConfig(true,
true,
ar_long_latency_proto_text)));
SequentialHloOrdering order = BuildHloOrdering(module.get());
HloComputation* entry = module->entry_computation();
bool between_target_collective_pair = false;
for (const HloInstruction* inst :
order.SequentialOrder(*entry)->instructions()) {
if (inst->name() == "ar-start") {
between_target_collective_pair = true;
} else if (inst->name() == "ar-done") {
between_target_collective_pair = false;
} else if (inst->opcode() == HloOpcode::kDot ||
inst->opcode() == HloOpcode::kAdd) {
EXPECT_TRUE(between_target_collective_pair);
}
}
}
TEST_F(GpuHloScheduleTest, LHSSendRecv) {
const char* hlo_text = R"(
HloModule test
while_cond {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(25)
ROOT cond_result = pred[] compare(count, ub), direction=LT
}
while_body {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
send-data = get-tuple-element(%param), index=1
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}}"
}
send = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}}"
}
recv-done = (f32[1, 1024, 1024], token[]) recv-done(recv), channel_id=1
send-done = token[] send-done(send), channel_id=1
recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done), index=0
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
replica = u32[] replica-id()
c10 = u32[] constant(10)
sum = u32[] add(replica, c10)
sum2 = u32[] add(sum, count)
conv = f32[] convert(sum2)
p = f32[1, 1024, 1024] broadcast(conv), dimensions={}
b = f32[1, 1024, 1024] add(p, recv-data)
c = f32[1, 1024, 1024] multiply(b, b)
d = f32[1, 1024, 1024] tan(c)
s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
ROOT result = (u32[], f32[1, 1024, 1024]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
while_init = (u32[], f32[1, 1024, 1024]) tuple(c0, init)
while_result = (u32[], f32[1, 1024, 1024]) while(while_init),
body=while_body, condition=while_cond
ROOT entry_result = f32[1, 1024, 1024] get-tuple-element(while_result), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
hlo_text, GetModuleConfig(true)));
SequentialHloOrdering order = BuildHloOrdering(module.get());
HloComputation* while_body = module->GetComputationWithName("while_body");
const std::vector<HloInstruction*>& instruction_sequence =
order.SequentialOrder(*while_body)->instructions();
auto get_index = [&](absl::string_view hlo_name) {
return absl::c_find_if(instruction_sequence,
[hlo_name](HloInstruction* instruction) {
return instruction->name() == hlo_name;
}) -
instruction_sequence.begin();
};
EXPECT_LT(get_index("recv"), get_index("send"));
EXPECT_LT(get_index("send"), get_index("recv-done"));
EXPECT_GE(get_index("send-done") - get_index("recv-done"), 8);
EXPECT_LT(abs(get_index("send-done") - get_index("result")), 2);
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
TEST_F(GpuHloScheduleTest, LHSSendRecvPairs2) {
const char* hlo_text = R"(
HloModule test
while_cond {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(25)
ROOT cond_result = pred[] compare(count, ub), direction=LT
}
while_body {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
send-data = get-tuple-element(%param), index=1
after-all-0 = token[] after-all()
recv-0 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all-0), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}}"
}
send-0 = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all-0),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}}"
}
recv-done-0 = (f32[1, 1024, 1024], token[]) recv-done(recv-0), channel_id=1
send-done-0 = token[] send-done(send-0), channel_id=1
recv-data-0 = f32[1, 1024, 1024] get-tuple-element(recv-done-0), index=0
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
replica = u32[] replica-id()
c10 = u32[] constant(10)
sum = u32[] add(replica, c10)
sum2 = u32[] add(sum, count)
conv = f32[] convert(sum2)
bc1 = f32[1, 1024, 1024] broadcast(conv), dimensions={}
after-all-1 = token[] after-all()
recv-1 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all-1), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{1, 0}}"
}
send-1 = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all-1),
channel_id=2, frontend_attributes={
_xla_send_recv_source_target_pairs="{{1, 0}}"
}
recv-done-1 = (f32[1, 1024, 1024], token[]) recv-done(recv-1), channel_id=2
send-done-1 = token[] send-done(send-1), channel_id=2
recv-data-1 = f32[1, 1024, 1024] get-tuple-element(recv-done-1), index=0
add2 = f32[1, 1024, 1024] add(recv-data-0, bc1)
add = f32[1, 1024, 1024] add(recv-data-1, add2)
ROOT result = (u32[], f32[1, 1024, 1024]) tuple(new_count, add)
}
ENTRY test_computation {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
while_init = (u32[], f32[1, 1024, 1024]) tuple(c0, init)
while_result = (u32[], f32[1, 1024, 1024]) while(while_init),
body=while_body, condition=while_cond
ROOT entry_result = f32[1, 1024, 1024] get-tuple-element(while_result), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
hlo_text, GetModuleConfig(true,
true)));
SequentialHloOrdering order = BuildHloOrdering(module.get());
HloComputation* while_body = module->GetComputationWithName("while_body");
const std::vector<HloInstruction*>& instruction_sequence =
order.SequentialOrder(*while_body)->instructions();
auto get_index = [&](absl::string_view hlo_name) {
return absl::c_find_if(instruction_sequence,
[hlo_name](HloInstruction* instruction) {
return instruction->name() == hlo_name;
}) -
instruction_sequence.begin();
};
EXPECT_TRUE(HasValidFingerprint(module.get()));
EXPECT_LT(get_index("recv-1"), get_index("send-1"));
EXPECT_LT(get_index("send-1"), get_index("recv-done-1"));
EXPECT_GT(get_index("send-done-1"), get_index("send-1"));
EXPECT_LT(get_index("send-done-1"), get_index("recv-0"));
EXPECT_LT(abs(get_index("send-done-0") - get_index("result")), 2);
}
TEST_F(GpuHloScheduleTest, LHSSendRecvAllReduce) {
const char* hlo_text = R"(
HloModule test
add (x: f32[], y: f32[]) -> f32[] {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(f32[] x, f32[] y)
}
while_cond {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(25)
ROOT cond_result = pred[] compare(count, ub), direction=LT
}
while_body {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
send-data = get-tuple-element(%param), index=1
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}}"
} |
2,041 | cpp | tensorflow/tensorflow | hlo_traversal | third_party/xla/xla/service/gpu/hlo_traversal.cc | third_party/xla/xla/service/gpu/hlo_traversal_test.cc | #ifndef XLA_SERVICE_GPU_HLO_TRAVERSAL_H_
#define XLA_SERVICE_GPU_HLO_TRAVERSAL_H_
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape.h"
namespace xla {
namespace gpu {
class HloFusionAdaptor;
class HloInstructionAdaptor {
public:
HloInstructionAdaptor() = default;
HloInstructionAdaptor(const HloInstruction& instruction,
const HloFusionAdaptor* parent)
: instruction_(&instruction), parent_(parent) {}
HloOpcode opcode() const { return instruction_->opcode(); }
absl::string_view name() const { return instruction_->name(); }
HloInstructionAdaptor GetOperand(int index) const;
absl::InlinedVector<HloInstructionAdaptor, 2> GetOperands() const;
absl::InlinedVector<HloInstructionAdaptor, 2> GetUsers() const;
const xla::Shape& shape() const { return instruction_->shape(); }
std::string ToString() const { return instruction_->ToString(); }
friend bool operator==(const HloInstructionAdaptor& lhs,
const HloInstructionAdaptor& rhs);
template <typename H>
friend H AbslHashValue(H h, const HloInstructionAdaptor& m);
const HloInstruction& instruction() const { return *instruction_; }
const HloFusionAdaptor& parent() const { return *parent_; }
private:
const HloInstruction* instruction_;
const HloFusionAdaptor* parent_;
};
template <typename H>
H AbslHashValue(H h, const HloInstructionAdaptor& m) {
return H::combine(std::move(h), m.instruction_->GetModule(),
m.instruction_->unique_id());
}
template <HloOpcode op, HloOpcode... rest>
bool IsOpcodeAnyOf(const HloInstruction* instr) {
return (instr->opcode() == op) || ((instr->opcode() == rest) || ...);
}
namespace internal {
class HloFusionInstructionAdaptor {
public:
virtual ~HloFusionInstructionAdaptor() = default;
virtual bool ContainsInstruction(const HloInstruction* instruction) const = 0;
virtual absl::InlinedVector<HloInstructionAdaptor, 2> GetRoots() const = 0;
virtual absl::InlinedVector<const HloInstruction*, 2> GetParameters()
const = 0;
virtual const HloInstruction& FusionInstruction() const = 0;
virtual absl::InlinedVector<HloInstructionAdaptor, 2>
MakeInstructionPostOrder() const = 0;
virtual std::string ToString() const = 0;
};
}
class HloFusionAdaptor {
public:
bool ContainsInstruction(HloInstructionAdaptor instruction) const;
bool ContainsInstruction(const HloInstruction* instruction) const;
absl::InlinedVector<HloInstructionAdaptor, 2> GetRoots() const;
absl::InlinedVector<const HloInstruction*, 2> GetParameters() const;
absl::InlinedVector<HloInstructionAdaptor, 2> MakeInstructionPostOrder()
const;
std::string ToString() const;
static std::unique_ptr<HloFusionAdaptor> ForInstruction(
const HloInstruction* instruction);
static std::unique_ptr<HloFusionAdaptor> ForProducerConsumer(
const HloInstruction* producer, const HloInstruction* consumer);
static std::unique_ptr<HloFusionAdaptor> ForComputation(
const HloComputation* computation);
private:
void AddInstruction(const HloInstruction* instruction);
void AddComputation(const HloComputation* computation);
absl::InlinedVector<std::unique_ptr<internal::HloFusionInstructionAdaptor>, 2>
fusion_instructions_;
};
enum class TraversalResult {
kAdvance,
kInterrupt,
kSkip,
};
void HloBfsConsumersFirstTraversal(
absl::Span<const HloInstructionAdaptor> roots,
const HloFusionAdaptor& fusion,
const std::function<TraversalResult(HloInstructionAdaptor node)>&
visit_node,
const std::function<void(HloInstructionAdaptor producer)>& visit_arg =
[](HloInstructionAdaptor) {});
void HloBfsProducersFirstTraversal(
absl::Span<const HloInstructionAdaptor> producers,
const HloFusionAdaptor& fusion,
const std::function<TraversalResult(HloInstructionAdaptor node)>&
visit_node);
bool HloAnyOf(absl::Span<const HloInstructionAdaptor> roots,
const HloFusionAdaptor& fusion,
const std::function<bool(HloInstructionAdaptor node)>& visit,
bool visit_operands = true);
bool HloAnyOf(absl::Span<const HloInstruction* const> roots,
const std::function<bool(const HloInstruction* node)>& visit,
bool visit_operands = true);
std::optional<HloInstructionAdaptor> HloFindIf(
absl::Span<const HloInstructionAdaptor> roots,
const HloFusionAdaptor& fusion,
const std::function<bool(HloInstructionAdaptor node)>& visit,
bool visit_operands = true);
std::optional<const HloInstruction*> HloFindIf(
absl::Span<const HloInstruction* const> roots,
const std::function<bool(const HloInstruction* node)>& visit,
bool visit_operands = true);
std::vector<const HloInstruction*> HloFindAll(
absl::Span<const HloInstruction* const> roots,
const std::function<bool(const HloInstruction* node)>& visit,
bool visit_operands = true);
std::vector<HloInstructionAdaptor> HloFindUseChain(HloInstructionAdaptor parent,
HloInstructionAdaptor root);
}
}
#endif
#include "xla/service/gpu/hlo_traversal.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <queue>
#include <sstream>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
namespace xla {
namespace gpu {
namespace {
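// Calls `fn` on each user of `value`, resolving through fusion boundaries:
// users of a fusion root are found via the surrounding get-tuple-elements,
// and a use by a fusion instruction contained in the adaptor is forwarded to
// the users of the corresponding fused parameter.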
template <typename F>
void ResolveUsers(const HloInstruction* value, const HloInstruction* user,
const HloFusionAdaptor& fusion_adaptor, F&& fn) {
if (user->opcode() == HloOpcode::kTuple && user->IsRoot()) {
if (auto* fusion = user->parent()->FusionInstruction()) {
for (const auto* gte : fusion->users()) {
if (gte->opcode() != HloOpcode::kGetTupleElement) {
fn(gte);
continue;
}
for (const auto* gte_user : gte->users()) {
ResolveUsers(gte, gte_user, fusion_adaptor, fn);
}
}
}
} else if (fusion_adaptor.ContainsInstruction(user) &&
user->opcode() == HloOpcode::kFusion) {
auto* param = user->fused_parameter(user->operand_index(value));
for (const auto* param_user : param->users()) {
fn(param_user);
}
} else {
fn(user);
}
}
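// Resolves `operand` through fusion boundaries: a get-tuple-element of a
// contained multi-output fusion resolves to the matching root-tuple operand,
// a contained fusion resolves to its fused expression root, and a fused
// parameter resolves to the corresponding operand of the fusion instruction.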
const HloInstruction* ResolveOperand(const HloInstruction* operand,
const HloFusionAdaptor& fusion_adaptor) {
if (operand->opcode() == HloOpcode::kGetTupleElement &&
operand->operand(0)->opcode() == HloOpcode::kFusion &&
operand->operand(0)->fused_expression_root()->opcode() ==
HloOpcode::kTuple &&
fusion_adaptor.ContainsInstruction(operand->operand(0))) {
return operand->operand(0)->fused_expression_root()->operand(
operand->tuple_index());
}
if (!fusion_adaptor.ContainsInstruction(operand)) {
return operand;
}
if (operand->opcode() == HloOpcode::kFusion) {
return operand->fused_expression_root();
}
if (operand->opcode() == HloOpcode::kParameter) {
if (auto* fusion = operand->parent()->FusionInstruction()) {
return ResolveOperand(fusion->operand(operand->parameter_number()),
fusion_adaptor);
}
}
return operand;
}
}
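// Adaptor for a single non-fusion instruction: the instruction is both the
// only root and the entire "fusion", and its operands act as the parameters.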
class SingleInstructionFusion : public internal::HloFusionInstructionAdaptor {
public:
explicit SingleInstructionFusion(const HloInstruction* instruction,
const HloFusionAdaptor* parent)
: instruction_(instruction), parent_(parent) {
CHECK_NE(instruction->opcode(), HloOpcode::kFusion)
<< "Use HloComputationFusion";
}
bool ContainsInstruction(const HloInstruction* instruction) const override {
return instruction == instruction_;
}
absl::InlinedVector<HloInstructionAdaptor, 2> GetRoots() const override {
return {HloInstructionAdaptor{*instruction_, parent_}};
}
absl::InlinedVector<const HloInstruction*, 2> GetParameters() const override {
const auto& operands = instruction_->operands();
return absl::InlinedVector<const HloInstruction*, 2>(operands.begin(),
operands.end());
}
const HloInstruction& FusionInstruction() const override {
return *instruction_;
}
absl::InlinedVector<HloInstructionAdaptor, 2> MakeInstructionPostOrder()
const override {
return {HloInstructionAdaptor{*instruction_, parent_}};
}
std::string ToString() const override { return instruction_->ToString(); }
private:
const HloInstruction* instruction_;
const HloFusionAdaptor* parent_;
};
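// Adaptor for a fusion computation: roots are the (possibly nested) non-tuple
// operands of the root tuple, and parameters are the operands of the fusion
// instruction.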
class HloComputationFusion : public internal::HloFusionInstructionAdaptor {
public:
explicit HloComputationFusion(const HloComputation* computation,
const HloFusionAdaptor* parent)
: computation_(computation), parent_(parent) {
CHECK(computation->IsFusionComputation());
roots_ = FindRoots(computation);
}
absl::InlinedVector<HloInstructionAdaptor, 2> FindRoots(
const HloComputation* computation) {
absl::InlinedVector<HloInstructionAdaptor, 2> roots;
std::function<void(const HloInstruction*)> get_roots;
get_roots = [&](const HloInstruction* instr) {
if (instr->opcode() == HloOpcode::kTuple) {
for (const auto* operand : instr->operands()) {
get_roots(operand);
}
} else {
HloInstructionAdaptor wrapped{*instr, parent_};
roots.push_back(wrapped);
}
};
get_roots(computation->root_instruction());
return roots;
}
bool ContainsInstruction(const HloInstruction* instruction) const override {
return instruction->parent() == computation_ ||
instruction == computation_->FusionInstruction();
}
absl::InlinedVector<HloInstructionAdaptor, 2> GetRoots() const override {
CHECK(!roots_.empty())
<< "No roots found in the computation. HloFusionAdaptor was likely "
"created for a non-fusion computation: "
<< computation_->ToString();
return roots_;
}
absl::InlinedVector<const HloInstruction*, 2> GetParameters() const override {
const auto& operands = computation_->FusionInstruction()->operands();
return absl::InlinedVector<const HloInstruction*, 2>(operands.begin(),
operands.end());
}
const HloInstruction& FusionInstruction() const override {
return *computation_->FusionInstruction();
}
absl::InlinedVector<HloInstructionAdaptor, 2> MakeInstructionPostOrder()
const override {
auto post_order = computation_->MakeInstructionPostOrder();
absl::InlinedVector<HloInstructionAdaptor, 2> result;
result.reserve(post_order.size() - computation_->num_parameters());
for (auto* instr : post_order) {
if (instr->opcode() == HloOpcode::kParameter ||
(instr->opcode() == HloOpcode::kTuple && instr->IsRoot())) {
continue;
}
result.emplace_back(*instr, parent_);
}
return result;
}
std::string ToString() const override { return computation_->ToString(); }
private:
const HloComputation* computation_;
absl::InlinedVector<HloInstructionAdaptor, 2> roots_;
const HloFusionAdaptor* parent_;
};
std::unique_ptr<HloFusionAdaptor> HloFusionAdaptor::ForInstruction(
const HloInstruction* instruction) {
if (instruction->opcode() == HloOpcode::kFusion) {
return ForComputation(instruction->fused_instructions_computation());
}
auto fusion_adaptor = std::make_unique<HloFusionAdaptor>();
fusion_adaptor->AddInstruction(instruction);
return fusion_adaptor;
}
std::unique_ptr<HloFusionAdaptor> HloFusionAdaptor::ForProducerConsumer(
const HloInstruction* producer, const HloInstruction* consumer) {
auto fusion_adaptor = std::make_unique<HloFusionAdaptor>();
fusion_adaptor->AddInstruction(producer);
fusion_adaptor->AddInstruction(consumer);
return fusion_adaptor;
}
std::unique_ptr<HloFusionAdaptor> HloFusionAdaptor::ForComputation(
const HloComputation* computation) {
auto fusion_adaptor = std::make_unique<HloFusionAdaptor>();
fusion_adaptor->AddComputation(computation);
return fusion_adaptor;
}
bool HloFusionAdaptor::ContainsInstruction(
HloInstructionAdaptor instruction) const {
return ContainsInstruction(&instruction.instruction());
}
bool HloFusionAdaptor::ContainsInstruction(
const HloInstruction* instruction) const {
for (const auto& fusion_instruction : fusion_instructions_) {
if (fusion_instruction->ContainsInstruction(instruction)) return true;
}
return false;
}
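// For a single fusion this returns its roots directly. For a
// producer/consumer pair, consumer roots that are parameters fed by the
// producer are replaced by the matching producer roots; if the producer is a
// multi-output fusion, producer roots that still have users outside the pair
// are appended as additional roots.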
absl::InlinedVector<HloInstructionAdaptor, 2> HloFusionAdaptor::GetRoots()
const {
auto roots = fusion_instructions_.back()->GetRoots();
if (fusion_instructions_.size() == 1) {
return roots;
}
CHECK_EQ(fusion_instructions_.size(), 2);
auto producer_roots = fusion_instructions_[0]->GetRoots();
const HloInstruction& producer_fusion =
fusion_instructions_[0]->FusionInstruction();
const HloInstruction& consumer_fusion =
fusion_instructions_.back()->FusionInstruction();
for (auto& root : roots) {
if (root.opcode() != HloOpcode::kParameter) {
continue;
}
const HloInstruction* operand =
consumer_fusion.operand(root.instruction().parameter_number());
int64_t root_index = 0;
if (operand->opcode() == HloOpcode::kGetTupleElement) {
root_index = operand->tuple_index();
operand = operand->operand(0);
}
if (operand == &producer_fusion) {
root = producer_roots[root_index];
}
}
if (!producer_fusion.IsMultiOutputFusion()) {
return roots;
}
absl::flat_hash_set<int64_t> root_indices_with_outside_usage;
for (HloInstruction* instr : producer_fusion.users()) {
bool has_outside_user = false;
int64_t root_index = 0;
if (instr->opcode() == HloOpcode::kGetTupleElement) {
for (HloInstruction* user : instr->users()) {
if (user != &consumer_fusion) {
root_index = instr->tuple_index();
has_outside_user = true;
break;
}
}
} else if (instr != &consumer_fusion) {
has_outside_user = true;
}
if (has_outside_user) {
root_indices_with_outside_usage.insert(root_index);
}
}
for (int64_t i = 0; i < producer_roots.size(); ++i) {
if (!root_indices_with_outside_usage.contains(i)) {
continue;
}
if (producer_roots[i].opcode() != HloOpcode::kParameter) {
roots.push_back(producer_roots[i]);
}
}
return roots;
}
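// For a producer/consumer pair this combines the consumer's parameters
// (except those fed by the producer) with the producer's parameters, while
// skipping producer operands that are merely passed through to the consumer,
// so that no parameter is listed twice.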
absl::InlinedVector<const HloInstruction*, 2> HloFusionAdaptor::GetParameters()
const {
if (fusion_instructions_.size() == 1) {
return fusion_instructions_.back()->GetParameters();
}
CHECK_EQ(fusion_instructions_.size(), 2);
absl::InlinedVector<const HloInstruction*, 2> combined_parameters;
const HloInstruction& producer_fusion =
fusion_instructions_[0]->FusionInstruction();
for (const auto& param : fusion_instructions_.back()->GetParameters()) {
const HloInstruction* operand = param;
if (operand->opcode() == HloOpcode::kGetTupleElement) {
operand = operand->operand(0);
}
if (operand != &producer_fusion) {
combined_parameters.push_back(param);
}
}
absl::flat_hash_set<const HloInstruction*> params(combined_parameters.begin(),
combined_parameters.end());
auto producer_roots = fusion_instructions_[0]->GetRoots();
absl::flat_hash_set<const HloInstruction*> parameters_to_skip;
for (const auto& root : producer_roots) {
if (root.opcode() == HloOpcode::kParameter) {
if (&root.instruction() == &producer_fusion) {
parameters_to_skip.insert(&producer_fusion);
} else if (root.instruction().user_count() <= 1) {
parameters_to_skip.insert(
producer_fusion.operand(root.instruction().parameter_number()));
}
}
}
for (auto param : fusion_instructions_[0]->GetParameters()) {
if (!parameters_to_skip.contains(param) && params.insert(param).second) {
combined_parameters.push_back(param);
}
}
return combined_parameters;
}
absl::InlinedVector<HloInstructionAdaptor, 2>
HloFusionAdaptor::MakeInstructionPostOrder() const {
absl::InlinedVector<HloInstructionAdaptor, 2> result_post_order;
for (const auto& fusion_instruction : fusion_instructions_) {
absl::c_move(fusion_instruction->MakeInstructionPostOrder(),
std::back_inserter(result_post_order));
}
return result_post_order;
}
std::string HloFusionAdaptor::ToString() const {
std::ostringstream ss;
for (const auto& fusion_instruction : fusion_instructions_) {
ss << fusion_instruction->ToString() << "\n";
}
return ss.str();
}
void HloFusionAdaptor::AddInstruction(const HloInstruction* instruction) {
if (instruction->opcode() == HloOpcode::kFusion) {
AddComputation(instruction->fused_instructions_computation());
} else {
fusion_instructions_.push_back(
std::make_unique<SingleInstructionFusion>(instruction, this));
}
}
void HloFusionAdaptor::AddComputation(const HloComputation* computation) {
fusion_instructions_.push_back(
std::make_unique<HloComputationFusion>(computation, this));
}
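// Operands are resolved across fusion boundaries: a parameter maps to the
// enclosing fusion's argument (when it differs from the parameter itself),
// and every other operand is mapped through ResolveOperand.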
absl::InlinedVector<HloInstructionAdaptor, 2>
HloInstructionAdaptor::GetOperands() const {
absl::InlinedVector<HloInstructionAdaptor, 2> operands;
if (instruction_->opcode() == HloOpcode::kParameter) {
auto operand = ResolveOperand(instruction_, *parent_);
if (operand != instruction_) {
operands.emplace_back(*operand, parent_);
}
} else {
for (const auto* operand : instruction_->operands()) {
operands.emplace_back(*ResolveOperand(operand, *parent_), parent_);
}
}
return operands;
}
HloInstructionAdaptor HloInstructionAdaptor::GetOperand(int index) const {
return HloInstructionAdaptor{
*ResolveOperand(instruction_->operand(index), *parent_), parent_};
}
absl::InlinedVector<HloInstructionAdaptor, 2> HloInstructionAdaptor::GetUsers()
const {
absl::InlinedVector<HloInstructionAdaptor, 2> users;
auto add_user = [&](const HloInstruction* instr) {
users.emplace_back(*instr, parent_);
};
if (instruction_->IsRoot()) {
if (auto* fusion = instruction_->parent()->FusionInstruction()) {
for (auto* user : fusion->users()) {
ResolveUsers(fusion, user, *parent_, add_user);
}
}
}
for (auto* user : instruction_->users()) {
ResolveUsers(instruction_, user, *parent_, add_user);
}
return users;
}
bool operator==(const HloInstructionAdaptor& lhs,
const HloInstructionAdaptor& rhs) {
return lhs.instruction_->GetModule() == rhs.instruction_->GetModule() &&
lhs.instruction_->unique_id() == rhs.instruction_->unique_id();
}
namespace {
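// Generic BFS over the instructions of `fusion`. The direction is selected
// by `visit_operands` (towards operands vs. towards users). Neighbors
// outside the fusion are reported once via `visit_arg` instead of being
// enqueued; `visit_node` may advance past a node, skip its neighbors, or
// interrupt the whole traversal.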
void HloBfsTraversal(
absl::Span<const HloInstructionAdaptor> roots,
const HloFusionAdaptor& fusion,
const std::function<TraversalResult(HloInstructionAdaptor node)>&
visit_node,
const std::function<void(HloInstructionAdaptor producer)>& visit_arg,
bool visit_operands) {
absl::flat_hash_set<HloInstructionAdaptor> visited;
std::queue<HloInstructionAdaptor> q;
auto enqueue = [&](const HloInstructionAdaptor& node) {
const auto& adjacent_nodes =
visit_operands ? node.GetOperands() : node.GetUsers();
    for (const auto& adjacent : adjacent_nodes) {
      if (visited.insert(adjacent).second) {
        if (fusion.ContainsInstruction(adjacent)) {
          q.push(adjacent);
        } else {
          visit_arg(adjacent);
        }
}
}
};
for (auto root : roots) {
if (visited.insert(root).second) {
q.push(root);
}
}
while (!q.empty()) {
HloInstructionAdaptor node = q.front();
q.pop();
switch (visit_node(node)) {
case TraversalResult::kAdvance:
enqueue(node);
break;
case TraversalResult::kInterrupt:
return;
case TraversalResult::kSkip:
break;
}
}
}
}
void HloBfsConsumersFirstTraversal(
absl::Span<const HloInstructionAdaptor> roots,
const HloFusionAdaptor& fusion,
const std::function<TraversalResult(HloInstructionAdaptor node)>&
visit_node,
const std::function<void(HloInstructionAdaptor producer)>& visit_arg) {
HloBfsTraversal(roots, fusion, visit_node, visit_arg,
true);
}
void HloBfsProducersFirstTraversal(
absl::Span<const HloInstructionAdaptor> producers,
const HloFusionAdaptor& fusion,
const std::function<TraversalResult(HloInstructionAdaptor node)>&
visit_node) {
HloBfsTraversal(
producers, fusion, visit_node, [](HloInstructionAdaptor) {},
false);
}
bool HloAnyOf(absl::Span<const HloInstructionAdaptor> roots,
const HloFusionAdaptor& fusion,
const std::function<bool(HloInstructionAdaptor node)>& visit,
bool visit_operands) {
return HloFindIf(roots, fusion, visit, visit_operands).has_value();
}
bool HloAnyOf(absl::Span<const HloInstruction* const> roots,
const std::function<bool(const HloInstruction* node)>& visit,
bool visit_operands) {
return HloFindIf(roots, visit, visit_operands).has_value();
}
std::optional<HloInstructionAdaptor> HloFindIf(
absl::Span<const HloInstructionAdaptor> roots,
const HloFusionAdaptor& fusion,
const std::function<bool(HloInstructionAdaptor node)>& visit,
bool visit_operands) {
std::optional<HloInstructionAdaptor> result = std::nullopt;
HloBfsTraversal(
roots, fusion,
[&](HloInstructionAdaptor node) {
if (visit(node)) {
result = node;
return TraversalResult::kInterrupt;
}
return TraversalResult::kAdvance;
},
[](HloInstructionAdaptor) {}, visit_operands);
return result;
}
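// BFS over plain HloInstructions (no fusion adaptor) collecting every node
// accepted by `visit`; with `find_first_only` the search stops at the first
// match, which is how HloFindIf below is implemented.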
std::vector<const HloInstruction*> HloFindAllImpl(
absl::Span<const HloInstruction* const> roots,
const std::function<bool(const HloInstruction* node)>& visit,
bool visit_operands, bool find_first_only = false) {
std::vector<const HloInstruction*> result;
absl::flat_hash_set<const HloInstruction*> visited;
std::queue<const HloInstruction*> q;
auto enqueue = [&](const HloInstruction* node) {
if (visit_operands) {
for (const HloInstruction* operand : node->operands()) {
if (visited.insert(operand).second) {
q.push(operand);
}
}
} else {
      for (const HloInstruction* user : node->users()) {
        if (visited.insert(user).second) {
          q.push(user);
        }
      }
}
};
for (auto root : roots) {
if (visited.insert(root).second) {
q.push(root);
}
}
while (!q.empty()) {
const HloInstruction* node = q.front();
q.pop();
if (visit(node)) {
result.push_back(node);
if (find_first_only) {
return result;
}
}
enqueue(node);
}
return result;
}
std::optional<const HloInstruction*> HloFindIf(
absl::Span<const HloInstruction* const> roots,
const std::function<bool(const HloInstruction* node)>& visit,
bool visit_operands) {
auto result = HloFindAllImpl(roots, visit, visit_operands,
true);
if (result.empty()) {
return std::nullopt;
}
return result[0];
}
std::vector<const HloInstruction*> HloFindAll(
absl::Span<const HloInstruction* const> roots,
const std::function<bool(const HloInstruction* node)>& visit,
bool visit_operands) {
  return HloFindAllImpl(roots, visit, visit_operands);
}
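// Returns the def-use chain from `parent` to `root` (both inclusive), found
// by a depth-first search over users, or an empty vector if `root` is not
// reachable from `parent`.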
std::vector<HloInstructionAdaptor> HloFindUseChain(HloInstructionAdaptor parent,
HloInstructionAdaptor root) {
absl::flat_hash_set<HloInstructionAdaptor> visited;
std::vector<HloInstructionAdaptor> result;
std::function<bool(HloInstructionAdaptor)> visit;
visit = [&](HloInstructionAdaptor node) {
if (node == root) return true;
for (const auto& user : node.GetUsers()) {
if (visited.insert(user).second && visit(user)) {
result.push_back(user);
return true;
}
}
return false;
};
if (visit(parent)) {
result.push_back(parent);
std::reverse(result.begin(), result.end());
} else {
result.clear();
}
return result;
}
}
} | #include "xla/service/gpu/hlo_traversal.h"
#include <optional>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
using ::testing::ElementsAre;
using ::testing::IsEmpty;
MATCHER_P(InstructionAdaptorName, name, "") { return arg.name() == name; }
class HloTraversalTest : public HloTestBase {};
const char kTestModule[] = R"(
HloModule test
scalar_add_computation {
scalar_lhs.0 = f32[] parameter(0)
scalar_rhs.0 = f32[] parameter(1)
ROOT add.0 = f32[] add(scalar_lhs.0, scalar_rhs.0)
}
fused_computation {
p0.1 = f32[] parameter(0)
p1.1 = f32[128] parameter(1)
mul = f32[128] multiply(p1.1, p1.1)
ROOT reduce.1 = f32[] reduce(mul, p0.1), dimensions={0}, to_apply=scalar_add_computation
}
fused_computation_1 {
p0.2 = f32[] parameter(0)
zero = f32[] constant(0.0)
is_positive = pred[] compare(p0.2, zero), direction=GE
not = pred[] not(is_positive)
ROOT tuple = (pred[], pred[]) tuple(is_positive, not)
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[128] parameter(1)
sum = f32[128] add(p1, p1)
log = f32[128] log(sum)
negate = f32[128] negate(log)
fusion = f32[] fusion(p0, negate), kind=kLoop, calls=fused_computation
fusion2 = (pred[], pred[]) fusion(fusion), kind=kLoop, calls=fused_computation_1
gte = pred[] get-tuple-element(fusion2), index=0
ROOT select = f32[] select(gte, fusion, p0)
})";
TEST_F(HloTraversalTest, AdaptorOperands) {
auto module = ParseAndReturnVerifiedModule(kTestModule).value();
auto fusion_adaptor = HloFusionAdaptor::ForProducerConsumer(
module->entry_computation()->GetInstructionWithName("fusion2"),
module->entry_computation()->GetInstructionWithName("select"));
HloInstructionAdaptor instr = fusion_adaptor->GetRoots()[0];
EXPECT_THAT(instr.GetOperands(),
ElementsAre(InstructionAdaptorName("is_positive"),
InstructionAdaptorName("fusion"),
InstructionAdaptorName("p0")));
}
TEST_F(HloTraversalTest, AdaptorUsers) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
fused_computation {
p0 = f32[] parameter(0)
neg = f32[] negate(p0)
add = f32[] add(p0, neg)
ROOT t = (f32[], f32[]) tuple(neg, add)
}
fused_computation_1 {
p0.0 = f32[] parameter(0)
mul = f32[] multiply(p0.0, p0.0)
ROOT neg.1 = f32[] negate(mul)
}
ENTRY entry {
p0 = f32[] parameter(0)
fusion = (f32[], f32[]) fusion(p0), kind=kLoop, calls=fused_computation
gte = f32[] get-tuple-element(fusion), index=0
add.1 = f32[] add(p0, gte)
fusion2 = f32[] fusion(gte), kind=kLoop, calls=fused_computation_1
exp.1 = f32[] exponential(fusion2)
ROOT res = (f32[], (f32[], f32[]), f32[], f32[]) tuple(add.1, fusion, fusion2, exp.1)
}
)")
.value();
auto fusion_adaptor1 = HloFusionAdaptor::ForProducerConsumer(
module->entry_computation()->GetInstructionWithName("fusion"),
module->entry_computation()->GetInstructionWithName("fusion2"));
HloInstructionAdaptor add{*module->GetComputationWithName("fused_computation")
->GetInstructionWithName("add"),
fusion_adaptor1.get()};
EXPECT_THAT(add.GetUsers(), ElementsAre(InstructionAdaptorName("add.1"),
InstructionAdaptorName("mul"),
InstructionAdaptorName("res")));
auto fusion_adaptor2 = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("fusion2"));
HloInstructionAdaptor mul{
*module->GetComputationWithName("fused_computation_1")
->GetInstructionWithName("mul"),
fusion_adaptor2.get()};
EXPECT_THAT(mul.GetUsers(), ElementsAre(InstructionAdaptorName("neg.1")));
HloInstructionAdaptor neg{
*module->GetComputationWithName("fused_computation_1")
->GetInstructionWithName("neg.1"),
fusion_adaptor2.get()};
EXPECT_THAT(neg.GetUsers(), ElementsAre(InstructionAdaptorName("exp.1")));
}
TEST_F(HloTraversalTest, TraverseFusionConsumerFirst) {
auto module = ParseAndReturnVerifiedModule(kTestModule).value();
std::vector<std::string> visited_nodes;
std::vector<std::string> visited_args;
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("fusion"));
HloBfsConsumersFirstTraversal(
fusion->GetRoots(), *fusion,
[&](HloInstructionAdaptor node) {
visited_nodes.emplace_back(node.name());
return TraversalResult::kAdvance;
},
[&](HloInstructionAdaptor arg) {
visited_args.emplace_back(arg.name());
});
EXPECT_THAT(visited_nodes, ElementsAre("reduce.1", "mul"));
EXPECT_THAT(visited_args, ElementsAre("p0", "negate"));
}
TEST_F(HloTraversalTest,
TraverseFusionConsumerFirstFromFusionRootAndInnerNode) {
auto module = ParseAndReturnVerifiedModule(kTestModule).value();
std::vector<std::string> visited_nodes;
std::vector<std::string> visited_args;
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("fusion"));
auto root = fusion->GetRoots()[0];
HloBfsConsumersFirstTraversal(
{root, root.GetOperand(0)}, *fusion,
[&](HloInstructionAdaptor node) {
visited_nodes.emplace_back(node.name());
return TraversalResult::kAdvance;
},
[&](HloInstructionAdaptor arg) {
visited_args.emplace_back(arg.name());
});
EXPECT_THAT(visited_nodes, ElementsAre("reduce.1", "mul"));
EXPECT_THAT(visited_args, ElementsAre("p0", "negate"));
}
TEST_F(HloTraversalTest, TraverseFusionProducerFirst) {
auto module = ParseAndReturnVerifiedModule(kTestModule).value();
std::vector<std::string> visited_nodes;
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("fusion"));
auto root = fusion->GetRoots()[0];
HloBfsProducersFirstTraversal({root.GetOperand(0)}, *fusion,
[&](HloInstructionAdaptor node) {
visited_nodes.emplace_back(node.name());
return TraversalResult::kAdvance;
});
EXPECT_THAT(visited_nodes, ElementsAre("mul", "reduce.1"));
}
TEST_F(HloTraversalTest, AbortTraversal) {
auto module = ParseAndReturnVerifiedModule(kTestModule).value();
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("fusion"));
std::vector<std::string> visited_nodes;
HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion,
[&](HloInstructionAdaptor node) {
visited_nodes.emplace_back(node.name());
return node.opcode() == HloOpcode::kReduce
? TraversalResult::kAdvance
: TraversalResult::kInterrupt;
});
EXPECT_THAT(visited_nodes, ElementsAre("reduce.1", "mul"));
}
TEST_F(HloTraversalTest, FindArguments) {
auto module = ParseAndReturnVerifiedModule(kTestModule).value();
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("fusion"));
std::vector<std::string> producers;
absl::c_for_each(fusion->GetParameters(),
[&](const HloInstruction* producer) {
producers.emplace_back(producer->name());
});
EXPECT_THAT(producers, ElementsAre("p0", "negate"));
}
TEST_F(HloTraversalTest, FindArgumentsAfterFusion) {
auto module = ParseAndReturnVerifiedModule(kTestModule).value();
auto fusion = HloFusionAdaptor::ForProducerConsumer(
module->entry_computation()->GetInstructionWithName("negate"),
module->entry_computation()->GetInstructionWithName("fusion"));
std::vector<std::string> producers;
absl::c_for_each(fusion->GetParameters(),
[&](const HloInstruction* producer) {
producers.emplace_back(producer->name());
});
EXPECT_THAT(producers, ElementsAre("p0", "log"));
}
TEST_F(HloTraversalTest, FindIf) {
auto module = ParseAndReturnVerifiedModule(kTestModule).value();
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("fusion"));
auto result =
HloFindIf(fusion->GetRoots(), *fusion, [&](HloInstructionAdaptor node) {
return node.opcode() == HloOpcode::kMultiply;
});
ASSERT_NE(result, std::nullopt);
ASSERT_EQ(result->name(), "mul");
}
TEST_F(HloTraversalTest, NotFound) {
auto module = ParseAndReturnVerifiedModule(kTestModule).value();
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("fusion"));
auto result = HloFindIf(fusion->GetRoots(), *fusion,
[&](HloInstructionAdaptor node) { return false; });
ASSERT_EQ(result, std::nullopt);
}
TEST_F(HloTraversalTest, FindAllMultiple) {
const char kConverts[] = R"(
HloModule test
ENTRY entry {
p0 = s8[128] parameter(0)
p1 = pred[128] parameter(1)
p1c = s8[128] convert(p1)
p1c1 = f16[128] convert(p1c)
p0c = f16[128] convert(p0)
ROOT diff = f16[128] subtract(p0c, p1c1)
})";
auto module = ParseAndReturnVerifiedModule(kConverts).value();
auto root = module->entry_computation()->GetInstructionWithName("diff");
std::vector<const HloInstruction*> converts =
HloFindAll({root}, [&](const HloInstruction* node) {
return node->opcode() == HloOpcode::kConvert;
});
auto get = [&](absl::string_view name) {
return module->entry_computation()->GetInstructionWithName(name);
};
EXPECT_THAT(converts, ElementsAre(get("p0c"), get("p1c1"), get("p1c")));
}
TEST_F(HloTraversalTest, FindAllNotFound) {
const char kConverts[] = R"(
HloModule test
ENTRY entry {
p0 = s8[128] parameter(0)
p1 = f16[128] parameter(1)
p0c = f16[128] convert(p0)
ROOT diff = f16[128] subtract(p0c, p1)
})";
auto module = ParseAndReturnVerifiedModule(kConverts).value();
auto root = module->entry_computation()->GetInstructionWithName("diff");
std::vector<const HloInstruction*> converts =
HloFindAll({root}, [&](const HloInstruction* node) {
return node->opcode() == HloOpcode::kAdd;
});
EXPECT_THAT(converts, IsEmpty());
}
const char kTwoFusions[] = R"(
HloModule test
scalar_add_computation {
scalar_lhs.0 = f32[] parameter(0)
scalar_rhs.0 = f32[] parameter(1)
ROOT add.0 = f32[] add(scalar_lhs.0, scalar_rhs.0)
}
fused_computation_1 {
p0.1 = f32[] parameter(0)
p1.1 = f32[128] parameter(1)
mul = f32[128] multiply(p1.1, p1.1)
ROOT reduce.1 = f32[] reduce(mul, p0.1), dimensions={0}, to_apply=scalar_add_computation
}
fused_computation_2 {
p0.2 = f32[] parameter(0)
p1.2 = f32[128] parameter(1)
ROOT reduce.2 = f32[] reduce(p1.2, p0.2), dimensions={0}, to_apply=scalar_add_computation
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[128] parameter(1)
sum = f32[128] add(p1, p1)
negate = f32[128] negate(sum)
fusion.1 = f32[] fusion(p0, negate), kind=kLoop, calls=fused_computation_1
fusion.2 = f32[] fusion(fusion.1, negate), kind=kLoop, calls=fused_computation_2
ROOT difference = f32[] subtract(fusion.2, p0)
})";
TEST_F(HloTraversalTest, FuseFusionConsumer) {
auto module = ParseAndReturnVerifiedModule(kTwoFusions).value();
auto producer = module->entry_computation()->GetInstructionWithName("negate");
auto consumer =
module->entry_computation()->GetInstructionWithName("fusion.1");
auto fusion = HloFusionAdaptor::ForProducerConsumer(producer, consumer);
HloInstructionAdaptor reduce_1(
*module->GetComputationWithName("fused_computation_1")
->GetInstructionWithName("reduce.1"),
fusion.get());
EXPECT_THAT(reduce_1.GetUsers(),
ElementsAre(InstructionAdaptorName("fusion.2")));
std::vector<std::string> nodes;
std::vector<std::string> params;
HloBfsConsumersFirstTraversal(
fusion->GetRoots(), *fusion,
[&](HloInstructionAdaptor node) {
nodes.emplace_back(node.name());
return TraversalResult::kAdvance;
},
[&](HloInstructionAdaptor param) { params.emplace_back(param.name()); });
EXPECT_THAT(nodes, ElementsAre("reduce.1", "mul", "negate"));
EXPECT_THAT(params, ElementsAre("p0", "sum"));
}
TEST_F(HloTraversalTest, FuseFusionProducer) {
auto module = ParseAndReturnVerifiedModule(kTwoFusions).value();
auto producer =
module->entry_computation()->GetInstructionWithName("fusion.2");
auto consumer =
module->entry_computation()->GetInstructionWithName("difference");
auto fusion = HloFusionAdaptor::ForProducerConsumer(producer, consumer);
HloInstructionAdaptor reduce_2(
*module->GetComputationWithName("fused_computation_2")
->GetInstructionWithName("reduce.2"),
fusion.get());
EXPECT_THAT(reduce_2.GetOperands(),
ElementsAre(InstructionAdaptorName("negate"),
InstructionAdaptorName("fusion.1")));
std::vector<std::string> nodes;
std::vector<std::string> params;
HloBfsConsumersFirstTraversal(
fusion->GetRoots(), *fusion,
[&](HloInstructionAdaptor node) {
nodes.emplace_back(node.name());
return TraversalResult::kAdvance;
},
[&](HloInstructionAdaptor arg) { params.emplace_back(arg.name()); });
EXPECT_THAT(nodes, ElementsAre("difference", "reduce.2"));
EXPECT_THAT(params, ElementsAre("p0", "negate", "fusion.1"));
}
TEST_F(HloTraversalTest, FuseFusionConsumerAndProducer) {
auto module = ParseAndReturnVerifiedModule(kTwoFusions).value();
auto producer =
module->entry_computation()->GetInstructionWithName("fusion.1");
auto consumer =
module->entry_computation()->GetInstructionWithName("fusion.2");
auto fusion = HloFusionAdaptor::ForProducerConsumer(producer, consumer);
std::vector<std::string> nodes;
HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion,
[&](HloInstructionAdaptor node) {
nodes.emplace_back(node.name());
return TraversalResult::kAdvance;
});
std::vector<std::string> params;
absl::c_for_each(fusion->GetParameters(), [&](const HloInstruction* param) {
params.emplace_back(param->name());
});
EXPECT_THAT(nodes, ElementsAre("reduce.2", "reduce.1", "mul"));
EXPECT_THAT(params, ElementsAre("negate", "p0"));
}
TEST_F(HloTraversalTest, FuseNonFusionConsumerAndProducer) {
auto module = ParseAndReturnVerifiedModule(kTestModule).value();
auto producer = module->entry_computation()->GetInstructionWithName("log");
auto consumer = module->entry_computation()->GetInstructionWithName("negate");
auto fusion = HloFusionAdaptor::ForProducerConsumer(producer, consumer);
std::vector<std::string> nodes;
HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion,
[&](HloInstructionAdaptor node) {
nodes.emplace_back(node.name());
return TraversalResult::kAdvance;
});
EXPECT_THAT(nodes, ElementsAre("negate", "log"));
}
TEST_F(HloTraversalTest, SingleInstructionFusionOfFusion) {
auto module = ParseAndReturnVerifiedModule(kTwoFusions).value();
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("fusion.1"));
std::vector<std::string> nodes;
HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion,
[&](HloInstructionAdaptor node) {
nodes.emplace_back(node.name());
return TraversalResult::kAdvance;
});
EXPECT_THAT(nodes, ElementsAre("reduce.1", "mul"));
}
TEST_F(HloTraversalTest, SingleInstructionFusionOfInstruction) {
auto module = ParseAndReturnVerifiedModule(kTwoFusions).value();
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("negate"));
std::vector<std::string> nodes;
HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion,
[&](HloInstructionAdaptor node) {
nodes.emplace_back(node.name());
return TraversalResult::kAdvance;
});
EXPECT_THAT(nodes, ElementsAre("negate"));
}
TEST_F(HloTraversalTest, MultiOutputFusionDuplicateRoot) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
fused_computation {
p0.1 = f32[128] parameter(0)
p1.1 = f32[128] parameter(1)
mul = f32[128] multiply(p0.1, p1.1)
ROOT res = (f32[128], f32[128]) tuple(mul, mul)
}
ENTRY entry {
p0 = f32[128] parameter(0)
p1 = f32[128] parameter(1)
ROOT fusion = (f32[128], f32[128]) fusion(p0, p1), kind=kLoop, calls=fused_computation
})")
.value();
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("fusion"));
EXPECT_THAT(fusion->GetRoots(), ElementsAre(InstructionAdaptorName("mul"),
InstructionAdaptorName("mul")));
}
TEST_F(HloTraversalTest, MakeInstructionsPostOrder_SingleInstruction) {
auto module = ParseAndReturnVerifiedModule(kTwoFusions).value();
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("negate"));
auto nodes = fusion->MakeInstructionPostOrder();
EXPECT_THAT(nodes, ElementsAre(InstructionAdaptorName("negate")));
}
TEST_F(HloTraversalTest, MakeInstructionsPostOrder_TwoFusions) {
auto module = ParseAndReturnVerifiedModule(kTwoFusions).value();
auto fusion = HloFusionAdaptor::ForProducerConsumer(
module->entry_computation()->GetInstructionWithName("fusion.1"),
module->entry_computation()->GetInstructionWithName("fusion.2"));
auto nodes = fusion->MakeInstructionPostOrder();
EXPECT_THAT(nodes, ElementsAre(InstructionAdaptorName("mul"),
InstructionAdaptorName("reduce.1"),
InstructionAdaptorName("reduce.2")));
}
TEST_F(HloTraversalTest, MakeInstructionsPostOrder_TwoMultiOutputFusions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
scalar_add_computation {
scalar_lhs.0 = f32[] parameter(0)
scalar_rhs.0 = f32[] parameter(1)
ROOT add.0 = f32[] add(scalar_lhs.0, scalar_rhs.0)
}
fused_computation_1 {
p0.1 = f32[] parameter(0)
p1.1 = f32[128] parameter(1)
mul = f32[128] multiply(p1.1, p1.1)
reduce.1 = f32[] reduce(mul, p0.1), dimensions={0}, to_apply=scalar_add_computation
ROOT t = (f32[128], f32[]) tuple(mul, reduce.1)
}
fused_computation_2 {
p0.2 = f32[] parameter(0)
p1.2 = f32[128] parameter(1)
neg = f32[128] negate(p1.2)
reduce.2 = f32[] reduce(neg, p0.2), dimensions={0}, to_apply=scalar_add_computation
ROOT t2 = (f32[], f32[128]) tuple(reduce.2, neg)
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[128] parameter(1)
sum = f32[128] add(p1, p1)
negate = f32[128] negate(sum)
fusion.1 = (f32[128], f32[]) fusion(p0, negate), kind=kLoop, calls=fused_computation_1
gte1 = f32[128] get-tuple-element(fusion.1), index=0
gte2 = f32[] get-tuple-element(fusion.1), index=1
fusion.2 = (f32[], f32[128]) fusion(p0, gte1), kind=kLoop, calls=fused_computation_2
gte3 = f32[] get-tuple-element(fusion.2), index=0
gte4 = f32[128] get-tuple-element(fusion.2), index=1
difference = f32[] subtract(gte3, p0)
ROOT res = (f32[], f32[128]) tuple(difference, gte4)
})")
.value();
auto fusion = HloFusionAdaptor::ForProducerConsumer(
module->entry_computation()->GetInstructionWithName("fusion.1"),
module->entry_computation()->GetInstructionWithName("fusion.2"));
auto nodes = fusion->MakeInstructionPostOrder();
EXPECT_THAT(nodes, ElementsAre(InstructionAdaptorName("mul"),
InstructionAdaptorName("reduce.1"),
InstructionAdaptorName("neg"),
InstructionAdaptorName("reduce.2")));
}
const char kTwoMultiOutputFusions[] = R"(
HloModule mof
mof_producer {
param0 = f32[10]{0} parameter(0)
param1 = f32[10]{0} parameter(1)
param2 = f32[10]{0} parameter(2)
add = f32[10]{0} add(param0, param1)
sub = f32[10]{0} subtract(param0, param1)
ROOT res = (f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}) tuple(param1, add, sub, param0, param2)
}
mof_consumer {
param0.0 = f32[10]{0} parameter(0)
param1.0 = f32[10]{0} parameter(1)
param2.0 = f32[10]{0} parameter(2)
mul = f32[10]{0} multiply(param0.0, param1.0)
div = f32[10]{0} divide(param0.0, param1.0)
ROOT res = (f32[10]{0}, f32[10]{0}, f32[10]{0}) tuple(mul, div, param2.0)
}
ENTRY main {
p0 = f32[10]{0} parameter(0)
p1 = f32[10]{0} parameter(1)
p2 = f32[10]{0} parameter(2)
producer = (f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}) fusion(p0, p1, p2), kind=kLoop, calls=mof_producer
gte0 = f32[10]{0} get-tuple-element(producer), index=0
gte1 = f32[10]{0} get-tuple-element(producer), index=1
gte2 = f32[10]{0} get-tuple-element(producer), index=2
gte3 = f32[10]{0} get-tuple-element(producer), index=3
gte4 = f32[10]{0} get-tuple-element(producer), index=4
consumer = (f32[10]{0}, f32[10]{0}, f32[10]{0}) fusion(gte1, gte2, gte3), kind=kLoop, calls=mof_consumer
gte5 = f32[10]{0} get-tuple-element(consumer), index=0
gte6 = f32[10]{0} get-tuple-element(consumer), index=1
gte7 = f32[10]{0} get-tuple-element(consumer), index=2
ROOT res = tuple(gte0, gte1, gte3, gte4, gte5, gte6, gte7)
})";
TEST_F(HloTraversalTest, GetParametersMultiOutputFusion) {
auto module = ParseAndReturnVerifiedModule(kTwoMultiOutputFusions).value();
auto producer =
module->entry_computation()->GetInstructionWithName("producer");
auto consumer =
module->entry_computation()->GetInstructionWithName("consumer");
auto fusion_adaptor =
HloFusionAdaptor::ForProducerConsumer(producer, consumer);
auto p0 = module->entry_computation()->GetInstructionWithName("p0");
auto p1 = module->entry_computation()->GetInstructionWithName("p1");
EXPECT_THAT(fusion_adaptor->GetParameters(), ElementsAre(p0, p1));
consumer->MergeFusionInstructionIntoMultiOutput(producer);
EXPECT_THAT(consumer->operands(), ElementsAre(p0, p1));
}
TEST_F(HloTraversalTest, GetRootsMultiOutputFusion) {
auto module = ParseAndReturnVerifiedModule(kTwoMultiOutputFusions).value();
auto consumer_fusion_instr =
module->entry_computation()->GetInstructionWithName("consumer");
auto producer_fusion_instr =
module->entry_computation()->GetInstructionWithName("producer");
auto fusion_adaptor = HloFusionAdaptor::ForProducerConsumer(
producer_fusion_instr, consumer_fusion_instr);
auto producer_computation = module->GetComputationWithName("mof_producer");
auto producer = HloFusionAdaptor::ForComputation(producer_computation);
auto consumer_computation = module->GetComputationWithName("mof_consumer");
auto consumer = HloFusionAdaptor::ForComputation(consumer_computation);
EXPECT_THAT(fusion_adaptor->GetRoots(),
ElementsAre(
HloInstructionAdaptor{
*consumer_computation->GetInstructionWithName("mul"),
consumer.get()},
HloInstructionAdaptor{
*consumer_computation->GetInstructionWithName("div"),
consumer.get()},
HloInstructionAdaptor{
*producer_computation->GetInstructionWithName("param0"),
producer.get()},
HloInstructionAdaptor{
*producer_computation->GetInstructionWithName("add"),
producer.get()}));
consumer_fusion_instr->MergeFusionInstructionIntoMultiOutput(
producer_fusion_instr);
EXPECT_THAT(consumer_fusion_instr->fused_expression_root(),
GmockMatch(m::Tuple(
m::Multiply(m::Add(m::Parameter(0), m::Parameter(1)),
m::Subtract(m::Parameter(0), m::Parameter(1))),
m::Divide(m::Add(m::Parameter(0), m::Parameter(1)),
m::Subtract(m::Parameter(0), m::Parameter(1))),
m::Parameter(0), m::Add(m::Parameter(0), m::Parameter(1)))));
}
TEST_F(HloTraversalTest, HloFindUseChain) {
auto module = ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
negate = f32[] negate(p0)
log = f32[] log(p0)
sum = f32[] add(p0, log)
exp = f32[] exponential(p1)
ROOT call = f32[] custom-call(negate, exp, sum), custom_call_target="it"
}
ENTRY main {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT fusion = f32[] fusion(p0, p1), kind=kLoop, calls=fusion
}
)")
.value();
auto* fusion_computation = module->GetComputationWithName("fusion");
auto fusion = HloFusionAdaptor::ForComputation(fusion_computation);
auto get = [&](absl::string_view name) {
return HloInstructionAdaptor{
*fusion_computation->GetInstructionWithName(name), fusion.get()};
};
auto p0 = get("p0");
auto p1 = get("p1");
auto log = get("log");
auto sum = get("sum");
auto negate = get("negate");
auto exp = get("exp");
auto call = get("call");
EXPECT_THAT(HloFindUseChain(p0, p0), ElementsAre(p0));
EXPECT_THAT(HloFindUseChain(p0, p1), IsEmpty());
EXPECT_THAT(HloFindUseChain(p0, call), ElementsAre(p0, negate, call));
EXPECT_THAT(HloFindUseChain(p0, sum), ElementsAre(p0, log, sum));
EXPECT_THAT(HloFindUseChain(p1, exp), ElementsAre(p1, exp));
EXPECT_THAT(HloFindUseChain(negate, exp), IsEmpty());
EXPECT_THAT(HloFindUseChain(call, p0), IsEmpty());
}
}
}
} |
2,042 | cpp | tensorflow/tensorflow | gemm_fusion | third_party/xla/xla/service/gpu/transforms/gemm_fusion.cc | third_party/xla/xla/service/gpu/transforms/gemm_fusion_test.cc | #ifndef XLA_SERVICE_GPU_GEMM_FUSION_H_
#define XLA_SERVICE_GPU_GEMM_FUSION_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/service/instruction_fusion.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
bool ShouldTritonHandleGEMM(HloDotInstruction&,
const se::GpuComputeCapability&);
class GemmFusion : public HloModulePass {
public:
explicit GemmFusion(const se::GpuComputeCapability& gpu_version)
: gpu_version_(gpu_version) {}
absl::string_view name() const override { return "triton-gemm-rewriter"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
se::GpuComputeCapability gpu_version_;
};
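// Usage sketch (illustrative only; mirrors the unit tests in this entry):
//   GemmFusion pass(se::CudaComputeCapability{
//       se::CudaComputeCapability::AMPERE, /*minor=*/0});
//   TF_ASSIGN_OR_RETURN(bool changed,
//                       pass.Run(module, /*execution_threads=*/{}));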
}
}
#endif
#include "xla/service/gpu/gemm_fusion.h"
#include <array>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <queue>
#include <string>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_padding_requirements.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/triton_fusion_analysis.h"
#include "xla/service/gpu/triton_support.h"
#include "xla/service/gpu/triton_tiling_propagation.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using triton_fusion::CombineDotRequirements;
using triton_fusion::DimensionOrder;
using triton_fusion::DimOrderMap;
using triton_fusion::DimOrdersAndReqs;
using triton_fusion::DimOrdersAndReqsOrError;
using triton_fusion::DotProperties;
using triton_fusion::DotRequirements;
using triton_fusion::DotRequirementsOrError;
using triton_fusion::FusionContext;
using triton_fusion::GetPropagatedDimOrdersAndRequirementsIfProfitablyFusible;
using triton_fusion::TransformDirection;
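// Minimal directed-graph helper used for the fusion plan below: the first
// node added (id 0) is the root, and arcs point from a node towards the
// nodes created for its operands.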
class AdjacencyList {
public:
using NodeId = int64_t;
NodeId AddNode() {
adj_.emplace_back();
return adj_.size() - 1;
}
const std::vector<NodeId>& GetOutNeighbors(NodeId node_id) const {
return adj_.at(node_id);
}
void ReserveSpaceForOutNeighbors(NodeId node_id, size_t count) {
adj_.at(node_id).reserve(count);
}
void AddArc(NodeId from, NodeId to) { adj_.at(from).push_back(to); }
NodeId GetRoot() const {
CHECK(!adj_.empty());
return 0;
}
private:
std::vector<std::vector<NodeId>> adj_;
};
struct HloAndDimOrder {
const HloInstruction* original_hlo = nullptr;
DimensionOrder dim_order;
};
struct HloAndIterSpec {
const HloInstruction* original_hlo;
TensorIterationSpec iter_spec;
auto ToTuple() const { return std::make_tuple(original_hlo, iter_spec); }
bool operator==(const HloAndIterSpec& other) const {
return ToTuple() == other.ToTuple();
}
template <typename H>
friend H AbslHashValue(H h, const HloAndIterSpec& key) {
return H::combine(std::move(h), key.ToTuple());
}
};
struct NodeFusionPlan {
const HloInstruction* original_hlo = nullptr;
bool should_fuse = false;
};
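// A fusion plan is the adjacency graph above plus, per node, the original
// HLO and whether it should be fused or turned into a fusion parameter.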
struct FusionPlan {
AdjacencyList graph;
absl::flat_hash_map<AdjacencyList::NodeId, NodeFusionPlan> map;
};
struct FusionPlanAndRequirements {
FusionPlan fusion_plan;
DotRequirements requirements;
};
struct HlosAndRequirements {
const HloInstruction* original_hlo = nullptr;
const HloInstruction* fused_hlo = nullptr;
DotRequirements requirements;
};
HloInstruction& FuseDot(const HloDotInstruction& dot,
const HloInstruction& fused_lhs,
const HloInstruction& fused_rhs,
std::optional<const HloInstruction*> fused_meta,
                        HloComputation::Builder& builder) {
VLOG(3) << "Fusing " << dot.ToString();
std::vector<HloInstruction*> hlo_new_operands = {
const_cast<HloInstruction*>(&fused_lhs),
const_cast<HloInstruction*>(&fused_rhs)};
if (fused_meta.has_value()) {
hlo_new_operands.push_back(const_cast<HloInstruction*>(fused_meta.value()));
}
return *builder.AddInstruction(
dot.CloneWithNewOperands(dot.shape(), hlo_new_operands));
}
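// Tells how many new parameters fusing `hlo` would create. A parameter or a
// non-scalar constant adds none; any other instruction adds all of its own
// inputs and removes its single output, hence operand_count() - 1.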
int64_t NumAddedParameters(const HloInstruction& hlo) {
if (hlo.opcode() == HloOpcode::kParameter ||
(hlo.opcode() == HloOpcode::kConstant &&
!ShapeUtil::IsScalar(hlo.shape()))) {
return 0;
}
return hlo.operand_count() - 1;
}
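// The helpers below propagate a dimension order across `hlo` (towards its
// operands or towards a user) and merge the resulting requirements with
// those accumulated so far; std::nullopt means the propagation is
// unsupported or unprofitable, or that the requirements conflict.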
std::optional<DimOrdersAndReqs> GetOperandDimOrdersAndCombinedReqs(
const HloInstruction& hlo, const DimensionOrder& dim_order,
const DotProperties& properties,
const se::GpuComputeCapability& gpu_version,
const DotRequirements& requirements) {
DimOrdersAndReqsOrError dim_orders_and_new_reqs =
GetPropagatedDimOrdersAndRequirements(
hlo, dim_order, TransformDirection::kOutputToInput, properties);
if (!std::holds_alternative<DimOrdersAndReqs>(dim_orders_and_new_reqs)) {
return std::nullopt;
}
DotRequirementsOrError combined_reqs = CombineDotRequirements(
requirements,
std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).requirements);
if (!std::holds_alternative<DotRequirements>(combined_reqs)) {
return std::nullopt;
}
return DimOrdersAndReqs{
std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).dim_orders,
std::get<DotRequirements>(combined_reqs)};
}
std::optional<DimOrdersAndReqs> GetOperandDimOrdersAndCombinedReqsIfProfitable(
const HloInstruction& hlo, const DimensionOrder& dim_order,
const DotProperties& properties,
const se::GpuComputeCapability& gpu_version,
const DotRequirements& requirements) {
DimOrdersAndReqsOrError dim_orders_and_new_reqs =
GetPropagatedDimOrdersAndRequirementsIfProfitablyFusible(
hlo, TransformDirection::kOutputToInput,
std::nullopt, dim_order, gpu_version,
properties);
if (!std::holds_alternative<DimOrdersAndReqs>(dim_orders_and_new_reqs)) {
return std::nullopt;
}
DotRequirementsOrError combined_reqs = CombineDotRequirements(
requirements,
std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).requirements);
if (!std::holds_alternative<DotRequirements>(combined_reqs)) {
return std::nullopt;
}
return DimOrdersAndReqs{
std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).dim_orders,
std::get<DotRequirements>(combined_reqs)};
}
std::optional<DimOrdersAndReqs> GetUserDimOrdersAndCombinedReqsIfProfitable(
const HloInstruction& hlo, const DimensionOrder& hlo_dim_order,
const HloInstruction& user, const DotProperties& properties,
const se::GpuComputeCapability& gpu_version,
const DotRequirements& requirements) {
DimOrdersAndReqsOrError dim_orders_and_new_reqs =
GetPropagatedDimOrdersAndRequirementsIfProfitablyFusible(
user, TransformDirection::kInputToOutput, user.operand_index(&hlo),
hlo_dim_order, gpu_version, properties);
if (!std::holds_alternative<DimOrdersAndReqs>(dim_orders_and_new_reqs)) {
return std::nullopt;
}
DotRequirementsOrError combined_reqs = CombineDotRequirements(
requirements,
std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).requirements);
if (!std::holds_alternative<DotRequirements>(combined_reqs)) {
return std::nullopt;
}
return DimOrdersAndReqs{
std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).dim_orders,
std::get<DotRequirements>(combined_reqs)};
}
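// Builds a fusion plan by a BFS from `root_hlo` towards its operands.
// Identical (hlo, iteration spec) pairs are deduplicated via node reuse.
// When fusing a node would push the number of fusion inputs above
// `max_params`, the node is requeued and, if the limit persists, left
// unfused so that it becomes a parameter.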
FusionPlanAndRequirements BuildFusionPlanTowardOperands(
const HloInstruction& root_hlo, const DimensionOrder& root_dim_order,
const std::optional<int>& max_params,
const se::GpuComputeCapability& gpu_version,
const DotProperties& properties,
const DotRequirements& requirements_so_far) {
CHECK(!max_params.has_value() || max_params.value() >= 1);
AdjacencyList graph;
absl::flat_hash_map<AdjacencyList::NodeId, HloAndDimOrder>
hlo_and_dim_order_map;
absl::flat_hash_map<AdjacencyList::NodeId, NodeFusionPlan> fusion_plan_map;
absl::flat_hash_map<HloAndIterSpec, AdjacencyList::NodeId> node_reuse_map;
DotRequirements combined_reqs = requirements_so_far;
auto get_or_create_fusion_node =
[&](const HloInstruction& hlo, const DimensionOrder& dim_order,
bool* is_new_node = nullptr) -> AdjacencyList::NodeId {
HloAndIterSpec reuse_key = {&hlo, dim_order.ToTensorIterationSpec()};
if (auto it = node_reuse_map.find(reuse_key); it != node_reuse_map.end()) {
if (is_new_node != nullptr) {
*is_new_node = false;
}
return it->second;
}
AdjacencyList::NodeId node_id = graph.AddNode();
CHECK(hlo_and_dim_order_map.insert({node_id, {&hlo, dim_order}}).second);
CHECK(node_reuse_map.insert({reuse_key, node_id}).second);
if (is_new_node != nullptr) {
*is_new_node = true;
}
return node_id;
};
AdjacencyList::NodeId root =
get_or_create_fusion_node(root_hlo, root_dim_order);
absl::flat_hash_set<AdjacencyList::NodeId> inputs({root});
std::queue<AdjacencyList::NodeId> queue({root});
int64_t num_requeued = 0;
while (queue.size() > num_requeued) {
AdjacencyList::NodeId node_id = queue.front();
queue.pop();
const HloAndDimOrder& hlo_and_dim_order = hlo_and_dim_order_map.at(node_id);
const HloInstruction& original_hlo = *hlo_and_dim_order.original_hlo;
const DimensionOrder& dim_order = hlo_and_dim_order.dim_order;
if (max_params.has_value() &&
inputs.size() + NumAddedParameters(original_hlo) > max_params.value()) {
queue.push(node_id);
++num_requeued;
continue;
}
num_requeued = 0;
if (original_hlo.opcode() == HloOpcode::kParameter) {
CHECK(fusion_plan_map
.insert({node_id, {&original_hlo, false}})
.second);
continue;
}
auto opt_result = GetOperandDimOrdersAndCombinedReqsIfProfitable(
original_hlo, dim_order, properties, gpu_version, combined_reqs);
if (!opt_result.has_value()) {
CHECK(fusion_plan_map
.insert({node_id, {&original_hlo, false}})
.second);
continue;
}
const DimOrderMap operand_dim_orders = std::move(opt_result->dim_orders);
combined_reqs = std::move(opt_result->requirements);
inputs.erase(node_id);
graph.ReserveSpaceForOutNeighbors(node_id, original_hlo.operand_count());
for (int64_t i = 0; i < original_hlo.operand_count(); ++i) {
const HloInstruction& operand = *original_hlo.operand(i);
const DimensionOrder& operand_dim_order = operand_dim_orders.at(&operand);
bool is_new_node = false;
AdjacencyList::NodeId operand_node_id =
get_or_create_fusion_node(operand, operand_dim_order, &is_new_node);
graph.AddArc(node_id, operand_node_id);
if (is_new_node) {
VLOG(6) << "Enqueueing " << operand.ToString() << ":"
<< operand_dim_order.ToString();
inputs.insert(operand_node_id);
queue.push(operand_node_id);
}
}
CHECK(
fusion_plan_map.insert({node_id, {&original_hlo, true}})
.second);
}
while (!queue.empty()) {
AdjacencyList::NodeId node_id = queue.front();
queue.pop();
const HloAndDimOrder& hlo_and_dim_order = hlo_and_dim_order_map.at(node_id);
CHECK(fusion_plan_map
.insert({node_id,
{hlo_and_dim_order.original_hlo, false}})
.second);
}
return {{std::move(graph), std::move(fusion_plan_map)},
std::move(combined_reqs)};
}
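// Recursively materializes the fusion plan into `builder`, memoizing every
// node in `fused_hlo_map`. Nodes marked should_fuse are cloned with their
// fused operands; all other nodes become fusion parameters appended to
// `fusion_params`.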
HloInstruction& BuildFusionTowardOperandsImpl(
AdjacencyList::NodeId node_id, const FusionPlan& fusion_plan,
absl::flat_hash_map<AdjacencyList::NodeId, HloInstruction*>&
fused_hlo_map,
HloComputation::Builder& builder,
    std::vector<HloInstruction*>& fusion_params) {
if (auto it = fused_hlo_map.find(node_id); it != fused_hlo_map.end()) {
return *it->second;
}
const NodeFusionPlan& node_fusion_plan = fusion_plan.map.at(node_id);
const bool should_fuse = node_fusion_plan.should_fuse;
const HloInstruction& original_hlo = *node_fusion_plan.original_hlo;
HloInstruction* fused_hlo = nullptr;
if (should_fuse) {
HloInstruction::InstructionVector new_operands;
for (AdjacencyList::NodeId operand_id :
fusion_plan.graph.GetOutNeighbors(node_id)) {
new_operands.push_back(&BuildFusionTowardOperandsImpl(
operand_id, fusion_plan, fused_hlo_map, builder, fusion_params));
}
fused_hlo = builder.AddInstruction(
original_hlo.CloneWithNewOperands(original_hlo.shape(), new_operands));
} else {
fusion_params.push_back(const_cast<HloInstruction*>(&original_hlo));
fused_hlo = builder.AddInstruction(HloInstruction::CreateParameter(
fusion_params.size() - 1, original_hlo.shape(),
absl::StrCat("parameter_", fusion_params.size() - 1)));
}
CHECK(fused_hlo_map.insert({node_id, fused_hlo}).second);
return *fused_hlo;
}
HloInstruction& BuildFusionTowardOperands(
const FusionPlan& fusion_plan,
HloComputation::Builder& builder,
    std::vector<HloInstruction*>& fusion_params) {
absl::flat_hash_map<AdjacencyList::NodeId, HloInstruction*> fused_hlo_map;
return BuildFusionTowardOperandsImpl(fusion_plan.graph.GetRoot(), fusion_plan,
fused_hlo_map, builder, fusion_params);
}
HlosAndRequirements FuseTowardOperands(
const HloInstruction& root_hlo, const DimensionOrder& root_dim_order,
const std::optional<int>& max_params,
const se::GpuComputeCapability& gpu_version,
const DotProperties& properties, const DotRequirements& requirements_so_far,
HloComputation::Builder& builder,
    std::vector<HloInstruction*>& fusion_params) {
FusionPlanAndRequirements fusion_plan_and_reqs =
BuildFusionPlanTowardOperands(root_hlo, root_dim_order, max_params,
gpu_version, properties,
requirements_so_far);
HloInstruction& fused_hlo_or_param = BuildFusionTowardOperands(
fusion_plan_and_reqs.fusion_plan, builder, fusion_params);
return HlosAndRequirements{&root_hlo, &fused_hlo_or_param,
fusion_plan_and_reqs.requirements};
}
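// Fuses the given dot operand towards its producers, bounded by the maximum
// number of parameters Triton supports per dot operand.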
absl::StatusOr<HlosAndRequirements> FuseDotOperand(
const HloInstruction& dot, int operand_index,
const se::GpuComputeCapability& gpu_version,
HloComputation::Builder& builder,
    std::vector<HloInstruction*>& fusion_params) {
TF_ASSIGN_OR_RETURN(const FusionContext context,
FusionContext::FromDotOperand(dot, operand_index));
const HloInstruction& operand = *dot.operand(operand_index);
return FuseTowardOperands(operand, context.dim_orders().at(&operand),
TritonFusionAnalysis::kMaxParameterPerDotOperand,
gpu_version, context.dot_properties(),
context.requirements(), builder, fusion_params);
}
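// Grows the fusion towards the users of `hlo`. Fusion continues only while
// the instruction has exactly one user, that user distributes over addition,
// and propagating the dimension order through it stays profitable; extra
// operands of the user are in turn fused towards their own producers.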
HlosAndRequirements FuseTowardUsers(
const HloInstruction& hlo, const HloInstruction& fused_hlo,
const DimensionOrder& hlo_dim_order,
const se::GpuComputeCapability& gpu_version,
const DotProperties& properties, const DotRequirements& requirements,
HloComputation::Builder& builder,
    std::vector<HloInstruction*>& fusion_params) {
const HlosAndRequirements existing_hlos_and_requirements = {&hlo, &fused_hlo,
requirements};
if (hlo.user_count() != 1) {
return existing_hlos_and_requirements;
}
const HloInstruction& user = *hlo.users()[0];
if (!legacy_triton::IsDistributiveOverAddition(user)) {
return existing_hlos_and_requirements;
}
auto opt_user_result = GetUserDimOrdersAndCombinedReqsIfProfitable(
hlo, hlo_dim_order, user, properties, gpu_version, requirements);
if (!opt_user_result.has_value()) {
return existing_hlos_and_requirements;
}
DimensionOrder user_dim_order = opt_user_result->dim_orders.at(&user);
DotRequirements combined_requirements = opt_user_result->requirements;
HloInstruction::InstructionVector new_operands;
if (user.operand_count() == 1) {
new_operands.push_back(const_cast<HloInstruction*>(&fused_hlo));
} else {
auto opt_operand_result = GetOperandDimOrdersAndCombinedReqs(
user, user_dim_order, properties, gpu_version, combined_requirements);
if (!opt_operand_result.has_value()) {
return existing_hlos_and_requirements;
}
DimOrderMap operand_dim_orders = opt_operand_result->dim_orders;
combined_requirements = opt_operand_result->requirements;
for (int i = 0; i < user.operand_count(); ++i) {
const HloInstruction& operand = *user.operand(i);
if (&operand == &hlo) {
new_operands.push_back(const_cast<HloInstruction*>(&fused_hlo));
} else {
HlosAndRequirements hlos_and_requirements = FuseTowardOperands(
operand, operand_dim_orders.at(&operand),
std::nullopt, gpu_version, properties,
combined_requirements, builder, fusion_params);
new_operands.push_back(
const_cast<HloInstruction*>(hlos_and_requirements.fused_hlo));
combined_requirements = hlos_and_requirements.requirements;
}
}
}
const HloInstruction& fused_user = *builder.AddInstruction(
user.CloneWithNewOperands(user.shape(), new_operands));
return FuseTowardUsers(user, fused_user, user_dim_order, gpu_version,
properties, combined_requirements, builder,
fusion_params);
}
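// Fuses the dot's output towards its users, starting from the dot's own
// dimension order. The literal 1 passed to FromDotOutput below is presumably
// the split-K factor.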
HlosAndRequirements FuseDotOutput(
const HloInstruction& dot, const HloInstruction& fused_dot,
const se::GpuComputeCapability& gpu_version,
const DotRequirements& requirements,
HloComputation::Builder& builder,
    std::vector<HloInstruction*>& fusion_params) {
const auto context =
FusionContext::FromDotOutput(dot, 1, requirements);
return FuseTowardUsers(dot, fused_dot, context.dim_orders().at(&dot),
gpu_version, context.dot_properties(),
context.requirements(), builder, fusion_params);
}
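// Builds the fusion body for `dot` into `builder`: both dot operands (plus
// the sparsity metadata, if present) are fused towards their producers, and
// the dot's output is fused towards its users. Returns an empty
// FusionDecision when the fusion is worthwhile, or an explanation when the
// result would be a pure matmul with nothing profitable fused around it.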
absl::StatusOr<FusionDecision> CreateDotFusion(
const HloDotInstruction& dot, const se::GpuComputeCapability gpu_version,
HloComputation::Builder& builder,
std::vector<HloInstruction*>& fusion_inputs,
HloInstruction** fusion_output_ptr) {
VLOG(5) << dot.ToString();
if (CodegenDecision is_supported =
legacy_triton::IsTritonSupportedInstruction(dot, gpu_version);
!is_supported) {
VLOG(3) << is_supported.Explain();
return is_supported;
}
if (dot.sparse_operands()) {
const SparsityDescriptor& descriptor = dot.sparsity().front();
if (dot.sparse_operands() != 1 || descriptor.index() != 0) {
return InvalidArgument("Sparsity is only supported on left operand");
}
if (descriptor.type() != SparsityType::SPARSITY_STRUCTURED_N_M ||
descriptor.n() != 2 || descriptor.m() != 4) {
return InvalidArgument("Only 2:4 structured sparsity is supported");
}
CHECK_EQ(descriptor.dimension(), dot.operand(0)->shape().rank() - 1);
}
TF_ASSIGN_OR_RETURN(HlosAndRequirements lhs_hlos_and_reqs,
FuseDotOperand(dot, 0, gpu_version,
builder, fusion_inputs));
TF_ASSIGN_OR_RETURN(HlosAndRequirements rhs_hlos_and_reqs,
FuseDotOperand(dot, 1, gpu_version,
builder, fusion_inputs));
std::optional<const HloInstruction*> meta_hlo;
if (dot.sparse_operands()) {
TF_ASSIGN_OR_RETURN(HlosAndRequirements meta_hlos_and_reqs,
FuseDotOperand(dot, 2, gpu_version,
builder, fusion_inputs));
meta_hlo.emplace(meta_hlos_and_reqs.fused_hlo);
}
HloInstruction& fused_dot =
FuseDot(dot, *lhs_hlos_and_reqs.fused_hlo, *rhs_hlos_and_reqs.fused_hlo,
meta_hlo, builder);
HlosAndRequirements fused_output_and_reqs =
FuseDotOutput(dot, fused_dot, gpu_version, lhs_hlos_and_reqs.requirements,
builder, fusion_inputs);
if (fusion_output_ptr != nullptr) {
*fusion_output_ptr =
const_cast<HloInstruction*>(fused_output_and_reqs.original_hlo);
}
const PrecisionConfig::Algorithm algorithm =
dot.precision_config().algorithm();
if (algorithm == PrecisionConfig::ALG_DOT_BF16_BF16_F32_X6 ||
algorithm == PrecisionConfig::ALG_DOT_BF16_BF16_F32_X3 ||
dot.GetModule()->config().debug_options().xla_gpu_triton_gemm_any() ||
dot.sparse_operands()) {
return FusionDecision{};
}
bool is_pure_matmul = true;
(void)builder.ForEachInstruction([&](const HloInstruction* fused_hlo) {
static constexpr std::array<HloOpcode, 4> kPureOpcodes = {
HloOpcode::kBitcast, HloOpcode::kDot, HloOpcode::kParameter,
HloOpcode::kReshape};
if (absl::c_find(kPureOpcodes, fused_hlo->opcode()) == kPureOpcodes.end()) {
is_pure_matmul = false;
return absl::CancelledError();
}
return absl::OkStatus();
});
if (!is_pure_matmul) {
return FusionDecision{};
}
return "No profitable operations to fuse.";
}
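// Rewrites suitable dot instructions into Triton GEMM fusions. Dots that
// fall below the rewrite size threshold and are still supported by the
// classical emitters are left unchanged.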
class GemmFusionVisitor : public DfsHloRewriteVisitor {
public:
explicit GemmFusionVisitor(const se::GpuComputeCapability& gpu_version)
: gpu_version_(gpu_version) {}
absl::Status HandleDot(HloInstruction* dot) override {
CHECK_EQ(dot->opcode(), HloOpcode::kDot);
int64_t gemm_rewrite_size_threshold =
dot->GetModule()
->config()
.debug_options()
.xla_gpu_gemm_rewrite_size_threshold();
TF_ASSIGN_OR_RETURN(bool is_matmul_tiny,
IsMatrixMultiplicationTooSmallForRewriting(
*dot, gemm_rewrite_size_threshold));
if (is_matmul_tiny && IsDotSupportedByClassicalEmitters(*dot)) {
return absl::OkStatus();
}
std::string fusion_name = absl::StrCat("gemm_fusion_", dot->name());
HloComputation::Builder builder(absl::StrCat(fusion_name, "_computation"));
std::vector<HloInstruction*> fusion_inputs;
HloInstruction* fusion_output = nullptr;
TF_ASSIGN_OR_RETURN(
const FusionDecision should_fuse,
CreateDotFusion(*Cast<HloDotInstruction>(dot), gpu_version_, builder,
fusion_inputs, &fusion_output));
if (builder.last_added_instruction() == nullptr) {
return absl::OkStatus();
}
if (std::holds_alternative<se::CudaComputeCapability>(gpu_version_)) {
if (!CublasRequiresPadding(
*Cast<HloDotInstruction>(dot),
std::get<se::CudaComputeCapability>(gpu_version_)) &&
!should_fuse) {
return absl::OkStatus();
}
}
HloComputation* computation =
dot->GetModule()->AddComputationAndUnifyNamesAndIds(builder.B | #include "xla/service/gpu/gemm_fusion.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/cublas_padding_requirements.h"
#include "xla/service/gpu/triton_fusion_analysis.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::testing::FieldsAre;
namespace m = ::xla::match;
class GemmFusionTest : public HloTestBase {
public:
GemmFusionTest()
      : HloTestBase(/*verifier_layout_sensitive=*/true,
                    /*allow_mixed_precision_in_hlo_verifier=*/false) {}
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_gpu_triton_gemm_any(false);
debug_options.set_xla_gpu_gemm_rewrite_size_threshold(0);
return debug_options;
}
se::GpuComputeCapability gpu_version_{
se::CudaComputeCapability{se::CudaComputeCapability::AMPERE, 0}};
void MatchHloModule(HloModule& module, absl::string_view pattern) {
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_result,
RunFileCheck(module.ToString(), pattern));
EXPECT_TRUE(filecheck_result);
}
};
TEST_F(GemmFusionTest, TransposeSubdimensionGroup) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
ENTRY e {
p0 = f32[32,3] parameter(0)
t1 = f32[3,32] transpose(p0), dimensions={1,0}
r1 = f32[3,8,4] reshape(t1)
r0 = f32[3,32] reshape(r1)
p1 = f16[32,7] parameter(1)
c1 = f32[32,7] convert(p1)
ROOT d = f32[3,7] dot(r0, c1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})")
.value();
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter(), m::Parameter())));
}
TEST_F(GemmFusionTest, UnsupportedTransposeIsNotFused) {
auto module = ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = f16[1,512,8,1024]{3,1,0,2} parameter(0)
c = f16[1,512,8,1024]{3,2,1,0} copy(p0)
b = f16[4096,1024]{1,0} bitcast(c)
p1 = f16[128,1024]{1,0} parameter(1)
ROOT d = f16[4096,128]{1,0} dot(b, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
})")
.value();
EXPECT_FALSE(GemmFusion(gpu_version_).Run(module.get()).value());
}
TEST_F(GemmFusionTest, BitcastChain) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
ENTRY e {
p0 = s8[60,5] parameter(0)
r0 = s8[3,20,5] reshape(p0)
c0 = f16[3,20,5] convert(r0)
p1 = f16[3,200] parameter(1)
r12 = f16[600] reshape(p1)
r11 = f16[30,20] reshape(r12)
r1 = f16[3,10,20] reshape(r11)
ROOT d = f16[3,5,10] dot(c0, r1),
lhs_contracting_dims={1}, rhs_contracting_dims={2},
lhs_batch_dims={0}, rhs_batch_dims={0}
})")
.value();
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter(), m::Parameter())));
}
TEST_F(GemmFusionTest, SplitDimensionTwice) {
auto module = ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = s8[4,2,32,4,2] parameter(0)
r1 = s8[8,32,8] reshape(p0)
t1 = s8[32,8,8] transpose(r1), dimensions={1,0,2}
r0 = s8[32,64] reshape(t1)
p1 = s8[32,32] parameter(1)
c0 = f16[32,32] convert(p1)
ROOT d = f16[64,32] dot(r0, c0),
lhs_contracting_dims={0}, rhs_contracting_dims={1}
})")
.value();
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter(), m::Parameter())));
}
TEST_F(GemmFusionTest, DoNotTriggerOnUnsupportedOutputConversions) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = f16[128,256] parameter(0)
p1 = f16[256,512] parameter(1)
r = f16[128,512] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT c = u8[128,512] convert(r)
})"));
EXPECT_FALSE(GemmFusion(gpu_version_).Run(module.get()).value());
}
TEST_F(GemmFusionTest, FuseDotWithTrivialNoncontractingDim) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
ENTRY e {
p0 = s8[60,5] parameter(0)
r0 = s8[3,20,5] reshape(p0)
c0 = f16[3,20,5] convert(r0)
p1 = f16[3,1,20] parameter(1)
ROOT d = f16[3,5,1] dot(c0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={2},
lhs_batch_dims={0}, rhs_batch_dims={0}
})")
.value();
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter(), m::Parameter())));
}
TEST_F(GemmFusionTest, HandleDotIfCublasRequiresPadding) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule m
ENTRY e {
p0 = f16[5,3] parameter(0)
p1 = f16[5,7] parameter(1)
ROOT d = f16[3,7] dot(p0, p1),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
})"));
const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0};
EXPECT_TRUE(CublasRequiresPadding(
*xla::Cast<HloDotInstruction>(
module->entry_computation()->root_instruction()),
cc));
EXPECT_TRUE(GemmFusion(cc).Run(module.get()).value());
}
TEST_F(GemmFusionTest, FuseSliceOfParameterWithOtherUsers) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = f32[97,121] parameter(0)
s0 = f32[7,101] slice(p0), slice={[3:10], [10:111]}
p1 = f32[101,16] parameter(1)
d = f32[16,7] dot(p1, s0),
lhs_contracting_dims={0}, rhs_contracting_dims={1}
s1 = f32[3,33] slice(p0), slice={[10:13], [20:53]}
ROOT t = tuple(d, s1)
})"));
const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0};
EXPECT_TRUE(GemmFusion(cc).Run(module.get()).value());
}
TEST_F(GemmFusionTest, DoNotFuseSliceOfMixedDimensions) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = bf16[768,64] parameter(0)
s0 = bf16[768,32] slice(p0), slice={[0:768], [0:32]}
b0 = bf16[256,3,32] reshape(s0)
b1 = bf16[256,96] reshape(b0)
p1 = bf16[256,96] parameter(1)
ROOT d = bf16[96,96] dot(b1, p1),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
})"));
const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0};
EXPECT_FALSE(GemmFusion(cc).Run(module.get()).value());
}
TEST_F(GemmFusionTest, DoNotFuseSlicesOfNonMajorFragments) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = f32[2,2,256,256] parameter(0)
s0 = f32[1,1,256,256] slice(p0),
slice={[0:1], [0:1], [0:256], [0:256]}
r0 = f32[256,256] reshape(s0)
p1 = f16[2,2,256,256] parameter(1)
s1 = f16[1,1,256,256] slice(p1),
slice={[0:1], [0:1], [0:256], [0:256]}
r1 = f16[256,256] reshape(s1)
ROOT d = f32[256,256] dot(r0, r1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})"));
const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0};
EXPECT_FALSE(GemmFusion(cc).Run(module.get()).value());
}
TEST_F(GemmFusionTest, DynamicSliceIsFused) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
dot_lhs = f32[2,18] parameter(0)
dynamic_slice_input = f32[2,64,2] parameter(1)
start_index0 = s32[] parameter(2)
start_index1_2 = s32[] constant(0)
dynamic_slice = f32[1,64,2] dynamic-slice(dynamic_slice_input, start_index0, start_index1_2, start_index1_2),
dynamic_slice_sizes={1,64,2}
reshape = f32[64,2] reshape(dynamic_slice)
ROOT dot = f16[18,64] dot(dot_lhs, reshape),
lhs_contracting_dims={0}, rhs_contracting_dims={1}
})"));
EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{
se::CudaComputeCapability::AMPERE, 0})
.Run(module.get())
.value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch((m::Fusion(m::Parameter(), m::Parameter(),
m::Parameter(), m::Constant()))));
}
TEST_F(GemmFusionTest, DynamicSlicesAreFusedEvenIfTheyShareIndices) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = f32[2,64,2] parameter(0)
p1 = s32[] parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
ds0 = f32[1,64,2] dynamic-slice(p0, p1, p2, p3), dynamic_slice_sizes={1,64,2}
a = f32[64,2] reshape(ds0)
ds1 = f32[1,64,2] dynamic-slice(p0, p3, p2, p1), dynamic_slice_sizes={1,64,2}
b = f32[64,2] reshape(ds1)
ROOT d = f16[64,64] dot(a, b),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
})"));
EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{
se::CudaComputeCapability::AMPERE, 0})
.Run(module.get())
.value());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch((m::Fusion(m::Parameter(), m::Parameter(), m::Parameter(),
m::Parameter(), m::Parameter(), m::Parameter(),
m::Parameter(), m::Parameter()))));
}
TEST_F(GemmFusionTest, DoNotFuseDynamicSliceOfNonMajorFragments) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
dot_lhs = f32[2,4]{1,0} parameter(0)
dynamic_slice_input = f32[4,5,2]{2,1,0} parameter(1)
c0 = s32[] constant(0)
c2 = s32[] constant(2)
dynamic_slice = f32[4,1,2]{2,1,0} dynamic-slice(dynamic_slice_input, c0, c2, c0),
dynamic_slice_sizes={4,1,2}
reshape = f32[4,2]{1,0} reshape(dynamic_slice)
ROOT dot = f32[4,4]{1,0} dot(dot_lhs, reshape),
lhs_contracting_dims={0}, rhs_contracting_dims={1}
})"));
const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0};
EXPECT_FALSE(GemmFusion(cc).Run(module.get()).value());
}
TEST_F(GemmFusionTest, CanFuseDynamicSliceOfContractingDimIfItIsMajor) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
dot_lhs = f32[2,4]{1,0} parameter(0)
dynamic_slice_input = f32[5,5]{1,0} parameter(1)
start_index0 = s32[] constant(2)
start_index1 = s32[] constant(0)
dynamic_slice = f32[2,5]{1,0} dynamic-slice(dynamic_slice_input, start_index0, start_index1),
dynamic_slice_sizes={2,5}
ROOT d = f32[4,5]{1,0} dot(dot_lhs, dynamic_slice),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
})"));
EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{
se::CudaComputeCapability::AMPERE, 0})
.Run(module.get())
.value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch((m::Fusion(m::Parameter(), m::Parameter(),
m::Constant(), m::Constant()))));
}
TEST_F(GemmFusionTest, SliceToDegenerateIsSkipped) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
p = f32[3] parameter(0)
s = f32[1] slice(p), slice={[2:3]}
r = f32[] reshape(s)
b = f32[3,3] broadcast(r), dimensions={}
ROOT d = f32[3,3] dot(b, b),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)"));
const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0};
ASSERT_TRUE(GemmFusion(cc).Run(module.get()).value());
MatchHloModule(*module, R"(
; CHECK-NOT: slice
; CHECK: ENTRY
; CHECK: slice
)");
}
TEST_F(GemmFusionTest, MultipleUsesAreHandled) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
c = f32[] constant(1)
b = f32[6,8] broadcast(c), dimensions={}
p0 = f32[6,8] parameter(0)
a1 = f32[6,8] add(p0, b)
e = f32[6,8] exponential(a1)
a2 = f32[6,8] add(e, b)
d = f32[6,8] divide(b, a2)
p2 = f16[8,6] parameter(1)
cv = f32[8,6] convert(p2)
ROOT r = f32[6,6] dot(d, cv),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})"));
const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0};
EXPECT_TRUE(GemmFusion(cc).Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter(), m::Parameter())));
}
TEST_F(GemmFusionTest, BinaryElementwiseOfBroadcastIsFused) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
p2 = f32[3072] parameter(2)
b = f32[8192,3072] broadcast(p2), dimensions={1}
p0 = f16[8192,3072] parameter(0)
p0c = f32[8192,3072] convert(p0)
a = f32[8192,3072] add(p0c, b)
p1 = f32[3072,768] parameter(1)
ROOT r = f32[8192,768] dot(a, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})"));
const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0};
EXPECT_TRUE(GemmFusion(cc).Run(module.get()).value());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter(), m::Parameter(), m::Parameter())));
}
TEST_F(GemmFusionTest, BinaryElementwiseOfUnsupportedBroadcastIsNotFused) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
p2 = f32[768] parameter(2)
b = f32[8192,768,4] broadcast(p2), dimensions={1}
s = f32[8192,3072] bitcast(b)
p0 = f16[8192,3072] parameter(0)
p0c = f32[8192,3072] convert(p0)
a = f32[8192,3072] add(p0c, s)
p1 = f32[3072,768] parameter(1)
ROOT r = f32[8192,768] dot(a, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})"));
const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0};
EXPECT_FALSE(GemmFusion(cc).Run(module.get()).value());
}
class GemmFusionLevel2Test : public GemmFusionTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = GemmFusionTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_triton_fusion_level(2);
return debug_options;
}
};
TEST_F(GemmFusionLevel2Test, ReshapeToScalarIsHandled) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = s8[5,3] parameter(0)
c = f16[5,3] convert(p0)
p1 = f16[1] parameter(1)
r = f16[] reshape(p1)
b = f16[5,7] broadcast(r)
ROOT d = f16[3,7] dot(c, b),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
})"));
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter(), m::Parameter())));
}
TEST_F(GemmFusionLevel2Test, DoNotFuseIncompatibleDimensionSplits) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
p1 = s8[5,7,2,3]{3,2,1,0} parameter(1)
t1 = s8[7,5,2,3]{3,2,1,0} transpose(p1), dimensions={1,0,2,3}
r1 = s8[7,30]{1,0} reshape(t1)
cvt = f16[7,30]{1,0} convert(r1)
p2 = f16[2,7,5,3]{3,2,1,0} parameter(2)
t2 = f16[7,2,5,3]{3,2,1,0} transpose(p2), dimensions={1,0,2,3}
r2 = f16[7,30]{1,0} reshape(t2)
a = f16[7,30]{1,0} add(cvt, r2)
p0 = f16[7,79]{1,0} parameter(0)
ROOT dot = f16[30,79]{1,0} dot(a, p0),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
})"));
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Transpose(), m::Parameter(), m::Parameter())));
}
TEST_F(GemmFusionLevel2Test, DoNotFuseTooManyParameters) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
tmp_0 = f32[] constant(1)
tmp_1 = f32[3,49]{1,0} broadcast(tmp_0), dimensions={}
tmp_2 = f32[3,49]{1,0} parameter(6)
tmp_3 = f32[] constant(0)
tmp_4 = f32[3,49]{1,0} broadcast(tmp_3), dimensions={}
tmp_5 = pred[3,49]{1,0} compare(tmp_2, tmp_4), direction=GT
tmp_6 = f32[3,49]{1,0} convert(tmp_5)
tmp_7 = f32[3,49]{1,0} subtract(tmp_1, tmp_6)
tmp_8 = s32[] parameter(13)
tmp_9 = f32[] convert(tmp_8)
tmp_10 = f32[] maximum(tmp_9, tmp_0)
tmp_11 = f32[] divide(tmp_3, tmp_10)
tmp_12 = f32[3,49]{1,0} broadcast(tmp_11), dimensions={}
tmp_13 = pred[3,49]{1,0} parameter(7)
tmp_14 = pred[3,49]{1,0} parameter(10)
tmp_15 = pred[3,49]{1,0} and(tmp_13, tmp_14)
tmp_16 = f32[3,49]{1,0} convert(tmp_15)
tmp_17 = f32[3,49]{1,0} multiply(tmp_12, tmp_16)
tmp_18 = f32[3,49]{1,0} negate(tmp_17)
tmp_19 = f32[3,49]{1,0} multiply(tmp_7, tmp_18)
tmp_20 = f32[3,49]{1,0} parameter(19)
tmp_21 = f32[3,49]{1,0} subtract(tmp_1, tmp_20)
tmp_22 = f32[3,49]{1,0} divide(tmp_19, tmp_21)
tmp_23 = f32[3,49]{1,0} negate(tmp_22)
tmp_24 = f32[3,49]{1,0} negate(tmp_6)
tmp_25 = f32[3,49]{1,0} multiply(tmp_24, tmp_17)
tmp_26 = f32[3,49]{1,0} divide(tmp_25, tmp_20)
tmp_27 = f32[3,49]{1,0} add(tmp_23, tmp_26)
tmp_28 = f32[3,49]{1,0} parameter(18)
tmp_29 = f32[3,49]{1,0} multiply(tmp_27, tmp_28)
tmp_30 = f32[3,49]{1,0} parameter(17)
tmp_31 = f32[3,49]{1,0} multiply(tmp_29, tmp_30)
tmp_32 = f32[3,49]{1,0} parameter(16)
tmp_33 = f32[3,49]{1,0} multiply(tmp_31, tmp_32)
tmp_34 = f32[3,49]{1,0} parameter(15)
tmp_35 = f32[3,49]{1,0} add(tmp_33, tmp_34)
tmp_36 = f32[3,49]{1,0} parameter(14)
tmp_37 = f32[3,49]{1,0} add(tmp_35, tmp_36)
tmp_38 = f32[1,1]{1,0} constant({ {0} })
tmp_39 = f32[1,1]{1,0} broadcast(tmp_38), dimensions={0,1}
tmp_40 = f32[] reshape(tmp_39)
tmp_41 = f32[3,32]{1,0} broadcast(tmp_40), dimensions={}
tmp_42 = u32[48]{0} parameter(11)
tmp_43 = u32[48]{0} parameter(5)
tmp_44 = u32[96]{0} concatenate(tmp_42, tmp_43), dimensions={0}
tmp_45 = u32[3,32]{1,0} reshape(tmp_44)
tmp_46 = u32[96]{0} reshape(tmp_45)
tmp_47 = u32[] constant(1)
tmp_48 = u32[3,32]{1,0} broadcast(tmp_47), dimensions={}
tmp_49 = u32[96]{0} reshape(tmp_48)
tmp_50 = u32[96]{0} shift-right-logical(tmp_46, tmp_49)
tmp_51 = u32[3,32]{1,0} reshape(tmp_50)
tmp_52 = u32[3,32]{1,0} or(tmp_51, tmp_48)
tmp_53 = f32[3,32]{1,0} bitcast-convert(tmp_52)
tmp_54 = f32[3,32]{1,0} broadcast(tmp_0), dimensions={}
tmp_55 = f32[3,32]{1,0} subtract(tmp_53, tmp_54)
tmp_56 = f32[1,1]{1,0} constant({ {1} })
tmp_57 = f32[1,1]{1,0} broadcast(tmp_56), dimensions={0,1}
tmp_58 = f32[] reshape(tmp_57)
tmp_59 = f32[3,32]{1,0} broadcast(tmp_58), dimensions={}
tmp_60 = f32[3,32]{1,0} multiply(tmp_55, tmp_59)
tmp_61 = f32[3,32]{1,0} add(tmp_60, tmp_41)
tmp_62 = f32[3,32]{1,0} maximum(tmp_41, tmp_61)
tmp_63 = f32[3,32]{1,0} broadcast(tmp_3), dimensions={}
tmp_64 = pred[3,32]{1,0} compare(tmp_62, tmp_63), direction=LT
tmp_65 = f32[3,32]{1,0} convert(tmp_64)
tmp_66 = f32[3,49]{1,0} parameter(9)
tmp_67 = f32[49]{0} parameter(4)
tmp_68 = f32[3,49]{1,0} broadcast(tmp_67), dimensions={1}
tmp_69 = f32[3,49]{1,0} add(tmp_66, tmp_68)
tmp_70 = f32[1,49]{1,0} parameter(12)
tmp_71 = f32[1,49]{1,0} broadcast(tmp_0), dimensions={}
tmp_72 = f32[1,49]{1,0} divide(tmp_70, tmp_71)
tmp_73 = f32[1,49]{1,0} broadcast(tmp_72), dimensions={0,1}
tmp_74 = f32[49]{0} reshape(tmp_73)
tmp_75 = f32[3,49]{1,0} broadcast(tmp_74), dimensions={1}
tmp_76 = f32[3,49]{1,0} subtract(tmp_69, tmp_75)
tmp_77 = f32[1,49]{1,0} parameter(3)
tmp_78 = f32[1,49]{1,0} parameter(8)
tmp_79 = f32[1,49]{1,0} divide(tmp_78, tmp_71)
tmp_80 = f32[1,49]{1,0} multiply(tmp_72, tmp_72)
tmp_81 = f32[1,49]{1,0} subtract(tmp_79, tmp_80)
tmp_82 = f32[1,49]{1,0} add(tmp_81, tmp_71)
tmp_83 = f32[1,49]{1,0} rsqrt(tmp_82)
tmp_84 = f32[1,49]{1,0} multiply(tmp_77, tmp_83)
tmp_85 = f32[1,49]{1,0} broadcast(tmp_84), dimensions={0,1}
tmp_86 = f32[49]{0} reshape(tmp_85)
tmp_87 = f32[3,49]{1,0} broadcast(tmp_86), dimensions={1}
tmp_88 = f32[3,49]{1,0} multiply(tmp_76, tmp_87)
tmp_89 = f32[1,49]{1,0} parameter(2)
tmp_90 = f32[1,49]{1,0} broadcast(tmp_89), dimensions={0,1}
tmp_91 = f32[49]{0} reshape(tmp_90)
tmp_92 = f32[3,49]{1,0} broadcast(tmp_91), dimensions={1}
tmp_93 = f32[3,49]{1,0} add(tmp_88, tmp_92)
tmp_94 = f32[49,32]{1,0} parameter(1)
tmp_95 = f32[3,32]{1,0} dot(tmp_93, tmp_94), lhs_contracting_dims={1}, rhs_contracting_dims={0}
tmp_96 = f32[32]{0} parameter(0)
tmp_97 = f32[3,32]{1,0} broadcast(tmp_96), dimensions={1}
tmp_98 = f32[3,32]{1,0} add(tmp_95, tmp_97)
tmp_99 = f32[3,32]{1,0} multiply(tmp_65, tmp_98)
tmp_100 = f32[3,32]{1,0} divide(tmp_99, tmp_63)
tmp_101 = f32[3,32]{1,0} maximum(tmp_100, tmp_63)
ROOT tmp_102 = f32[49,32]{1,0} dot(tmp_37, tmp_101), lhs_contracting_dims={0}, rhs_contracting_dims={0}
})"));
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kFusion);
EXPECT_EQ(module->entry_computation()->root_instruction()->fusion_kind(),
HloInstruction::FusionKind::kCustom);
EXPECT_LE(module->entry_computation()->root_instruction()->operand_count(),
TritonFusionAnalysis::kMaxParameterPerDotOperand * 2);
}
TEST_F(GemmFusionLevel2Test,
DoNotFuseTooManyParametersWhenAnInstructionWouldAddMultipleParameters) {
static_assert(TritonFusionAnalysis::kMaxParameterPerDotOperand == 4,
"We have to update this test.");
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
a = f32[3,49]{1,0} parameter(0)
b = f32[3,49]{1,0} parameter(1)
c = pred[3,49]{1,0} parameter(2)
d = f32[3,49]{1,0} parameter(3)
e = f32[3,49]{1,0} parameter(4)
add0 = f32[3,49]{1,0} add(a, b)
select = f32[3,49]{1,0} select(c, d, e)
add1 = f32[3,49]{1,0} add(add0, select)
f = f32[3,32]{1,0} parameter(5)
ROOT tmp_102 = f32[49,32]{1,0} dot(add1, f), lhs_contracting_dims={0}, rhs_contracting_dims={0}
})"));
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kFusion);
EXPECT_EQ(module->entry_computation()->root_instruction()->fusion_kind(),
HloInstruction::FusionKind::kCustom);
EXPECT_LE(module->entry_computation()->root_instruction()->operand_count(),
TritonFusionAnalysis::kMaxParameterPerDotOperand + 1);
}
TEST_F(GemmFusionLevel2Test, DoNotFuseTooManyParametersForConcat) {
static_assert(TritonFusionAnalysis::kMaxParameterPerDotOperand == 4,
"We have to update this test.");
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
a = f32[3,3]{1,0} parameter(0)
b = f32[3,3]{1,0} parameter(1)
c = f32[3,3]{1,0} parameter(2)
d = f32[3,3]{1,0} parameter(3)
e = f32[3,3]{1,0} parameter(4)
f = f16[3,3]{1,0} parameter(5)
concat = f32[15,3]{1,0} concatenate(a, b, c, d, e), dimensions={0}
convert = f32[3,3]{1,0} convert(f)
ROOT dot = f32[15,3]{1,0} dot(concat, convert), lhs_contracting_dims={1}, rhs_contracting_dims={1}
})"));
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kFusion);
EXPECT_EQ(module->entry_computation()->root_instruction()->fusion_kind(),
HloInstruction::FusionKind::kCustom);
EXPECT_LE(module->entry_computation()->root_instruction()->operand_count(),
TritonFusionAnalysis::kMaxParameterPerDotOperand + 1);
}
TEST_F(GemmFusionLevel2Test,
InstructionsReachableFromMultipleOperandsAreHandledCorrectly) {
static_assert(TritonFusionAnalysis::kMaxParameterPerDotOperand == 4,
"We have to update this test.");
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
a = f32[2,4]{1,0} parameter(0)
b = f32[2,4]{1,0} parameter(1)
c = f32[2,4]{1,0} parameter(2)
d = f32[2,4]{1,0} parameter(3)
e = f32[2,4]{1,0} parameter(4)
add0 = f32[2,4]{1,0} add(a, b)
add1 = f32[2,4]{1,0} add(add0, c)
add2 = f32[2,4]{1,0} add(add1, d)
add3 = f32[2,4]{1,0} add(add2, e)
ROOT r = f32[2,2]{1,0} dot(add3, add0),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
})"));
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
}
TEST_F(GemmFusionLevel2Test, EachScopeIsFusedToASeparateSubgraph) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
a = f32[2,4]{1,0} parameter(0)
b = f32[2,4]{1,0} parameter(1)
add = f32[2,4]{1,0} add(a, b)
ROOT r = f32[2,2]{1,0} dot(add, add),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
})"));
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
MatchHloModule(*module, R"(
CHECK-DAG: %[[P0:.*]] = f32[2,4]{1,0} parameter(0)
CHECK-DAG: %[[P1:.*]] = f32[2,4]{1,0} parameter(1)
CHECK-DAG: %[[ADD0:.*]] = f32[2,4]{1,0} add(f32[2,4]{1,0} %[[P0]], f32[2,4]{1,0} %[[P1]])
CHECK-DAG: %[[P2:.*]] = f32[2,4]{1,0} parameter(2)
CHECK-DAG: %[[P3:.*]] = f32[2,4]{1,0} parameter(3)
CHECK-DAG: %[[ADD1:.*]] = f32[2,4]{1,0} add(f32[2,4]{1,0} %[[P2]], f32[2,4]{1,0} %[[P3]])
CHECK-DAG: ROOT {{.*}} = f32[2,2]{1,0} dot(f32[2,4]{1,0} %[[ADD0]], f32[2,4]{1,0} %[[ADD1]])
CHECK: ENTRY
CHECK-DAG: %[[P0:.*]] = f32[2,4]{1,0} parameter(0)
CHECK-DAG: %[[P1:.*]] = f32[2,4]{1,0} parameter(1)
CHECK-DAG: ROOT {{.*}} = f32[2,2]{1,0}
CHECK-SAME: fusion(f32[2,4]{1,0} %[[P0]], f32[2,4]{1,0} %[[P1]], f32[2,4]{1,0} %[[P0]], f32[2,4]{1,0} %[[P1]]),
CHECK-SAME: kind=kCustom
CHECK-SAME: __triton_gemm
})");
}
TEST_F(GemmFusionLevel2Test, ParamNodesAreReusedIfTheyHaveTheSameIterSpec) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
a = f32[2,4]{1,0} parameter(0)
add = f32[2,4]{1,0} add(a, a)
ROOT r = f32[2,2]{1,0} dot(add, add),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
})"));
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
MatchHloModule(*module, R"(
CHECK-DAG: %[[P0:.*]] = f32[2,4]{1,0} parameter(0)
CHECK-DAG: %[[ADD0:.*]] = f32[2,4]{1,0} add(f32[2,4]{1,0} %[[P0]], f32[2,4]{1,0} %[[P0]])
CHECK-DAG: %[[P1:.*]] = f32[2,4]{1,0} parameter(1)
CHECK-DAG: %[[ADD1:.*]] = f32[2,4]{1,0} add(f32[2,4]{1,0} %[[P1]], f32[2,4]{1,0} %[[P1]])
CHECK-DAG: ROOT {{.*}} = f32[2,2]{1,0} dot(f32[2,4]{1,0} %[[ADD0]], f32[2,4]{1,0} %[[ADD1]])
CHECK: ENTRY
CHECK-DAG: %[[P0:.*]] = f32[2,4]{1,0} parameter(0)
CHECK-DAG: ROOT {{.*}} = f32[2,2]{1,0}
CHECK-SAME: fusion(f32[2,4]{1,0} %[[P0]], f32[2,4]{1,0} %[[P0]])
CHECK-SAME: kind=kCustom
CHECK-SAME: __triton_gemm
})");
}
TEST_F(GemmFusionLevel2Test, NonParamNodesAreReusedIfTheyHaveTheSameIterSpec) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
a = f32[4,4]{1,0} parameter(0)
b = f32[4,4]{1,0} parameter(1)
negate = f32[4,4]{1,0} negate(a)
sine = f32[4,4]{1,0} sine(negate)
add = f32[4,4]{1,0} add(negate, sine)
ROOT r = f32[4,4]{1,0} dot(add, b),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
})"));
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
MatchHloModule(*module, R"(
CHECK-DAG: %[[P0:.*]] = f32[4,4]{1,0} parameter(0)
CHECK-DAG: %[[P1:.*]] = f32[4,4]{1,0} parameter(1)
CHECK-DAG: %[[NEGATE:.*]] = f32[4,4]{1,0} negate(f32[4,4]{1,0} %[[P0]])
CHECK-DAG: %[[SINE:.*]] = f32[4,4]{1,0} sine(f32[4,4]{1,0} %[[NEGATE]])
CHECK-DAG: %[[ADD:.*]] = f32[4,4]{1,0} add(f32[4,4]{1,0} %[[NEGATE]], f32[4,4]{1,0} %[[SINE]])
CHECK-DAG: ROOT {{.*}} = f32[4,4]{1,0} dot(f32[4,4]{1,0} %[[ADD]], f32[4,4]{1,0} %[[P1]])
CHECK: ENTRY
CHECK-DAG: %[[P0:.*]] = f32[4,4]{1,0} parameter(0)
CHECK-DAG: %[[P1:.*]] = f32[4,4]{1,0} parameter(1)
CHECK-DAG: ROOT {{.*}} = f32[4,4]{1,0}
CHECK-SAME: fusion(f32[4,4]{1,0} %[[P0]], f32[4,4]{1,0} %[[P1]])
CHECK-SAME: kind=kCustom
CHECK-SAME: __triton_gemm
})");
}
TEST_F(GemmFusionLevel2Test, NodesAreNotReusedIfTheyHaveDifferentIterSpecs) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
a = f32[4,4]{1,0} parameter(0)
b = f32[4,4]{1,0} parameter(1)
tr_a = f32[4,4]{1,0} transpose(a), dimensions={1,0}
add = f32[4,4]{1,0} add(a, tr_a)
ROOT r = f32[4,4]{1,0} dot(add, b),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
})"));
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
MatchHloModule(*module, R"(
CHECK-DAG: %[[P0:.*]] = f32[4,4]{1,0} parameter(0)
CHECK-DAG: %[[P1:.*]] = f32[4,4]{1,0} parameter(1)
CHECK-DAG: %[[P2:.*]] = f32[4,4]{1,0} parameter(2)
CHECK-DAG: %[[TRANSPOSE:.*]] = f32[4,4]{1, |
2,043 | cpp | tensorflow/tensorflow | double_buffer_loop_unrolling | third_party/xla/xla/service/gpu/transforms/double_buffer_loop_unrolling.cc | third_party/xla/xla/service/gpu/transforms/double_buffer_loop_unrolling_test.cc | #ifndef XLA_SERVICE_GPU_DOUBLE_BUFFER_LOOP_UNROLLING_H_
#define XLA_SERVICE_GPU_DOUBLE_BUFFER_LOOP_UNROLLING_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
class DoubleBufferLoopUnrolling : public HloModulePass {
public:
enum class UnrollStrategy { kDoubleBuffer, kFullUnroll };
explicit DoubleBufferLoopUnrolling(
UnrollStrategy unroll_strategy = UnrollStrategy::kDoubleBuffer)
: unroll_strategy_(unroll_strategy) {}
~DoubleBufferLoopUnrolling() override = default;
absl::string_view name() const override {
return "loop-double-buffer-transformer";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
UnrollStrategy unroll_strategy_;
};
}
}
#endif
#include "xla/service/gpu/double_buffer_loop_unrolling.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instruction_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/flatten_call_graph.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
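// Gives a cloned collective a channel id distinct from the original's. For
// collectives wrapped in HloAsyncInstruction, the async start and done must
// share one new id, which is what the map below tracks.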
void SetChannelIdForNewCollective(HloInstruction* new_instr,
const HloModule* module) {
absl::flat_hash_map<int64_t, int64_t> old_to_new_channel_id_map;
absl::flat_hash_map<int64_t, HloComputation*> channel_id_comp_map;
if (new_instr->IsAsynchronous() && hlo_query::IsCollectiveCommunicationOp(
new_instr->async_wrapped_opcode())) {
HloInstruction* wrapped_instr =
DynCast<HloAsyncInstruction>(new_instr)->async_wrapped_instruction();
int64_t old_channel_id = *wrapped_instr->channel_id();
// Look up the old id before inserting: operator[] would default-insert a
// zero entry and make the "not seen yet" check below never trigger.
auto it = old_to_new_channel_id_map.find(old_channel_id);
int64_t new_channel_id;
if (it == old_to_new_channel_id_map.end()) {
new_channel_id = hlo_query::NextChannelId(*module);
VLOG(2) << "Generated new channel id " << new_channel_id;
old_to_new_channel_id_map[old_channel_id] = new_channel_id;
} else {
new_channel_id = it->second;
}
VLOG(2) << "Setting channel id to " << new_channel_id;
wrapped_instr->set_channel_id(new_channel_id);
if (channel_id_comp_map.find(new_channel_id) == channel_id_comp_map.end()) {
channel_id_comp_map[new_channel_id] =
new_instr->async_wrapped_computation();
} else {
channel_id_comp_map[new_channel_id]->AddAsyncStart(new_instr);
}
} else if (hlo_query::IsCollectiveCommunicationOp(new_instr->opcode()) ||
hlo_query::IsAsyncCollectiveStartOp(new_instr)) {
new_instr->set_channel_id(hlo_query::NextChannelId(*module));
}
}
using Interval = std::pair<int64_t, int64_t>;
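// Parses a string such as "{{0,5},{1,6}}" into (first, last) iteration
// intervals, reusing the replica-group parser and requiring exactly two
// values per group.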
absl::StatusOr<std::vector<Interval>> ParseVectorOfPairs(
absl::string_view str) {
TF_ASSIGN_OR_RETURN(std::vector<ReplicaGroup> replica_groups,
ParseReplicaGroupsOnly(str));
std::vector<Interval> res;
res.reserve(replica_groups.size());
for (const ReplicaGroup& replica_group : replica_groups) {
TF_RET_CHECK(replica_group.replica_ids_size() == 2);
int64_t a = replica_group.replica_ids(0);
int64_t b = replica_group.replica_ids(1);
res.emplace_back(a, b);
}
return res;
}
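// Updates the send/recv validation attribute on an instruction peeled out
// of the loop: the peeled copy runs only original iteration 0, so an
// interval [a, b] becomes {0, 0} if it contains 0 and the empty interval
// {1, 0} otherwise.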
absl::Status SetSendRecvValidationForPeeledInstr(HloInstruction* new_instr,
HloInstruction* old_instr) {
TF_RET_CHECK(
new_instr->opcode() == old_instr->opcode() &&
"cloned instruction and original instruction have different opcodes");
if (!HloPredicateIsOp<HloOpcode::kCollectivePermute,
HloOpcode::kCollectivePermuteStart, HloOpcode::kSend,
HloOpcode::kRecv>(old_instr)) {
return absl::OkStatus();
}
const auto& attribute_map = new_instr->frontend_attributes().map();
if (!attribute_map.contains(kSendRecvValidationAttr)) {
return absl::OkStatus();
}
VLOG(3) << "Original send-recv iterations: "
<< attribute_map.at(kSendRecvValidationAttr);
TF_ASSIGN_OR_RETURN(
auto send_recv_validation_attr,
ParseVectorOfPairs(attribute_map.at(kSendRecvValidationAttr)));
uint64_t n_pairs = send_recv_validation_attr.size();
if (n_pairs == 0) {
return absl::OkStatus();
}
std::vector<Interval> send_recv_validation_attr_updated(n_pairs, {1, 0});
for (std::uint64_t i = 0; i < send_recv_validation_attr.size(); i++) {
if (send_recv_validation_attr[i].first <= 0 &&
send_recv_validation_attr[i].second >= 0) {
send_recv_validation_attr_updated[i] = {0, 0};
}
}
hlo_instruction_utils::AddOrUpdateVectorOfPairsAsAttribute(
new_instr, kSendRecvValidationAttr,
send_recv_validation_attr_updated);
return absl::OkStatus();
}
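// Splits a send/recv validation interval [a, b] over original iterations
// between the two copies in the unrolled body. Without peeling the copies
// execute original iterations 0,2,4,... and 1,3,5,...; with a peeled first
// iteration they execute 1,3,5,... and 2,4,6,..., which yields the
// floor-by-two formulas below.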
absl::Status SetSendRecvValidation(HloInstruction* cp1, HloInstruction* cp2,
bool is_peeled) {
TF_RET_CHECK(
cp2->opcode() == cp1->opcode() &&
"cloned instruction and original instruction have different opcodes");
if (!HloPredicateIsOp<HloOpcode::kCollectivePermute,
HloOpcode::kCollectivePermuteStart, HloOpcode::kSend,
HloOpcode::kRecv>(cp1)) {
return absl::OkStatus();
}
const auto& attribute_map = cp2->frontend_attributes().map();
if (!attribute_map.contains(kSendRecvValidationAttr)) {
return absl::OkStatus();
}
VLOG(3) << "Original send-recv iterations: "
<< attribute_map.at(kSendRecvValidationAttr);
TF_ASSIGN_OR_RETURN(
auto send_recv_validation_attr,
ParseVectorOfPairs(attribute_map.at(kSendRecvValidationAttr)));
if (send_recv_validation_attr.empty()) {
return absl::OkStatus();
}
std::vector<Interval> send_recv_iterations_new_instr1,
send_recv_iterations_new_instr2;
send_recv_iterations_new_instr1.reserve(send_recv_validation_attr.size());
send_recv_iterations_new_instr2.reserve(send_recv_validation_attr.size());
for (const Interval& pair : send_recv_validation_attr) {
int64_t a = pair.first;
int64_t b = pair.second;
if (is_peeled) {
send_recv_iterations_new_instr1.emplace_back(
std::floor(a / 2.0), std::max(0.0, std::floor((b - 1) / 2.0)));
send_recv_iterations_new_instr2.emplace_back(
std::max(0.0, std::floor((a - 1) / 2.0)),
std::max(0.0, std::floor((b - 2) / 2.0)));
} else {
send_recv_iterations_new_instr1.emplace_back(std::floor((a + 1) / 2.0),
std::floor(b / 2.0));
send_recv_iterations_new_instr2.emplace_back(
std::floor(a / 2.0), std::max(0.0, std::floor((b - 1) / 2.0)));
}
}
hlo_instruction_utils::AddOrUpdateVectorOfPairsAsAttribute(
cp1, kSendRecvValidationAttr,
send_recv_iterations_new_instr1);
hlo_instruction_utils::AddOrUpdateVectorOfPairsAsAttribute(
cp2, kSendRecvValidationAttr,
send_recv_iterations_new_instr2);
VLOG(3) << "Updated send-recv iterations for " << cp1->name() << " : "
<< cp1->frontend_attributes().map().at(kSendRecvValidationAttr);
VLOG(3) << "Updated send-recv iterations for " << cp2->name() << " : "
<< cp2->frontend_attributes().map().at(kSendRecvValidationAttr);
return absl::OkStatus();
}
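// Restores control dependencies on cloned instructions and, to keep the two
// unrolled halves ordered, makes the old loop roots control-predecessors of
// the cloned consumers of the loop parameter; collectives and elementwise
// ops with a constant operand are exempt so they can still overlap.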
absl::Status HandleControlDependencies(
const HloComputation* while_body,
const absl::flat_hash_map<HloInstruction*, HloInstruction*>& old_to_new_map,
HloInstruction::InstructionVector* old_loop_roots,
HloInstruction* input_parameter,
const absl::flat_hash_set<HloInstruction*>& skip_control_dep_injection) {
for (HloInstruction* old_instr : while_body->MakeInstructionPostOrder()) {
if (old_to_new_map.find(old_instr) != old_to_new_map.end()) {
HloInstruction* new_instr = old_to_new_map.at(old_instr);
VLOG(2) << "Processing control predecessors for "
<< new_instr->ToString();
std::vector<HloInstruction*> new_control_pred;
new_control_pred.reserve(old_instr->control_predecessors().size());
for (HloInstruction* pred : old_instr->control_predecessors()) {
if (!old_to_new_map.contains(pred)) {
continue;
}
new_control_pred.push_back(old_to_new_map.at(pred));
}
TF_RETURN_IF_ERROR(new_instr->DropAllControlDeps());
for (HloInstruction* new_pred : new_control_pred) {
TF_RETURN_IF_ERROR(new_pred->AddControlDependencyTo(new_instr));
VLOG(2) << "Adding " << new_pred->ToString()
<< " to control dependency of " << new_instr->ToString();
}
}
}
for (HloInstruction* input_consumer : input_parameter->users()) {
for (HloInstruction* old_input : input_consumer->users()) {
if (old_to_new_map.find(old_input) != old_to_new_map.end()) {
HloInstruction* new_input = old_to_new_map.at(old_input);
if (skip_control_dep_injection.find(old_input) ==
skip_control_dep_injection.end() &&
!IsCollective(old_input)) {
for (HloInstruction* old_root : *old_loop_roots) {
TF_RETURN_IF_ERROR(old_root->AddControlDependencyTo(new_input));
}
}
}
}
}
return absl::OkStatus();
}
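// Fully unrolls a loop with known trip count N: the body ends up containing
// N copies of the original body and the loop's trip count is set to 1.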
absl::StatusOr<bool> FullyUnroll(HloInstruction* while_instr,
HloModule* module) {
HloComputation* while_body = while_instr->while_body();
bool changed = false;
VLOG(2) << "Processing root " << while_body->root_instruction()->ToString();
auto loop_roots = while_body->root_instruction()->mutable_operands();
HloInstruction* input_parameter = while_body->parameter_instruction(0);
VLOG(2) << "Processing input parameter " << input_parameter->ToString();
absl::flat_hash_map<HloInstruction*, HloInstruction*> old_to_new_map;
absl::flat_hash_set<HloInstruction*> skip_control_dep_injection;
std::string clone_suffix = "full_unroll_clone";
TF_ASSIGN_OR_RETURN(WhileLoopBackendConfig config,
while_instr->backend_config<WhileLoopBackendConfig>());
// MakeInstructionPostOrder visits each instruction exactly once, so the
// post order can serve as the clone worklist directly; cache it instead of
// rebuilding it and deduplicating on every unroll step.
std::vector<HloInstruction*> ops_to_clone =
    while_body->MakeInstructionPostOrder();
HloInstruction* old_input_parameter = input_parameter;
HloInstruction* new_input_parameter = while_body->root_instruction();
int64_t n = config.known_trip_count().n();
while (--n) {
std::vector<HloInstruction*> new_ops_to_clone;
old_to_new_map[old_input_parameter] = new_input_parameter;
for (HloInstruction* old_instr : ops_to_clone) {
if (old_to_new_map.contains(old_instr)) {
continue;
}
VLOG(2) << "Cloning instruction " << old_instr->ToString();
std::vector<HloInstruction*> new_operands;
for (HloInstruction* old_operand : old_instr->mutable_operands()) {
new_operands.push_back(old_to_new_map[old_operand]);
}
HloInstruction* new_instr =
while_body->AddInstruction(old_instr->CloneWithNewOperands(
old_instr->shape(), new_operands, clone_suffix));
if (old_instr->IsElementwiseBinary() && old_instr->HasConstantOperand()) {
skip_control_dep_injection.insert(old_instr);
}
SetChannelIdForNewCollective(new_instr, module);
old_to_new_map[old_instr] = new_instr;
new_ops_to_clone.push_back(new_instr);
VLOG(2) << "Added instruction " << new_instr->ToString();
}
while_body->set_root_instruction(
old_to_new_map[while_body->root_instruction()]);
VLOG(2) << "Replaced with new root "
<< while_body->root_instruction()->ToString();
TF_RETURN_IF_ERROR(HandleControlDependencies(
while_body, old_to_new_map, &loop_roots, old_input_parameter,
skip_control_dep_injection));
old_to_new_map.clear();
skip_control_dep_injection.clear();
loop_roots = while_body->root_instruction()->mutable_operands();
old_input_parameter = new_input_parameter;
new_input_parameter = while_body->root_instruction();
ops_to_clone = std::move(new_ops_to_clone);
changed = true;
}
WhileLoopBackendConfig new_config;
new_config.mutable_known_trip_count()->set_n(1);
TF_RETURN_IF_ERROR(while_instr->set_backend_config(new_config));
return changed;
}
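// Clones one loop iteration into the parent computation and feeds its
// result tuple to the while, leaving an even trip count for double
// buffering.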
absl::Status PeelInstructionsForOddTripCount(HloModule* module,
HloInstruction* while_instr) {
std::string suffix = "peeled_double_buffer";
absl::flat_hash_map<HloInstruction*, HloInstruction*> old_to_new_map;
HloComputation* while_body = while_instr->while_body();
HloInstruction* input_parameter = while_body->parameter_instruction(0);
HloInstruction* input_tuple = while_instr->mutable_operand(0);
auto old_loop_roots = while_body->root_instruction()->mutable_operands();
HloComputation* parent_comp = while_instr->parent();
old_to_new_map[input_parameter] = input_tuple;
for (HloInstruction* old_instr : while_body->MakeInstructionPostOrder()) {
if (old_to_new_map.find(old_instr) != old_to_new_map.end()) {
continue;
}
VLOG(2) << "Peeling instruction " << old_instr->ToString();
std::vector<HloInstruction*> new_operands(old_instr->operand_count());
for (int64_t i = 0; i < old_instr->operand_count(); i++) {
new_operands[i] = old_to_new_map[old_instr->mutable_operand(i)];
}
HloInstruction* new_instr =
parent_comp->AddInstruction(old_instr->CloneWithNewOperands(
old_instr->shape(), new_operands, suffix));
SetChannelIdForNewCollective(new_instr, module);
TF_CHECK_OK(SetSendRecvValidationForPeeledInstr(new_instr, old_instr));
old_to_new_map[old_instr] = new_instr;
VLOG(2) << "Added instruction " << new_instr->ToString()
<< " to parent computation.";
}
std::vector<HloInstruction*> new_roots;
for (HloInstruction* instr : old_loop_roots) {
new_roots.push_back(old_to_new_map[instr]);
}
TF_RETURN_IF_ERROR(while_instr->ReplaceOperandWith(
0, old_to_new_map[while_body->root_instruction()]));
VLOG(2) << "Replaced with new input tuple "
<< while_instr->operand(0)->ToString();
for (HloInstruction* old_instr : while_body->MakeInstructionPostOrder()) {
if (old_to_new_map.find(old_instr) != old_to_new_map.end()) {
HloInstruction* new_instr = old_to_new_map[old_instr];
VLOG(2) << "Processing control predecessors for peeled instruction "
<< new_instr->ToString();
std::vector<HloInstruction*> new_control_pred;
new_control_pred.reserve(old_instr->control_predecessors().size());
for (HloInstruction* pred : old_instr->control_predecessors()) {
new_control_pred.push_back(old_to_new_map[pred]);
}
TF_RETURN_IF_ERROR(new_instr->DropAllControlDeps());
for (HloInstruction* new_pred : new_control_pred) {
TF_RETURN_IF_ERROR(new_pred->AddControlDependencyTo(new_instr));
VLOG(2) << "Adding " << new_pred->ToString()
<< " to control dependency of peeled instruction: "
<< new_instr->ToString();
}
}
}
return absl::OkStatus();
}
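// Doubles the loop body so each new iteration performs two original
// iterations, peeling one iteration first when the trip count is odd, and
// halves the known trip count.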
absl::StatusOr<bool> DoubleBufferingUnroll(HloInstruction* while_instr,
HloModule* module) {
TF_ASSIGN_OR_RETURN(auto config,
while_instr->backend_config<WhileLoopBackendConfig>());
CHECK(config.has_known_trip_count())
<< "Only loops with known trip count are supported.";
int64_t exact_trip_count = config.known_trip_count().n();
VLOG(2) << "Processing while loop " << while_instr->ToString()
<< " with trip count: " << exact_trip_count;
HloComputation* while_body = while_instr->while_body();
VLOG(2) << "Processing root " << while_body->root_instruction()->ToString();
auto old_loop_roots = while_body->root_instruction()->mutable_operands();
HloInstruction* input_parameter = while_body->parameter_instruction(0);
VLOG(2) << "Processing input parameter " << input_parameter->ToString();
absl::flat_hash_map<HloInstruction*, HloInstruction*> old_to_new_map;
absl::flat_hash_set<HloInstruction*> skip_control_dep_injection;
bool is_peeled = exact_trip_count % 2;
if (is_peeled) {
VLOG(2) << "Found loops with odd trip count, 1 iteration will be peeled "
"outside of the main body.";
TF_RETURN_IF_ERROR(PeelInstructionsForOddTripCount(module, while_instr));
exact_trip_count -= 1;
}
std::string suffix = "double_buffer_clone";
old_to_new_map[input_parameter] = while_body->root_instruction();
for (HloInstruction* old_instr : while_body->MakeInstructionPostOrder()) {
if (old_to_new_map.find(old_instr) != old_to_new_map.end()) {
continue;
}
VLOG(2) << "Cloning instruction " << old_instr->ToString();
std::vector<HloInstruction*> new_operands;
for (HloInstruction* old_operand : old_instr->mutable_operands()) {
new_operands.push_back(old_to_new_map[old_operand]);
}
HloInstruction* new_instr =
while_body->AddInstruction(old_instr->CloneWithNewOperands(
old_instr->shape(), new_operands, suffix));
if (old_instr->IsElementwiseBinary() && old_instr->HasConstantOperand()) {
skip_control_dep_injection.insert(old_instr);
}
SetChannelIdForNewCollective(new_instr, module);
TF_CHECK_OK(SetSendRecvValidation(old_instr, new_instr, is_peeled));
old_to_new_map[old_instr] = new_instr;
VLOG(2) << "Added instruction " << new_instr->ToString();
}
while_body->set_root_instruction(
old_to_new_map[while_body->root_instruction()]);
VLOG(2) << "Replaced with new root "
<< while_body->root_instruction()->ToString();
TF_RETURN_IF_ERROR(HandleControlDependencies(while_body, old_to_new_map,
&old_loop_roots, input_parameter,
skip_control_dep_injection));
WhileLoopBackendConfig new_config;
new_config.mutable_known_trip_count()->set_n(exact_trip_count / 2);
TF_RETURN_IF_ERROR(while_instr->set_backend_config(new_config));
return true;
}
}
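// Applies the configured unroll strategy to every while loop whose trip
// count is statically known, then flattens the call graph so computations
// that ended up with multiple callers are duplicated.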
absl::StatusOr<bool> DoubleBufferLoopUnrolling::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
std::vector<HloInstruction*> while_instrs;
for (auto comp : module->MakeNonfusionComputations()) {
absl::c_copy_if(comp->instructions(), std::back_inserter(while_instrs),
HloPredicateIsOp<HloOpcode::kWhile>);
}
VLOG(2) << "Processing " << while_instrs.size() << " while loops.";
for (HloInstruction* while_instr : while_instrs) {
TF_ASSIGN_OR_RETURN(WhileLoopBackendConfig config,
while_instr->backend_config<WhileLoopBackendConfig>());
if (!config.has_known_trip_count()) {
VLOG(2) << while_instr->ToString()
<< " doesn't have exact trip count, skipping loop unrolling "
"for now";
continue;
}
bool loop_changed = false;
if (unroll_strategy_ == UnrollStrategy::kFullUnroll) {
TF_ASSIGN_OR_RETURN(loop_changed, FullyUnroll(while_instr, module));
} else if (unroll_strategy_ == UnrollStrategy::kDoubleBuffer) {
TF_ASSIGN_OR_RETURN(loop_changed,
                    DoubleBufferingUnroll(while_instr, module));
} else {
LOG(FATAL) << "Unhandled unrolling strategy: "
           << static_cast<int>(unroll_strategy_);
}
changed |= loop_changed;
}
VLOG(2) << "LoopDoubleBufferTransformer output: " << module->ToString();
if (changed) {
TF_RETURN_IF_ERROR(
FlattenCallGraph().Run(module, execution_threads).status());
}
return changed;
}
}
} | #include "xla/service/gpu/double_buffer_loop_unrolling.h"
#include <cstdint>
#include <memory>
#include <optional>
#include "absl/container/flat_hash_set.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/test.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using tsl::testing::IsOkAndHolds;
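// Counts instructions with the given opcode in a computation / the whole
// module; the tests use this to check how many collective copies unrolling
// produced.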
int64_t CountInstructions(const HloComputation& computation, HloOpcode opcode) {
int64_t count = 0;
for (const auto& instruction : computation.instructions()) {
if (instruction->opcode() == opcode) {
count++;
}
}
return count;
}
int64_t CountInstructions(const HloModule& module, HloOpcode opcode) {
int64_t count = 0;
for (const auto& computation : module.computations()) {
count += CountInstructions((*computation), opcode);
}
return count;
}
class GpuLoopDoubleBufferTransformerTest : public HloTestBase {
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_while_loop_double_buffering(true);
return debug_options;
}
};
TEST_F(GpuLoopDoubleBufferTransformerTest, FullUnrollOddTripCountTest) {
const char* const kModuleString = R"(
HloModule all_gather_overlapping
condition {
input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=3
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body {
input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0)
param_0 = f32[1,128] get-tuple-element(input_tuple), index=0
param_1 = f32[2,128] get-tuple-element(input_tuple), index=2
cond = s32[] get-tuple-element(input_tuple), index=3
c0 = f32[] constant(0)
splat_c0 = f32[1,128] broadcast(c0), dimensions={}
add = f32[1,128] add(splat_c0, param_0)
all-gather-start = (f32[1,128], f32[2,128]) all-gather-start(add), channel_id=1337, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true
c1_s32 = s32[] constant(1)
c0_s32 = s32[] constant(0)
one = s32[] constant(1)
cond_plus_1 = s32[] add(cond, one)
dynamic-slice = f32[1,128] dynamic-slice(param_1, c1_s32, c0_s32), dynamic_slice_sizes={1,128}
all-gather-done = f32[2,128] all-gather-done(all-gather-start)
ROOT output_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, dynamic-slice, all-gather-done, cond_plus_1)
}
ENTRY main {
param_0 = f32[1,128] parameter(0)
param_1 = f32[2,128] parameter(1)
param_2 = s32[] constant(0)
tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, param_0, param_1, param_2)
ROOT while = (f32[1,128], f32[1,128], f32[2,128], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"11"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer(
DoubleBufferLoopUnrolling::UnrollStrategy::kFullUnroll);
TupleSimplifier tuple_simp;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, double_buffer.Run(module.get()));
EXPECT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(changed, tuple_simp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* while_instruction = hlo_query::GetFirstInstructionWithOpcode(
*module->entry_computation(), HloOpcode::kWhile);
TF_ASSERT_OK_AND_ASSIGN(
WhileLoopBackendConfig config,
while_instruction->backend_config<WhileLoopBackendConfig>());
int64_t exact_trip_count = config.known_trip_count().n();
EXPECT_EQ(exact_trip_count, 1);
EXPECT_EQ(CountInstructions((*while_instruction->while_body()),
HloOpcode::kAllGatherStart),
11);
EXPECT_EQ(CountInstructions((*module), HloOpcode::kAllGatherStart), 11);
}
TEST_F(GpuLoopDoubleBufferTransformerTest, FullUnrollEvenTripCountTest) {
const char* const kModuleString = R"(
HloModule all_gather_overlapping
condition {
input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=3
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body {
input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0)
param_0 = f32[1,128] get-tuple-element(input_tuple), index=0
param_1 = f32[2,128] get-tuple-element(input_tuple), index=2
cond = s32[] get-tuple-element(input_tuple), index=3
c0 = f32[] constant(0)
splat_c0 = f32[1,128] broadcast(c0), dimensions={}
add = f32[1,128] add(splat_c0, param_0)
all-gather-start = (f32[1,128], f32[2,128]) all-gather-start(add), channel_id=1337, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true
c1_s32 = s32[] constant(1)
c0_s32 = s32[] constant(0)
one = s32[] constant(1)
cond_plus_1 = s32[] add(cond, one)
dynamic-slice = f32[1,128] dynamic-slice(param_1, c1_s32, c0_s32), dynamic_slice_sizes={1,128}
all-gather-done = f32[2,128] all-gather-done(all-gather-start)
ROOT output_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, dynamic-slice, all-gather-done, cond_plus_1)
}
ENTRY main {
param_0 = f32[1,128] parameter(0)
param_1 = f32[2,128] parameter(1)
param_2 = s32[] constant(0)
tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, param_0, param_1, param_2)
ROOT while = (f32[1,128], f32[1,128], f32[2,128], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"10"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer(
DoubleBufferLoopUnrolling::UnrollStrategy::kFullUnroll);
TupleSimplifier tuple_simp;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, double_buffer.Run(module.get()));
EXPECT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(changed, tuple_simp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* while_instruction = nullptr;
for (auto instr : module->entry_computation()->instructions()) {
if (instr->opcode() == HloOpcode::kWhile) {
while_instruction = instr;
}
}
TF_ASSERT_OK_AND_ASSIGN(
WhileLoopBackendConfig config,
while_instruction->backend_config<WhileLoopBackendConfig>());
int64_t exact_trip_count = config.known_trip_count().n();
EXPECT_EQ(exact_trip_count, 1);
EXPECT_EQ(CountInstructions((*while_instruction->while_body()),
HloOpcode::kAllGatherStart),
10);
EXPECT_EQ(CountInstructions((*module), HloOpcode::kAllGatherStart), 10);
}
TEST_F(GpuLoopDoubleBufferTransformerTest, UnrolledLoopEvenTripCount) {
const char* const kModuleString = R"(
HloModule all_gather_overlapping
condition {
input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=3
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body {
input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0)
param_0 = f32[1,128] get-tuple-element(input_tuple), index=0
param_1 = f32[2,128] get-tuple-element(input_tuple), index=2
cond = s32[] get-tuple-element(input_tuple), index=3
c0 = f32[] constant(0)
splat_c0 = f32[1,128] broadcast(c0), dimensions={}
add = f32[1,128] add(splat_c0, param_0)
all-gather-start = (f32[1,128], f32[2,128]) all-gather-start(add), channel_id=1337, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true
c1_s32 = s32[] constant(1)
c0_s32 = s32[] constant(0)
one = s32[] constant(1)
cond_plus_1 = s32[] add(cond, one)
dynamic-slice = f32[1,128] dynamic-slice(param_1, c1_s32, c0_s32), dynamic_slice_sizes={1,128}
all-gather-done = f32[2,128] all-gather-done(all-gather-start)
ROOT output_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, dynamic-slice, all-gather-done, cond_plus_1)
}
ENTRY main {
param_0 = f32[1,128] parameter(0)
param_1 = f32[2,128] parameter(1)
param_2 = s32[] constant(0)
tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, param_0, param_1, param_2)
ROOT while = (f32[1,128], f32[1,128], f32[2,128], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"10"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer;
TupleSimplifier tuple_simp;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, double_buffer.Run(module.get()));
EXPECT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(changed, tuple_simp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* while_instruction = hlo_query::GetFirstInstructionWithOpcode(
*module->entry_computation(), HloOpcode::kWhile);
TF_ASSERT_OK_AND_ASSIGN(
WhileLoopBackendConfig config,
while_instruction->backend_config<WhileLoopBackendConfig>());
int64_t exact_trip_count = config.known_trip_count().n();
EXPECT_EQ(exact_trip_count, 5);
EXPECT_EQ(CountInstructions((*while_instruction->while_body()),
HloOpcode::kAllGatherStart),
2);
EXPECT_EQ(CountInstructions((*module), HloOpcode::kAllGatherStart), 2);
}
TEST_F(GpuLoopDoubleBufferTransformerTest, UnrolledLoopOddTripCount) {
const char* const kModuleString = R"(
HloModule all_gather_overlapping
condition {
input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=3
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body {
input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0)
param_0 = f32[1,128] get-tuple-element(input_tuple), index=0
param_1 = f32[2,128] get-tuple-element(input_tuple), index=2
cond = s32[] get-tuple-element(input_tuple), index=3
c0 = f32[] constant(0)
splat_c0 = f32[1,128] broadcast(c0), dimensions={}
add = f32[1,128] add(splat_c0, param_0)
all-gather-start = (f32[1,128], f32[2,128]) all-gather-start(add), channel_id=1337, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true
c1_s32 = s32[] constant(1)
c0_s32 = s32[] constant(0)
one = s32[] constant(1)
cond_plus_1 = s32[] add(cond, one)
dynamic-slice = f32[1,128] dynamic-slice(param_1, c1_s32, c0_s32), dynamic_slice_sizes={1,128}
all-gather-done = f32[2,128] all-gather-done(all-gather-start)
ROOT output_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, dynamic-slice, all-gather-done, cond_plus_1)
}
ENTRY main {
param_0 = f32[1,128] parameter(0)
param_1 = f32[2,128] parameter(1)
param_2 = s32[] constant(0)
tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, param_0, param_1, param_2)
ROOT while = (f32[1,128], f32[1,128], f32[2,128], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"11"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer;
TupleSimplifier tuple_simp;
EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(tuple_simp.Run(module.get()), IsOkAndHolds(true));
HloInstruction* while_instruction = hlo_query::GetFirstInstructionWithOpcode(
*module->entry_computation(), HloOpcode::kWhile);
TF_ASSERT_OK_AND_ASSIGN(
WhileLoopBackendConfig config,
while_instruction->backend_config<WhileLoopBackendConfig>());
int64_t exact_trip_count = config.known_trip_count().n();
EXPECT_EQ(exact_trip_count, 5);
EXPECT_EQ(CountInstructions((*while_instruction->while_body()),
HloOpcode::kAllGatherStart),
2);
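  // With an odd trip count (11) one iteration is peeled out of the loop, so a
  // third all-gather-start (with its all-gather-done) lands in the entry
  // computation.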
EXPECT_EQ(CountInstructions((*module), HloOpcode::kAllGatherStart), 3);
EXPECT_EQ(while_instruction->operand(0)->operand(2)->opcode(),
HloOpcode::kAllGatherDone);
}
TEST_F(GpuLoopDoubleBufferTransformerTest,
UnrolledLoopNoControlDepsForConstantAdd) {
const char* const kModuleString = R"(
HloModule loop_unrolling_no_deps
condition {
input_tuple = (f32[], s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=1
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body {
input_tuple = (f32[], s32[]) parameter(0)
param_0 = f32[] get-tuple-element(input_tuple), index=0
cond = s32[] get-tuple-element(input_tuple), index=1
c2 = f32[] constant(2)
add = f32[] add(c2, param_0)
one = s32[] constant(1)
cond_plus_1 = s32[] add(cond, one)
ROOT output_tuple = (f32[], s32[]) tuple(add, cond_plus_1)
}
ENTRY main {
param_0 = f32[] parameter(0)
param_2 = s32[] constant(0)
tuple = (f32[], s32[]) tuple(param_0, param_2)
ROOT while = (f32[], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"11"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer;
TupleSimplifier tuple_simp;
EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(tuple_simp.Run(module.get()), IsOkAndHolds(true));
HloInstruction* while_instruction = hlo_query::GetFirstInstructionWithOpcode(
*module->entry_computation(), HloOpcode::kWhile);
TF_ASSERT_OK_AND_ASSIGN(
WhileLoopBackendConfig config,
while_instruction->backend_config<WhileLoopBackendConfig>());
int64_t exact_trip_count = config.known_trip_count().n();
EXPECT_EQ(exact_trip_count, 5);
EXPECT_EQ(
CountInstructions((*while_instruction->while_body()), HloOpcode::kAdd),
4);
EXPECT_EQ(while_instruction->while_body()
->root_instruction()
->operand(0)
->control_predecessors()
.size(),
0);
}
TEST_F(GpuLoopDoubleBufferTransformerTest,
UnrolledLoopNoControlDepsForCollective) {
const char* const kModuleString = R"(
HloModule loop_unrolling_no_deps
condition {
input_tuple = (f32[], s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=1
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
ar_add {
Arg_1 = f32[] parameter(1)
Arg_0 = f32[] parameter(0)
ROOT add_ar = f32[] add(Arg_1, Arg_0)
}
body {
input_tuple = (f32[], s32[]) parameter(0)
param_0 = f32[] get-tuple-element(input_tuple), index=0
cond = s32[] get-tuple-element(input_tuple), index=1
all-reduce-start = f32[] all-reduce-start(param_0), channel_id=8, replica_groups={{0}}, to_apply=ar_add, backend_config="{\"is_sync\":false}"
one = s32[] constant(1)
all-reduce-done = f32[] all-reduce-done(all-reduce-start)
cond_plus_1 = s32[] add(cond, one)
ROOT output_tuple = (f32[], s32[]) tuple(all-reduce-done, cond_plus_1)
}
ENTRY main {
param_0 = f32[] parameter(0)
param_2 = s32[] constant(0)
tuple = (f32[], s32[]) tuple(param_0, param_2)
ROOT while = (f32[], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"10"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer;
TupleSimplifier tuple_simp;
EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(tuple_simp.Run(module.get()), IsOkAndHolds(true));
HloInstruction* while_instruction = hlo_query::GetFirstInstructionWithOpcode(
*module->entry_computation(), HloOpcode::kWhile);
TF_ASSERT_OK_AND_ASSIGN(
WhileLoopBackendConfig config,
while_instruction->backend_config<WhileLoopBackendConfig>());
int64_t exact_trip_count = config.known_trip_count().n();
EXPECT_EQ(exact_trip_count, 5);
EXPECT_EQ(CountInstructions((*while_instruction->while_body()),
HloOpcode::kAllReduceStart),
2);
absl::flat_hash_set<int64_t> channel_ids;
hlo_query::ForEachInstructionWithOpcode(
*while_instruction->while_body(), HloOpcode::kAllReduceStart,
[&channel_ids](HloInstruction* ar) {
EXPECT_EQ(ar->control_predecessors().size(), 0);
channel_ids.insert(*(ar->channel_id()));
});
EXPECT_EQ(channel_ids.size(), 2);
}
TEST_F(GpuLoopDoubleBufferTransformerTest,
FullyUnrolledLoopNoControlDepsForCollective) {
const char* const kModuleString = R"(
HloModule loop_unrolling_no_deps
condition {
input_tuple = (f32[], s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=1
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
ar_add {
Arg_1 = f32[] parameter(1)
Arg_0 = f32[] parameter(0)
ROOT add_ar = f32[] add(Arg_1, Arg_0)
}
body {
input_tuple = (f32[], s32[]) parameter(0)
param_0 = f32[] get-tuple-element(input_tuple), index=0
cond = s32[] get-tuple-element(input_tuple), index=1
all-reduce-start = f32[] all-reduce-start(param_0), channel_id=8, replica_groups={{0}}, to_apply=ar_add, backend_config="{\"is_sync\":false}"
one = s32[] constant(1)
all-reduce-done = f32[] all-reduce-done(all-reduce-start)
cond_plus_1 = s32[] add(cond, one)
ROOT output_tuple = (f32[], s32[]) tuple(all-reduce-done, cond_plus_1)
}
ENTRY main {
param_0 = f32[] parameter(0)
param_2 = s32[] constant(0)
tuple = (f32[], s32[]) tuple(param_0, param_2)
ROOT while = (f32[], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"10"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer(
DoubleBufferLoopUnrolling::UnrollStrategy::kFullUnroll);
TupleSimplifier tuple_simp;
EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(tuple_simp.Run(module.get()), IsOkAndHolds(true));
HloInstruction* while_instruction = hlo_query::GetFirstInstructionWithOpcode(
*module->entry_computation(), HloOpcode::kWhile);
TF_ASSERT_OK_AND_ASSIGN(
WhileLoopBackendConfig config,
while_instruction->backend_config<WhileLoopBackendConfig>());
int64_t exact_trip_count = config.known_trip_count().n();
EXPECT_EQ(exact_trip_count, 1);
EXPECT_EQ(CountInstructions((*while_instruction->while_body()),
HloOpcode::kAllReduceStart),
10);
absl::flat_hash_set<int64_t> channel_ids;
hlo_query::ForEachInstructionWithOpcode(
*while_instruction->while_body(), HloOpcode::kAllReduceStart,
[&channel_ids](HloInstruction* ar) {
EXPECT_EQ(ar->control_predecessors().size(), 0);
channel_ids.insert(*(ar->channel_id()));
});
EXPECT_EQ(channel_ids.size(), 10);
}
TEST_F(GpuLoopDoubleBufferTransformerTest, NestedWhileLoopRemainsFlattened) {
const char* const kModuleString = R"(
HloModule loop_unrolling_nested_while_loop_remains_flattened
condition_nested {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body_nested {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
one = s32[] constant(1)
cond_plus_1 = s32[] add(cond, one)
ROOT output = (s32[]) tuple(cond_plus_1)
}
condition {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body {
input_tuple = (s32[]) parameter(0)
ROOT output = (s32[]) while(input_tuple), condition=condition_nested, body=body_nested
}
ENTRY main {
param_0 = (s32[]) parameter(0)
ROOT while = (s32[]) while(param_0), condition=condition, body=body, backend_config={"known_trip_count":{"n":"10"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer;
EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true));
absl::flat_hash_set<const HloComputation*> while_loops_callees;
hlo_query::ForEachInstructionWithOpcode(
*module, HloOpcode::kWhile,
[&while_loops_callees](HloInstruction* instr) {
EXPECT_TRUE(
while_loops_callees.insert(instr->while_condition()).second);
EXPECT_TRUE(while_loops_callees.insert(instr->while_body()).second);
});
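  // 6 distinct computations: the outer loop's condition and body, plus
  // separate condition/body clones for each of the two unrolled nested loops.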
EXPECT_EQ(while_loops_callees.size(), 6);
}
TEST_F(GpuLoopDoubleBufferTransformerTest,
NestedWhileLoopRemainsFlattenedOddTripCount) {
const char* const kModuleString = R"(
HloModule loop_unrolling_nested_while_loop_remains_flattened
condition_nested {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body_nested {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
one = s32[] constant(1)
cond_plus_1 = s32[] add(cond, one)
ROOT output = (s32[]) tuple(cond_plus_1)
}
condition {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body {
input_tuple = (s32[]) parameter(0)
ROOT output = (s32[]) while(input_tuple), condition=condition_nested, body=body_nested
}
ENTRY main {
param_0 = (s32[]) parameter(0)
ROOT while = (s32[]) while(param_0), condition=condition, body=body, backend_config={"known_trip_count":{"n":"11"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer;
EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true));
absl::flat_hash_set<const HloComputation*> while_loops_callees;
hlo_query::ForEachInstructionWithOpcode(
*module, HloOpcode::kWhile,
[&while_loops_callees](HloInstruction* instr) {
EXPECT_TRUE(
while_loops_callees.insert(instr->while_condition()).second);
EXPECT_TRUE(while_loops_callees.insert(instr->while_body()).second);
});
EXPECT_EQ(while_loops_callees.size(), 8);
}
TEST_F(GpuLoopDoubleBufferTransformerTest,
NestedWhileLoopRemainsFlattenedWhenFullyUnrolled) {
const char* const kModuleString = R"(
HloModule loop_unrolling_nested_while_loop_remains_flattened
condition_nested {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body_nested {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
one = s32[] constant(1)
cond_plus_1 = s32[] add(cond, one)
ROOT output = (s32[]) tuple(cond_plus_1)
}
condition {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body {
input_tuple = (s32[]) parameter(0)
ROOT output = (s32[]) while(input_tuple), condition=condition_nested, body=body_nested
}
ENTRY main {
param_0 = (s32[]) parameter(0)
ROOT while = (s32[]) while(param_0), condition=condition, body=body, backend_config={"known_trip_count":{"n":"10"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer(
DoubleBufferLoopUnrolling::UnrollStrategy::kFullUnroll);
EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true));
absl::flat_hash_set<const HloComputation*> while_loops_callees;
hlo_query::ForEachInstructionWithOpcode(
*module, HloOpcode::kWhile,
[&while_loops_callees](HloInstruction* instr) {
EXPECT_TRUE(
while_loops_callees.insert(instr->while_condition()).second);
EXPECT_TRUE(while_loops_callees.insert(instr->while_body()).second);
});
hlo_query::ForEachInstructionWithOpcode(
*module->entry_computation(), HloOpcode::kWhile,
[](HloInstruction* instr) {
TF_ASSERT_OK_AND_ASSIGN(
WhileLoopBackendConfig config,
instr->backend_config<WhileLoopBackendConfig>());
int64_t exact_trip_count = config.known_trip_count().n();
EXPECT_EQ(exact_trip_count, 1);
});
EXPECT_EQ(while_loops_callees.size(), 22);
}
TEST_F(GpuLoopDoubleBufferTransformerTest, NestedWhileLoopAreUnrolled) {
const char* const kModuleString = R"(
HloModule loop_unrolling_nested_are_unrolled
condition_nested {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body_nested {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
one = s32[] constant(1)
cond_plus_1 = s32[] add(cond, one)
ROOT output = (s32[]) tuple(cond_plus_1)
}
condition {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body {
input_tuple = (s32[]) parameter(0)
ROOT output = (s32[]) while(input_tuple), condition=condition_nested, body=body_nested, backend_config={"known_trip_count":{"n":"11"}}
}
ENTRY main {
param_0 = (s32[]) parameter(0)
ROOT while = (s32[]) while(param_0), condition=condition, body=body, backend_config={"known_trip_count":{"n":"11"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer;
EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true));
int64_t num_whiles = 0;
hlo_query::ForEachInstructionWithOpcode(
*module, HloOpcode::kWhile, [&num_whiles](HloInstruction* instr) {
EXPECT_EQ(instr->backend_config<WhileLoopBackendConfig>()
->known_trip_count()
.n(),
5);
++num_whiles;
});
EXPECT_EQ(num_whiles, 4);
}
TEST_F(GpuLoopDoubleBufferTransformerTest, NestedWhileLoopAreFullyUnrolled) {
const char* const kModuleString = R"(
HloModule loop_unrolling_nested_are_unrolled
condition_nested {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body_nested {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
one = s32[] constant(1)
cond_plus_1 = s32[] add(cond, one)
ROOT output = (s32[]) tuple(cond_plus_1)
}
condition {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body {
input_tuple = (s32[]) parameter(0)
ROOT output = (s32[]) while(input_tuple), condition=condition_nested, body=body_nested, backend_config={"known_trip_count":{"n":"11"}}
}
ENTRY main {
param_0 = (s32[]) parameter(0)
ROOT while = (s32[]) while(param_0), condition=condition, body=body, backend_config={"known_trip_count":{"n":"11"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer(
DoubleBufferLoopUnrolling::UnrollStrategy::kFullUnroll);
EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true));
int64_t num_whiles = 0;
hlo_query::ForEachInstructionWithOpcode(
*module, HloOpcode::kWhile, [&num_whiles](HloInstruction* instr) {
EXPECT_EQ(instr->backend_config<WhileLoopBackendConfig>()
->known_trip_count()
.n(),
1);
++num_whiles;
});
EXPECT_EQ(num_whiles, 12);
}
TEST_F(GpuLoopDoubleBufferTransformerTest, WhileLoopWithCollectivePermute) {
const char* kModuleString = R"(
HloModule loop_unrolling_no_deps
condition {
input_tuple = (f32[], s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=1
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
ar_add {
Arg_1 = f32[] parameter(1)
Arg_0 = f32[] parameter(0)
ROOT add_ar = f32[] add(Arg_1, Arg_0)
}
body {
input_tuple = (f32[], s32[]) parameter(0)
param_0 = f32[] get-tuple-element(input_tuple), index=0
cond = s32[] get-tuple-element(input_tuple), index=1
collective-permute = f32[] collective-permu |
2,044 | cpp | tensorflow/tensorflow | gpu_all_gather_optimizer | null | null | #ifndef XLA_SERVICE_GPU_GPU_ALL_GATHER_OPTIMIZER_H_
#define XLA_SERVICE_GPU_GPU_ALL_GATHER_OPTIMIZER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
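// Rewrites binary_op(all-gather(a), all-gather(b)) into
// all-gather(binary_op(a, b)) when both all-gathers are single-user and
// compatible (same replica groups, layout constraints, and operand shapes),
// replacing two collectives with one and applying the binary op to the
// smaller pre-gather shards.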
class AllGatherOptimizer : public HloModulePass {
public:
AllGatherOptimizer() = default;
absl::string_view name() const override { return "all-gather-optimizer"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/gpu_all_gather_optimizer.h"
#include <cstdint>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
absl::StatusOr<bool> AllGatherOptimizer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
if (!HloOpcodeIsBinaryCommutative(instruction->opcode())) {
continue;
}
HloInstruction* left_op = instruction->mutable_operand(0);
HloInstruction* right_op = instruction->mutable_operand(1);
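      // The rewrite only applies when both operands come directly from
      // all-gather ops.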
if (right_op->opcode() != HloOpcode::kAllGather ||
left_op->opcode() != HloOpcode::kAllGather) {
VLOG(2) << "Binary op's operands are not all-gather deduced types.";
continue;
}
auto* left_all_gather = Cast<HloAllGatherInstruction>(left_op);
auto* right_all_gather = Cast<HloAllGatherInstruction>(right_op);
if (right_all_gather->constrain_layout() !=
left_all_gather->constrain_layout() ||
right_all_gather->use_global_device_ids() !=
left_all_gather->use_global_device_ids() ||
!ReplicaGroupsEqual(right_all_gather->replica_groups(),
left_all_gather->replica_groups())) {
VLOG(2) << "The right and left all-gather ops are not compatible "
"to merge. ";
continue;
}
if (!ShapeUtil::Equal(left_all_gather->operand(0)->shape(),
right_all_gather->operand(0)->shape())) {
VLOG(2) << "all-gather operands have different shapes";
continue;
}
if (right_all_gather->user_count() != 1 ||
left_all_gather->user_count() != 1) {
VLOG(2) << "all-gather user_count > 1 ";
continue;
}
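      // Apply the binary op to the pre-all-gather shards, then all-gather the
      // combined result once.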
auto index_in_full_shape =
computation->AddInstruction(HloInstruction::CreateBinary(
right_all_gather->operand(0)->shape(), instruction->opcode(),
left_all_gather->mutable_operand(0),
right_all_gather->mutable_operand(0)));
      int64_t all_gather_dimension = right_all_gather->all_gather_dimension();
      auto combined = HloInstruction::CreateAllGather(
          left_all_gather->shape(), {index_in_full_shape}, all_gather_dimension,
          left_all_gather->device_list(),
          /*constrain_layout=*/false, left_all_gather->channel_id(),
          left_all_gather->use_global_device_ids());
TF_RETURN_IF_ERROR(computation->ReplaceWithNewInstruction(
instruction, std::move(combined)));
changed = true;
}
}
return changed;
}
}
} | #include "xla/service/gpu/gpu_all_gather_optimizer.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_module_config.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
class GpuAllGatherOptimizerTest : public HloTestBase {
public:
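  // Parses `hlo_module` with the given replica/partition counts, runs
  // AllGatherOptimizer, and verifies whether the pass reported a change.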
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module, int64_t num_replicas,
int64_t num_partitions, bool expect_change) {
HloModuleConfig config = GetModuleConfigForTest(
num_replicas,
num_partitions);
config.set_use_spmd_partitioning(num_partitions > 1);
TF_ASSIGN_OR_RETURN(auto module,
ParseAndReturnVerifiedModule(hlo_module, config));
auto changed = AllGatherOptimizer().Run(module.get());
if (!changed.ok()) {
return changed.status();
}
EXPECT_EQ(changed.value(), expect_change);
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
template <HloOpcode oc>
size_t CollectiveCount(std::unique_ptr<HloModule> &module) {
return absl::c_count_if(module->entry_computation()->instructions(),
HloPredicateIsOp<oc>);
}
};
TEST_F(GpuAllGatherOptimizerTest, BranchesOptimized) {
absl::string_view hlo_string = R"(
HloModule ReduceScatter
add {
x = bf16[] parameter(0)
y = bf16[] parameter(1)
ROOT add = bf16[] add(x, y)
}
ENTRY main {
param.1 = bf16[8,128,1024]{2,1,0} parameter(0)
param.2 = bf16[8,128,1024]{2,1,0} parameter(1)
reduce-scatter.1 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.1), channel_id=8, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add
all-gather.1 = bf16[8,128,1024]{2,1,0} all-gather(reduce-scatter.1), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true
reduce-scatter.2 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.2), channel_id=9, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add
all-gather.2 = bf16[8,128,1024]{2,1,0} all-gather(reduce-scatter.2), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true
add.1 = bf16[8,128,1024]{2,1,0} add(all-gather.1, all-gather.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
8,
1,
true));
EXPECT_EQ(CollectiveCount<HloOpcode::kAllGather>(module), 3);
EXPECT_EQ(CollectiveCount<HloOpcode::kReduceScatter>(module), 2);
}
TEST_F(GpuAllGatherOptimizerTest, DisabledSPMDPartitioningJAXBug) {
absl::string_view hlo_string = R"(
HloModule pjit_f, entry_computation_layout={(f32[4,8]{1,0}, f32[4,8]{1,0})->f32[8,8]{1,0}}
ENTRY %main.6_spmd (param: f32[4,8], param.1: f32[4,8]) -> f32[8,8] {
%param = f32[4,8]{1,0} parameter(0), sharding={devices=[2,1]<=[2]}
%all-gather = f32[8,8]{1,0} all-gather(f32[4,8]{1,0} %param), channel_id=1, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true, metadata={op_name="pjit(f)/jit(main)/add" source_file="third_party/py/jax/tests/pjit_test.py" source_line=207}
%param.1 = f32[4,8]{1,0} parameter(1), sharding={devices=[2,1]<=[2]}
%all-gather.1 = f32[8,8]{1,0} all-gather(f32[4,8]{1,0} %param.1), channel_id=2, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true, metadata={op_name="pjit(f)/jit(main)/add" source_file="third_party/py/jax/tests/pjit_test.py" source_line=207}
ROOT %add.0 = f32[8,8]{1,0} add(f32[8,8]{1,0} %all-gather, f32[8,8]{1,0} %all-gather.1), metadata={op_name="pjit(f)/jit(main)/add" source_file="third_party/py/jax/tests/pjit_test.py" source_line=207}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
1,
2,
true));
EXPECT_EQ(CollectiveCount<HloOpcode::kAllGather>(module), 1);
}
TEST_F(GpuAllGatherOptimizerTest, MoreThanSingleUserForAllGather) {
absl::string_view hlo_string = R"(
HloModule ReduceScatter
add {
x = bf16[] parameter(0)
y = bf16[] parameter(1)
ROOT add = bf16[] add(x, y)
}
ENTRY main {
param.1 = bf16[8,128,1024]{2,1,0} parameter(0)
param.2 = bf16[8,128,1024]{2,1,0} parameter(1)
param.3 = bf16[8,128,1024]{2,1,0} parameter(2)
reduce-scatter.1 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.1), channel_id=8, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add
all-gather.1 = bf16[8,128,1024]{2,1,0} all-gather(reduce-scatter.1), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true
reduce-scatter.2 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.2), channel_id=9, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add
all-gather.2 = bf16[8,128,1024]{2,1,0} all-gather(reduce-scatter.2), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true
reduce-scatter.3 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.3), channel_id=9, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add
all-gather.3 = bf16[8,128,1024]{2,1,0} all-gather(reduce-scatter.3), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true
add.1 = bf16[8,128,1024]{2,1,0} add(all-gather.1, all-gather.3)
add.2 = bf16[8,128,1024]{2,1,0} add(all-gather.1, all-gather.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
8,
1,
false));
EXPECT_EQ(CollectiveCount<HloOpcode::kAllGather>(module), 3);
EXPECT_EQ(CollectiveCount<HloOpcode::kReduceScatter>(module), 3);
}
TEST_F(GpuAllGatherOptimizerTest, AllGatherWithOpInBetweenOnRightBranch) {
absl::string_view hlo_string = R"(
HloModule ReduceScatter
add {
x = bf16[] parameter(0)
y = bf16[] parameter(1)
ROOT add = bf16[] add(x, y)
}
ENTRY main {
param.1 = bf16[8,128,1024]{2,1,0} parameter(0)
param.2 = bf16[8,128,1024]{2,1,0} parameter(1)
param.3 = bf16[8,128,1024]{2,1,0} parameter(2)
reduce-scatter.1 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.1), channel_id=8, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add
reduce-scatter.2 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.2), channel_id=9, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add
add.1 = bf16[8,64,1024]{2,1,0} add(reduce-scatter.1, reduce-scatter.2)
all-gather.1 = bf16[8,128,1024]{2,1,0} all-gather(add.1), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true
reduce-scatter.3 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.3), channel_id=9, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add
all-gather.3 = bf16[8,128,1024]{2,1,0} all-gather(reduce-scatter.3), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true
add.2 = bf16[8,128,1024]{2,1,0} add(all-gather.1, all-gather.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
8,
1,
true));
EXPECT_EQ(CollectiveCount<HloOpcode::kAllGather>(module), 3);
EXPECT_EQ(CollectiveCount<HloOpcode::kReduceScatter>(module), 3);
}
TEST_F(GpuAllGatherOptimizerTest, AllGatherOneSided) {
absl::string_view hlo_string = R"(
HloModule ReduceScatter
add {
x = bf16[] parameter(0)
y = bf16[] parameter(1)
ROOT add = bf16[] add(x, y)
}
ENTRY main {
param.1 = bf16[8,128,1024]{2,1,0} parameter(0)
param.2 = bf16[8,128,1024]{2,1,0} parameter(1)
param.3 = bf16[8,128,1024]{2,1,0} parameter(2)
add.1 = bf16[8,128,1024]{2,1,0} add(param.1, param.2)
reduce-scatter = bf16[8,64,1024]{2,1,0} reduce-scatter(param.3), channel_id=9, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add
all-gather = bf16[8,128,1024]{2,1,0} all-gather(reduce-scatter), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true
add.2 = bf16[8,128,1024]{2,1,0} add(all-gather, add.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
8,
1,
false));
EXPECT_EQ(CollectiveCount<HloOpcode::kAllGather>(module), 1);
EXPECT_EQ(CollectiveCount<HloOpcode::kReduceScatter>(module), 1);
}
TEST_F(GpuAllGatherOptimizerTest, DifferentOperandShapes) {
absl::string_view hlo_string = R"(
HloModule TestModule
ENTRY main {
param.1 = bf16[8,64,128]{2,1,0} parameter(0)
param.2 = bf16[8,128,64]{2,1,0} parameter(1)
all-gather.1 = bf16[8,128,128]{2,1,0} all-gather(param.1), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true
all-gather.2 = bf16[8,128,128]{2,1,0} all-gather(param.2), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={2}, use_global_device_ids=true
add.1 = bf16[8,128,128]{2,1,0} add(all-gather.1, all-gather.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string,
8,
1,
false));
}
}
}
} |
2,045 | cpp | tensorflow/tensorflow | triton_fusion_analysis | third_party/xla/xla/service/gpu/triton_fusion_analysis.cc | third_party/xla/xla/service/gpu/triton_fusion_analysis_test.cc | #ifndef XLA_SERVICE_GPU_TRITON_FUSION_ANALYSIS_H_
#define XLA_SERVICE_GPU_TRITON_FUSION_ANALYSIS_H_
#include <map>
#include <optional>
#include <string>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/triton_tiling_propagation.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
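// Analyzes a Triton GEMM fusion rooted at a dot: dimension orders are
// propagated from the dot to the fusion's parameters and output, yielding a
// tensor iteration spec per instruction for each scope (LHS, RHS, META,
// OUTPUT).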
class TritonFusionAnalysis {
absl::Status ExecuteForDotFusion(const HloInstruction& dot, int split_k);
public:
static absl::StatusOr<TritonFusionAnalysis> Execute(
const HloComputation& computation, int split_k = 1);
static absl::Status ExecuteForProducerConsumer(const HloInstruction& producer,
const HloInstruction& consumer,
int split_k = 1);
enum class Scope { LHS = 0, RHS = 1, META = 2, OUTPUT = 3 };
using IterationSpecByInstructionMap =
ConstHloInstructionMap<TensorIterationSpec>;
using IterationSpecByInstructionByScopeMap =
std::map<Scope, IterationSpecByInstructionMap>;
static constexpr int kMaxParameterPerDotOperand = 4;
const TensorIterationSpec::DimIterationSpec* IterSpec(Scope scope,
const HloInstruction*,
int dimension) const;
const ConstHloInstructionSet& ScopeParameters(const Scope scope) const {
return parameters_.at(scope);
}
std::optional<Scope> QueryInstructionScope(const HloInstruction& hlo) const;
std::string ToString() const;
private:
IterationSpecByInstructionByScopeMap iter_specs_;
std::map<Scope, ConstHloInstructionSet> parameters_;
};
namespace triton_fusion {
class FusionContext {
FusionContext(DotProperties properties, DotRequirements requirements)
: properties_(properties), requirements_(requirements) {}
public:
static absl::StatusOr<FusionContext> FromDotOperand(const HloInstruction& dot,
int operand_number,
int split_k = 1);
static FusionContext FromDotOutput(const HloInstruction& dot, int split_k,
DotRequirements requirements);
bool CombineDimOrdersAndReqs(const DimOrdersAndReqs& update);
absl::Status PropagateDimensionOrdersToParameters(
const HloInstruction& origin, ConstHloInstructionSet& parameters,
ConstHloInstructionMap<TensorIterationSpec>& iter_specs);
const DotProperties& dot_properties() const { return properties_; }
const DimOrderMap& dim_orders() const { return dim_orders_; }
const DotRequirements& requirements() const { return requirements_; }
private:
const DotProperties properties_;
DotRequirements requirements_;
DimOrderMap dim_orders_;
};
}
}
}
#endif
#include "xla/service/gpu/triton_fusion_analysis.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <variant>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/gpu/cudnn_support_utils.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/triton_tiling_propagation.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tools/hlo_decomposer.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using triton_fusion::DimOrdersAndReqs;
using triton_fusion::DimOrdersAndReqsOrError;
using triton_fusion::DotRequirements;
using triton_fusion::FusionContext;
using triton_fusion::GetPropagatedDimOrdersAndRequirements;
using triton_fusion::kNoSplitRequirement;
using triton_fusion::TransformDirection;
}
namespace triton_fusion {
absl::StatusOr<FusionContext> FusionContext::FromDotOperand(
const HloInstruction& dot, const int operand_number, const int split_k) {
const int num_split_k_batch_dims = split_k > 1;
int split_k_dimension_index = kNoDimensionIndex;
TF_ASSIGN_OR_RETURN(int contracting_dimension_index,
ContractingDimensionIndex(dot, operand_number));
TF_ASSIGN_OR_RETURN(int non_contracting_dimension_index,
NonContractingDimensionIndex(dot, operand_number));
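  // With split-K, an extra batch dimension sits immediately before the
  // contracting dimension.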
if (split_k > 1) {
split_k_dimension_index = contracting_dimension_index - 1;
}
int splittable_dimension_index = kNoDimensionIndex;
if (operand_number == 0 &&
dot.dot_dimension_numbers().lhs_batch_dimensions_size() -
num_split_k_batch_dims ==
0) {
splittable_dimension_index = non_contracting_dimension_index;
}
FusionContext context(DotProperties{non_contracting_dimension_index,
splittable_dimension_index},
DotRequirements(kNoSplitRequirement));
context.dim_orders_[dot.operand(operand_number)] =
DimensionOrder::FromDotOperandOrOutput(*dot.operand(operand_number),
split_k_dimension_index);
return context;
}
FusionContext FusionContext::FromDotOutput(
const HloInstruction& dot, const int split_k,
DotRequirements requirements) {
int splittable_dimension_index = kNoDimensionIndex;
if (requirements.splittable_dimension_major_part_size > 1) {
splittable_dimension_index = (split_k > 1) ? 1 : 0;
}
  FusionContext context(DotProperties{/*noncontracting_dimension=*/-1,
                                      splittable_dimension_index},
std::move(requirements));
context.dim_orders_[&dot] = DimensionOrder::FromDotOperandOrOutput(dot);
return context;
}
namespace {
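// Number of new parameters fusing `hlo` would require: a non-scalar constant
// is equivalent to a parameter (one input for one output, net zero), while
// any other op trades its single output for all of its operands.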
int64_t NumAddedParameters(const HloInstruction& hlo) {
if (hlo.opcode() == HloOpcode::kConstant &&
!ShapeUtil::IsScalar(hlo.shape())) {
return 0;
}
return hlo.operand_count() - 1;
}
}
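// Merges `update` into the current state; fails if an instruction already
// has a physically different dimension order or if the dot requirements
// conflict.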
bool FusionContext::CombineDimOrdersAndReqs(const DimOrdersAndReqs& update) {
for (const auto& [key, value] : update.dim_orders) {
auto it = dim_orders_.find(key);
if (it != dim_orders_.cend() && !it->second.IsPhysicallyEquivalent(value)) {
return false;
}
}
DotRequirementsOrError requirements_or_error =
CombineDotRequirements(requirements_, update.requirements);
if (std::holds_alternative<FusionDecision>(requirements_or_error)) {
return false;
}
requirements_ = std::move(std::get<DotRequirements>(requirements_or_error));
dim_orders_.insert(update.dim_orders.begin(), update.dim_orders.end());
return true;
}
absl::Status FusionContext::PropagateDimensionOrdersToParameters(
const HloInstruction& origin, ConstHloInstructionSet& parameters,
ConstHloInstructionMap<TensorIterationSpec>& iter_specs) {
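  // Breadth-first traversal from `origin` toward the parameters, propagating
  // dimension orders from outputs to inputs.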
absl::flat_hash_set<const HloInstruction*> visited;
std::queue<const HloInstruction*> to_process;
visited.insert(&origin);
to_process.push(&origin);
while (!to_process.empty()) {
const HloInstruction* hlo = to_process.front();
to_process.pop();
if (hlo->opcode() == HloOpcode::kParameter) {
if (!parameters.insert(hlo).second) {
return FailedPrecondition(
"A parameter is read differently by different users. hlo: %s",
hlo->ToString());
}
VLOG(5) << hlo->ToString();
}
DimOrdersAndReqsOrError result = GetPropagatedDimOrdersAndRequirements(
*hlo, dim_orders_.at(hlo), TransformDirection::kOutputToInput,
properties_);
if (!std::holds_alternative<DimOrdersAndReqs>(result)) {
return FailedPrecondition(
"Can not propagate dim orders and requirements.");
}
if (!CombineDimOrdersAndReqs(std::get<DimOrdersAndReqs>(result))) {
return FailedPrecondition("Can not combine dim orders and requirements.");
}
iter_specs[hlo] = dim_orders_.at(hlo).ToTensorIterationSpec();
for (const HloInstruction* operand : hlo->operands()) {
if (!visited.insert(operand).second) {
continue;
}
if (operand->opcode() == HloOpcode::kDot) {
continue;
}
to_process.push(operand);
}
}
return absl::OkStatus();
}
}
absl::StatusOr<TritonFusionAnalysis> TritonFusionAnalysis::Execute(
const HloComputation& computation, const int split_k) {
VLOG(5) << computation.ToString(HloPrintOptions::ShortParsable());
TritonFusionAnalysis analysis;
const HloInstruction* dot =
hlo_query::GetFirstInstructionWithOpcode(computation, HloOpcode::kDot);
TF_RET_CHECK(dot != nullptr);
TF_RETURN_IF_ERROR(analysis.ExecuteForDotFusion(*dot, split_k));
return analysis;
}
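// Analyzes a prospective producer-consumer fusion by extracting the pair
// into a fresh module, fusing them there, and running the analysis on the
// resulting fused computation.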
absl::Status TritonFusionAnalysis::ExecuteForProducerConsumer(
const HloInstruction& producer, const HloInstruction& consumer,
int split_k) {
std::unique_ptr<HloModule> new_module =
ExtractProducerConsumerIntoNewModule(producer, consumer);
auto* new_producer =
new_module->entry_computation()->GetInstructionWithName(producer.name());
auto* new_consumer =
new_module->entry_computation()->GetInstructionWithName(consumer.name());
std::unique_ptr<HloInstruction> fusion_instruction_holder;
HloInstruction* fusion_instruction;
if (new_consumer->opcode() == HloOpcode::kFusion) {
fusion_instruction = new_consumer;
} else {
fusion_instruction_holder = HloInstruction::CreateFusion(
new_consumer->shape(), new_producer->fusion_kind(), new_consumer);
fusion_instruction = fusion_instruction_holder.get();
}
if (new_producer->opcode() == HloOpcode::kFusion) {
fusion_instruction->MergeFusionInstruction(new_producer);
} else {
fusion_instruction->FuseInstruction(new_producer);
}
auto* fused_computation =
fusion_instruction->fused_instructions_computation();
return Execute(*fused_computation, split_k).status();
}
absl::Status TritonFusionAnalysis::ExecuteForDotFusion(
const HloInstruction& dot, const int split_k) {
DotRequirements lhs_requirements(kNoSplitRequirement);
for (const Scope scope : {Scope::LHS, Scope::RHS, Scope::META}) {
const int operand_number = static_cast<int>(scope);
if (dot.operand_count() < operand_number + 1) {
continue;
}
TF_ASSIGN_OR_RETURN(auto context, FusionContext::FromDotOperand(
dot, operand_number, split_k));
TF_RETURN_IF_ERROR(context.PropagateDimensionOrdersToParameters(
*dot.operand(operand_number), parameters_[scope], iter_specs_[scope]));
if (scope == Scope::LHS) {
lhs_requirements = context.requirements();
}
}
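  // Starting from the dot, follow the single-user chain toward the fusion
  // root, propagating dimension orders input-to-output.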
auto context = FusionContext::FromDotOutput(dot, split_k, lhs_requirements);
  const HloInstruction* output = &dot;
while (!output->IsRoot()) {
TF_RET_CHECK(output->user_count() == 1);
const HloInstruction* input = output;
if (IsWorkspaceAllocationRoot(*output->users()[0])) {
break;
}
output = output->users()[0];
DimOrdersAndReqsOrError result = GetPropagatedDimOrdersAndRequirements(
*output, context.dim_orders().at(input),
TransformDirection::kInputToOutput, context.dot_properties());
TF_RET_CHECK(std::holds_alternative<DimOrdersAndReqs>(result));
TF_RET_CHECK(
context.CombineDimOrdersAndReqs(std::get<DimOrdersAndReqs>(result)));
}
TF_RET_CHECK(
iter_specs_[Scope::OUTPUT]
.insert(
{output, context.dim_orders().at(output).ToTensorIterationSpec()})
.second);
parameters_[Scope::OUTPUT] = {};
if (output != &dot) {
TF_RETURN_IF_ERROR(context.PropagateDimensionOrdersToParameters(
*output, parameters_[Scope::OUTPUT], iter_specs_[Scope::OUTPUT]));
}
return absl::OkStatus();
}
std::optional<TritonFusionAnalysis::Scope>
TritonFusionAnalysis::QueryInstructionScope(const HloInstruction& hlo) const {
for (const Scope& scope : {Scope::LHS, Scope::RHS, Scope::OUTPUT}) {
if (iter_specs_.at(scope).count(&hlo) > 0) {
return scope;
}
}
LOG(WARNING) << "No scope for hlo: " << hlo.ToString();
return std::nullopt;
}
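// Returns the iteration spec of `hlo` along `dimension` in `scope`, or
// nullptr if the instruction is not part of that scope.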
const TensorIterationSpec::DimIterationSpec* TritonFusionAnalysis::IterSpec(
const TritonFusionAnalysis::Scope scope, const HloInstruction* hlo,
const int dimension) const {
auto hlo_spec = iter_specs_.at(scope).find(hlo);
if (hlo_spec != iter_specs_.at(scope).cend()) {
return hlo_spec->second.Find(dimension);
}
return nullptr;
}
namespace {
std::string IterationSpecByInstructionMapToString(
const TritonFusionAnalysis::IterationSpecByInstructionMap& m) {
return absl::StrCat("IterSpec{",
absl::StrJoin(m, ", ",
[&](std::string* s, const auto& kv) {
absl::StrAppend(s, kv.first->name(), ": ",
kv.second.ToString());
}),
"}");
}
std::string ScopeToString(TritonFusionAnalysis::Scope s) {
switch (s) {
case TritonFusionAnalysis::Scope::LHS:
return "LHS";
case TritonFusionAnalysis::Scope::RHS:
return "RHS";
case TritonFusionAnalysis::Scope::META:
return "META";
case TritonFusionAnalysis::Scope::OUTPUT:
return "OUTPUT";
}
}
}
std::string TritonFusionAnalysis::ToString() const {
return absl::StrCat(
"TritonFusionAnalysis{\n",
absl::StrJoin(iter_specs_, ",\n",
[&](std::string* s, const auto& kv) {
absl::StrAppend(
s, ScopeToString(kv.first), ": ",
IterationSpecByInstructionMapToString(kv.second));
}),
"\n}");
}
}
} | #include "xla/service/gpu/triton_fusion_analysis.h"
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gemm_fusion.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::testing::FieldsAre;
using TritonDotAnalysisTest = HloTestBase;
TEST_F(TritonDotAnalysisTest, QueryingOutputScopeParametersAlwaysWorks) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_dot {
p0 = f32[8,8] parameter(0)
ROOT dot = f32[8,8] dot(p0, p0),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f32[8,8] parameter(0)
ROOT r = f32[8,8] fusion(p0), kind=kCustom, calls=triton_dot
})"));
TF_ASSERT_OK_AND_ASSIGN(
const auto analysis,
TritonFusionAnalysis::Execute(*module->entry_computation()
->root_instruction()
->called_computations()[0]));
EXPECT_TRUE(
analysis.ScopeParameters(TritonFusionAnalysis::Scope::OUTPUT).empty());
}
TEST_F(TritonDotAnalysisTest, NopBitcasts) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
param_0.1 = s8[48,4]{1,0} parameter(0)
bitcast.18 = s8[1,48,4]{2,1,0} bitcast(param_0.1)
bitcast.19 = s8[48,4]{1,0} bitcast(bitcast.18)
convert.4 = bf16[48,4]{1,0} convert(bitcast.19)
param_1.1 = bf16[4,3]{1,0} parameter(1)
ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = s8[48,4]{1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
custom-call = bf16[48,3]{1,0} custom-call(p0, p1),
custom_call_target="__triton",
called_computations={triton_dot}
ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(4, 48, 0,
48, ElementsAre(48))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(1, 4, 0,
4, ElementsAre(4))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(3, 4, 0,
4, ElementsAre(4))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 3, 0,
3, ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, DoNotRemoveTrivialDimensionForDot) {
const std::string hlo_text = R"(
HloModule t, is_scheduled=true
triton_dot {
param_0.1 = f32[137,115]{1,0} parameter(0)
param_1.1 = f32[1,115]{1,0} parameter(1)
ROOT dot = f32[137,1]{1,0} dot(param_0.1, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = f32[137,115]{1,0} parameter(0)
p1 = f32[1,115]{1,0} parameter(1)
ROOT custom-call = f32[137,1]{1,0} fusion(p0, p1), kind=kCustom,
calls=triton_dot,
backend_config={"fusion_backend_config": {kind: "__triton_gemm",
triton_gemm_config: {"block_m":16,"block_n":64,"block_k":32,
"split_k":1,"num_stages":1,"num_warps":2,
"num_ctas":1}}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(115, 137, 0,
137, ElementsAre(137))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(1, 115, 0,
115, ElementsAre(115))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(115, 1, 0,
1, ElementsAre(1))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 115, 0,
115, ElementsAre(115))));
}
TEST_F(TritonDotAnalysisTest, Merge) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
param_0.1 = s8[1,8,6,4]{3,2,1,0} parameter(0)
bitcast.18 = s8[48,4]{1,0} bitcast(param_0.1)
convert.4 = bf16[48,4]{1,0} convert(bitcast.18)
param_1.1 = bf16[4,3]{1,0} parameter(1)
ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = s8[1,8,6,4]{3,2,1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
custom-call = bf16[48,3]{1,0} custom-call(p0, p1),
custom_call_target="__triton",
called_computations={triton_dot}
ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(4, 6 * 8,
0, 6 * 8,
ElementsAre(6, 8))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(1, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(3, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 3,
0, 3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, Split) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
%parameter_1 = f32[24000,2]{1,0} parameter(1)
%convert.15 = f16[24000,2]{1,0} convert(%parameter_1)
%parameter_0 = f16[4]{0} parameter(0)
%bitcast.45 = f16[2,2]{1,0} bitcast(%parameter_0)
ROOT %dot.26 = f16[24000,2]{1,0} dot(%convert.15, %bitcast.45),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f16[4]{0} parameter(0)
p1 = f32[24000,2]{1,0} parameter(1)
ROOT r = f16[24000,2]{1,0} custom-call(p0, p1),
custom_call_target="__triton",
called_computations={triton_dot}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p1);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p0);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p1, 0),
ElementsAre(FieldsAre(2, 24000,
0, 24000,
ElementsAre(24000))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p1, 1),
ElementsAre(FieldsAre(1, 2,
0, 2,
ElementsAre(2))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p0, 0),
ElementsAre(FieldsAre(2, 2,
0, 2,
ElementsAre(2))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p0, 1),
ElementsAre(FieldsAre(1, 2,
0, 2,
ElementsAre(2))));
}
TEST_F(TritonDotAnalysisTest, TransposeMerge) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
param_0.1 = s8[1,4,8,6]{3,2,1,0} parameter(0)
transpose.3 = s8[1,8,6,4]{3,2,1,0} transpose(param_0.1), dimensions={0,2,3,1}
bitcast.18 = s8[48,4]{1,0} bitcast(transpose.3)
convert.4 = bf16[48,4]{1,0} convert(bitcast.18)
param_1.1 = bf16[4,3]{1,0} parameter(1)
ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = s8[1,4,8,6]{3,2,1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
custom-call = bf16[48,3]{1,0} custom-call(p0, p1),
custom_call_target="__triton",
called_computations={triton_dot}
ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(1, 8 * 6,
0, 8 * 6,
ElementsAre(6, 8))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(8 * 6, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(3, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 3,
0, 3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, CopyMerge) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
param_0.1 = s8[1,4,8,6]{3,2,1,0} parameter(0)
bitcast.99 = s8[1,8,6,4]{2,1,3,0} bitcast(param_0.1)
copy.3 = s8[1,8,6,4]{3,2,1,0} copy(bitcast.99)
bitcast.18 = s8[48,4]{1,0} bitcast(copy.3)
convert.4 = bf16[48,4]{1,0} convert(bitcast.18)
param_1.1 = bf16[4,3]{1,0} parameter(1)
ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = s8[1,4,8,6]{3,2,1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
custom-call = bf16[48,3]{1,0} custom-call(p0, p1),
custom_call_target="__triton",
called_computations={triton_dot}
ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(1, 8 * 6,
0, 8 * 6,
ElementsAre(6, 8))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(8 * 6, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(3, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 3,
0, 3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, TransposeMergeNCN) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
param_0.1 = bf16[3,4,8,1]{3,2,1,0} parameter(0)
transpose.3 = bf16[3,8,1,4]{3,2,1,0} transpose(param_0.1), dimensions={0,2,3,1}
bitcast.18 = bf16[24,4]{1,0} bitcast(transpose.3)
param_1.1 = bf16[4,3]{1,0} parameter(1)
ROOT dot = bf16[24,3]{1,0} dot(bitcast.18, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = bf16[3,4,8,1]{3,2,1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
custom-call = bf16[24,3]{1,0} custom-call(p0, p1),
custom_call_target="__triton", called_computations={triton_dot}
ROOT bitcast.2 = bf16[3,8,1,3]{3,2,1,0} bitcast(custom-call)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(1, 8,
0, 8,
ElementsAre(8)),
FieldsAre(4 * 8, 3,
0, 3,
ElementsAre(3))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(8, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(3, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 3,
0, 3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, TransposeOutput) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
dot = bf16[24,3]{1,0} dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
bc = bf16[12,2,3]{2,1,0} bitcast(dot)
ROOT t = bf16[3,12,2]{2,1,0} transpose(bc), dimensions={2,0,1}
}
ENTRY e {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
ROOT r = bf16[3,12,2]{2,1,0} fusion(p0, p1), kind=kCustom,
calls=triton_dot
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* dot_output = dot_computation->root_instruction();
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, dot_output, 0),
ElementsAre(FieldsAre(1, 24, 0,
24,
ElementsAre(2, 12))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, dot_output, 1),
ElementsAre(FieldsAre(24, 3, 0,
3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, OutputParameterIsHandled) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule t
triton_dot {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
dot = bf16[24,3]{1,0} dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
p2 = f16[3,24]{1,0} parameter(2)
p2t = f16[24,3]{1,0} transpose(p2), dimensions={1,0}
p2tc = bf16[24,3]{1,0} convert(p2t)
ROOT r = bf16[24,3]{1,0} divide(p2tc, dot)
}
ENTRY e {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
p2 = f16[3,24]{1,0} parameter(2)
ROOT r = bf16[24,3]{1,0} fusion(p0, p1, p2), kind=kCustom,
calls=triton_dot
})"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* output_param =
dot_computation->parameter_instruction(2);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(
analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 0)
->size(),
1);
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 0),
ElementsAre(FieldsAre(1, 24, 0,
24,
ElementsAre(24))));
EXPECT_EQ(
analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 1)
->size(),
1);
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 1),
ElementsAre(FieldsAre(24, 3, 0,
3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, InputBroadcastFromScalarIsHandled) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule t
triton_dot {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[] parameter(1)
p1b = bf16[4,3] broadcast(p1)
ROOT dot = bf16[24,3]{1,0} dot(p0, p1b),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[] parameter(1)
ROOT r = bf16[24,3]{1,0} fusion(p0, p1), kind=kCustom,
calls=triton_dot
})"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* scalar = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, scalar, 0),
nullptr);
EXPECT_EQ(analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, scalar, 1),
nullptr);
}
TEST_F(TritonDotAnalysisTest, InputBroadcastFromVectorIsHandled) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule t
triton_dot {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4] parameter(1)
p1b = bf16[4,3] broadcast(p1), dimensions={0}
ROOT dot = bf16[24,3]{1,0} dot(p0, p1b),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4] parameter(1)
ROOT r = bf16[24,3]{1,0} fusion(p0, p1), kind=kCustom,
calls=triton_dot
})"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* vector = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(
analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, vector, 0)->size(),
1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, vector, 0),
ElementsAre(FieldsAre(1, 4,
0, 4,
ElementsAre(4))));
}
TEST_F(TritonDotAnalysisTest, OutputBroadcastIsNotAccepted) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule t
ENTRY e {
p0 = f16[2,35] parameter(0)
p0c = bf16[2,35] convert(p0)
p1 = bf16[35,2] parameter(1)
dot = bf16[2,2] dot(p0c, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT bc = bf16[2,2,100] broadcast(dot), dimensions={0,1}
})"));
EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{
se::CudaComputeCapability::AMPERE, 0})
.Run(module.get())
.value());
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kBroadcast);
}
TEST_F(TritonDotAnalysisTest, DegenerateSplitFragmentIsHandled) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_gemm_r {
Arg_0.1 = s8[30,913,8,21]{3,2,1,0} parameter(0)
bitcast.6 = s8[30,8,21,913]{2,1,3,0} bitcast(Arg_0.1)
copy.7 = s8[30,8,21,913]{3,2,1,0} copy(bitcast.6)
bitcast.8 = s8[5040,913]{1,0} bitcast(copy.7)
convert.9 = bf16[5040,913]{1,0} convert(bitcast.8)
bitcast.32 = bf16[58,913]{1,0} parameter(1)
dot.33 = bf16[5040,58]{1,0} dot(convert.9, bitcast.32),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
bitcast.34 = bf16[30,8,21,58]{3,2,1,0} bitcast(dot.33)
copy.35 = bf16[30,8,21,58]{2,1,3,0} copy(bitcast.34)
ROOT bitcast.41 = bf16[30,1,58,8,21]{4,3,2,1,0} bitcast(copy.35)
}
ENTRY e {
Arg_0.1 = s8[30,913,8,21]{3,2,1,0} parameter(0)
Arg_1.2 = bf16[58,913]{1,0} parameter(1)
ROOT r = bf16[30,1,58,8,21]{4,3,2,1,0} fusion(Arg_0.1, Arg_1.2), kind=kCustom,
calls=triton_gemm_r,
backend_config={kind: "__triton_gemm"}
})"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT,
dot_computation->root_instruction(), 0),
ElementsAre(FieldsAre(1, 8 * 21,
0, 8 * 21,
ElementsAre(21, 8)),
FieldsAre(8 * 21 * 58, 30,
0, 30,
ElementsAre(30))));
}
TEST_F(TritonDotAnalysisTest,
HandlesFurtherPropagationFromTrivialSizedTensorGracefully) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_gemm_r {
a = f32[3,3]{1,0} parameter(0)
constant = f32[1,1]{1,0} constant({ {0} })
broadcast = f32[1,1]{1,0} broadcast(constant), dimensions={0,1}
reshape = f32[] reshape(broadcast)
broadcast2 = f32[3,3]{1,0} broadcast(reshape), dimensions={}
ROOT dot = f32[3,3]{1,0} dot(a, broadcast2),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
ENTRY e {
a = f32[3,3]{1,0} parameter(0)
ROOT dot = f32[3,3]{1,0} fusion(a), kind=kCustom, calls=triton_gemm_r,
backend_config={kind: "__triton_gemm"}
}
)"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
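  // Only checks that the analysis runs without crashing; its result is
  // intentionally ignored.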
absl::StatusOr<TritonFusionAnalysis> analysis =
TritonFusionAnalysis::Execute(*dot_computation);
(void)analysis;
}
TEST_F(TritonDotAnalysisTest, DynamicSliceIsSupported) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_gemm {
dot_lhs = f32[2,18]{1,0} parameter(0)
dynamic_slice_input = f32[96,2]{1,0} parameter(1)
start_index0 = s32[] parameter(2)
start_index1 = s32[] parameter(3)
dynamic_slice = f32[64,2]{1,0} dynamic-slice(dynamic_slice_input,
start_index0, start_index1),
dynamic_slice_sizes={64,2}
ROOT dot = f32[18,64]{1,0} dot(dot_lhs, dynamic_slice),
lhs_contracting_dims={0}, rhs_contracting_dims={1}
}
ENTRY e {
dot_lhs = f32[2,18]{1,0} parameter(0)
dynamic_slice_input = f32[96,2]{1,0} parameter(1)
start_index0 = s32[] parameter(2)
start_index1 = s32[] parameter(3)
ROOT triton_gemm_d = f32[18,64]{1,0} fusion(dot_lhs, dynamic_slice_input,
start_index0, start_index1),
kind=kCustom,
calls=triton_gemm,
backend_config={"kind":"__triton_gemm"}
}
)"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(18, 2,
0, 2,
ElementsAre(2))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(1, 18,
0, 18,
ElementsAre(18))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(2, |
2,046 | cpp | tensorflow/tensorflow | cudnn_vectorize_convolutions | third_party/xla/xla/service/gpu/transforms/cudnn_vectorize_convolutions.cc | third_party/xla/xla/service/gpu/transforms/cudnn_vectorize_convolutions_test.cc | #ifndef XLA_SERVICE_GPU_CUDNN_VECTORIZE_CONVOLUTIONS_H_
#define XLA_SERVICE_GPU_CUDNN_VECTORIZE_CONVOLUTIONS_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
namespace xla {
namespace gpu {
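// Rewrites s8/u8 convolutions to the vectorized (NCHW_VECT_C-style) layouts
// that cuDNN's optimized integer kernels expect: a feature dimension of size C
// becomes (C/N, N) for N = 4 or 32, depending on the GPU generation.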
class CudnnVectorizeConvolutions : public HloModulePass {
public:
explicit CudnnVectorizeConvolutions(
se::CudaComputeCapability compute_capability,
se::dnn::VersionInfo cudnn_version)
: compute_capability_(compute_capability),
cudnn_version_(cudnn_version) {}
absl::string_view name() const override {
return "cudnn_vectorize_convolutions";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const se::CudaComputeCapability compute_capability_;
const se::dnn::VersionInfo cudnn_version_;
};
}
}
#endif
#include "xla/service/gpu/cudnn_vectorize_convolutions.h"
#include <cstdint>
#include <optional>
#include <string>
#include <tuple>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/cudnn_support_utils.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
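// Returns the forward-convolution custom-calls in `comp` whose input and
// output element types are both s8 or both u8 -- the only convs this pass
// vectorizes.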
static std::vector<HloCustomCallInstruction*> GetRelevantConvs(
HloComputation* comp) {
std::vector<HloCustomCallInstruction*> convs;
for (HloInstruction* instr : comp->instructions()) {
if (instr->opcode() != HloOpcode::kCustomCall ||
(instr->custom_call_target() != kCudnnConvForwardCallTarget &&
instr->custom_call_target() !=
kCudnnConvBiasActivationForwardCallTarget) ||
instr->operand_count() < 2) {
continue;
}
PrimitiveType input_ty = instr->operand(0)->shape().element_type();
PrimitiveType output_ty = instr->shape().tuple_shapes(0).element_type();
if (input_ty == output_ty && (input_ty == S8 || input_ty == U8)) {
convs.push_back(Cast<HloCustomCallInstruction>(instr));
}
}
return convs;
}
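// Builds `b` with `root` as its root and deep-clones the resulting entry
// computation into the module that owns `sibling_computation`, so it can be
// used as a called computation there.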
static absl::StatusOr<HloComputation*> BuilderToHloComputation(
XlaBuilder& b, XlaOp root, HloComputation* sibling_computation) {
TF_ASSIGN_OR_RETURN(XlaComputation comp, b.Build(root));
TF_ASSIGN_OR_RETURN(ProgramShape program_shape, comp.GetProgramShape());
HloModuleConfig config(program_shape);
TF_ASSIGN_OR_RETURN(auto new_module,
HloModule::CreateFromProto(comp.proto(), config));
HloModule* dest_module = sibling_computation->parent();
HloCloneContext context(dest_module);
return dest_module->DeepCloneComputation(new_module->entry_computation(),
&context);
}
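// Reshapes `instr` so that dimension `dim` (whose size must be divisible by
// `vect_size`) becomes two dimensions (size / vect_size, vect_size).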
static XlaOp SplitAtDim(XlaOp instr, int64_t dim, int64_t vect_size) {
XlaBuilder& b = *instr.builder();
Shape shape = b.GetShape(instr).value();
DimensionVector new_dims(shape.dimensions().begin(),
shape.dimensions().end());
CHECK_EQ(new_dims[dim] % vect_size, 0);
new_dims[dim] /= vect_size;
new_dims.insert(new_dims.begin() + dim + 1, vect_size);
return Reshape(instr, new_dims);
}
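// Shape-only counterpart of SplitAtDim.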
static Shape SplitShapeAtDim(Shape shape, int64_t dim, int64_t vect_size) {
DimensionVector new_dims(shape.dimensions().begin(),
shape.dimensions().end());
CHECK_EQ(new_dims[dim] % vect_size, 0);
new_dims[dim] /= vect_size;
new_dims.insert(new_dims.begin() + dim + 1, vect_size);
return ShapeUtil::MakeShape(shape.element_type(), new_dims);
}
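// Transposes `instr` so that dimension `src` moves to index `dst`, keeping the
// relative order of all other dimensions. `dst` is interpreted in the original
// numbering, so when src < dst the moved dimension lands at dst - 1 once `src`
// has been removed.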
static XlaOp MoveDim(XlaOp instr, int64_t src, int64_t dst) {
XlaBuilder& b = *instr.builder();
int64_t rank = b.GetShape(instr)->dimensions_size();
DimensionVector idxs(rank);
absl::c_iota(idxs, 0);
if (src < dst) {
idxs.insert(idxs.begin() + dst, src);
idxs.erase(idxs.begin() + src);
} else {
idxs.erase(idxs.begin() + src);
idxs.insert(idxs.begin() + dst, src);
}
return Transpose(instr, idxs);
}
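// Grows the existing vector dimension `vect_dim` of `instr` to `vect_size` by
// splitting the needed factor out of dimension `dim` and folding it in.
// For example, s8[10,20,30,16,4] with dim=3, vect_dim=4, vect_size=32 becomes
// s8[10,20,30,2,32].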
static XlaOp RevectorizeInstr(XlaOp instr, int64_t dim, int64_t vect_dim,
int64_t vect_size) {
XlaBuilder& b = *instr.builder();
Shape shape = b.GetShape(instr).value();
auto size = [&](int64_t d) { return shape.dimensions(d); };
CHECK_LE(size(vect_dim), vect_size);
CHECK_EQ(vect_size % size(vect_dim), 0);
int64_t split_factor = vect_size / size(vect_dim);
CHECK_EQ(size(dim) % split_factor, 0);
instr = SplitAtDim(instr, dim, split_factor);
if (vect_dim > dim) {
vect_dim++;
}
instr = MoveDim(instr, dim + 1, vect_dim);
if (vect_dim > dim) {
vect_dim--;
}
return Collapse(instr, {vect_dim, vect_dim + 1});
}
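// Inverse of RevectorizeInstr: shrinks `vect_dim` back down to
// `orig_vect_size`, folding the extra factor back into dimension `dim`.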
static XlaOp UnrevectorizeInstr(XlaOp instr, int64_t dim, int64_t vect_dim,
int64_t orig_vect_size) {
XlaBuilder& b = *instr.builder();
Shape shape = b.GetShape(instr).value();
auto size = [&](int64_t d) { return shape.dimensions(d); };
CHECK_GE(size(vect_dim), orig_vect_size);
CHECK_EQ(size(vect_dim) % orig_vect_size, 0);
instr = SplitAtDim(instr, vect_dim, orig_vect_size);
if (dim > vect_dim) {
dim++;
}
instr = MoveDim(instr, vect_dim, dim + 1);
if (dim > vect_dim) {
dim--;
}
return Collapse(instr, {dim, dim + 1});
}
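// Adjusts convolution dimension numbers after the feature dimensions have been
// split: every dimension index past an inserted vector dimension is shifted up
// by one. The kernel dimensions are skipped when the filter has been reordered
// for cuDNN, because ReorderInt8NchwVect rewrites them itself.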
static ConvolutionDimensionNumbers VectorizeDnums(
ConvolutionDimensionNumbers dnums, bool reordered_filter) {
int64_t input_vect_dim = dnums.input_feature_dimension();
if (dnums.input_batch_dimension() > input_vect_dim) {
dnums.set_input_batch_dimension(dnums.input_batch_dimension() + 1);
}
for (int64_t& d : *dnums.mutable_input_spatial_dimensions()) {
if (d > input_vect_dim) {
++d;
}
}
if (!reordered_filter) {
int64_t kernel_vect_dim = dnums.kernel_input_feature_dimension();
if (dnums.kernel_output_feature_dimension() > kernel_vect_dim) {
dnums.set_kernel_output_feature_dimension(
dnums.kernel_output_feature_dimension() + 1);
}
for (int64_t& d : *dnums.mutable_kernel_spatial_dimensions()) {
if (d > kernel_vect_dim) {
++d;
}
}
}
int64_t output_vect_dim = dnums.output_feature_dimension();
if (dnums.output_batch_dimension() > output_vect_dim) {
dnums.set_output_batch_dimension(dnums.output_batch_dimension() + 1);
}
for (int64_t& d : *dnums.mutable_output_spatial_dimensions()) {
if (d > output_vect_dim) {
++d;
}
}
return dnums;
}
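// Reorders the filter (and bias, if present) for int8x32 convolutions via
// reshape+transpose+reshape, replacing a runtime cudnnReorderFilterAndBias
// call, and records reordered_int8_nchw_vect in the conv's backend config.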
absl::Status ReorderInt8NchwVect(HloCustomCallInstruction* conv,
XlaOp* operands) {
bool has_bias = conv->operand_count() > 2;
VLOG(1) << "Reordering filter" << (has_bias ? " and bias" : "")
<< " (replacement for cudnnReorderFilterAndBias)";
auto builder = operands->builder();
ConvolutionDimensionNumbers dnums = conv->convolution_dimension_numbers();
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
conv->backend_config<GpuBackendConfig>());
CudnnConvBackendConfig& config =
*gpu_config.mutable_cudnn_conv_backend_config();
config.set_reordered_int8_nchw_vect(true);
TF_RETURN_IF_ERROR(conv->set_backend_config(gpu_config));
TF_ASSIGN_OR_RETURN(Shape filter_shape, builder->GetShape(operands[1]));
TF_ASSIGN_OR_RETURN(auto reorder, CudnnInferTransposeForFilterReordering(
filter_shape, dnums));
XlaOp reshape = Reshape(reorder.transpose_shape, operands[1]);
XlaOp transpose = Transpose(reshape, reorder.permutation);
operands[1] = Reshape(reorder.result_shape, transpose);
dnums.set_kernel_output_feature_dimension(0);
dnums.set_kernel_input_feature_dimension(1);
dnums.set_kernel_spatial_dimensions(0, 2);
dnums.set_kernel_spatial_dimensions(1, 3);
conv->set_convolution_dimension_numbers(dnums);
if (has_bias) {
TF_ASSIGN_OR_RETURN(Shape bias_shape, builder->GetShape(operands[2]));
TF_ASSIGN_OR_RETURN(reorder,
CudnnInferTransposeForBiasReordering(bias_shape));
reshape = Reshape(reorder.transpose_shape, operands[2]);
transpose = Transpose(reshape, reorder.permutation);
operands[2] = Reshape(reorder.result_shape, transpose);
}
return absl::OkStatus();
}
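// Tries to re-vectorize an already-vectorized integer conv to vector size
// `vect_size` (e.g. int8x4 -> int8x32). Returns false, leaving the conv
// unchanged, if the shapes or the target GPU do not allow it.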
static absl::StatusOr<bool> TryRevectorizeConv(
const se::CudaComputeCapability& compute_capability,
const se::dnn::VersionInfo& cudnn_version, HloCustomCallInstruction* conv,
int vect_size) {
const Shape& input_shape = conv->operand(0)->shape();
const Shape& kernel_shape = conv->operand(1)->shape();
const Shape& output_shape = conv->shape().tuple_shapes(0);
const ConvolutionDimensionNumbers* dnums =
&conv->convolution_dimension_numbers();
std::optional<int64_t> input_vect_dim;
std::optional<int64_t> kernel_vect_dim;
std::optional<int64_t> output_vect_dim;
std::tie(input_vect_dim, kernel_vect_dim, output_vect_dim) =
FindVectorizedFeatureDims(*dnums, input_shape, kernel_shape,
output_shape);
if (!input_vect_dim.has_value() || !kernel_vect_dim.has_value() ||
!output_vect_dim.has_value()) {
return false;
}
int64_t input_feat_size =
input_shape.dimensions(dnums->input_feature_dimension());
int64_t output_feat_size =
output_shape.dimensions(dnums->output_feature_dimension());
int64_t input_vect_size = input_shape.dimensions(*input_vect_dim);
int64_t output_vect_size = output_shape.dimensions(*output_vect_dim);
if (vect_size % input_vect_size != 0 || vect_size % output_vect_size != 0 ||
input_feat_size % (vect_size / input_vect_size) != 0 ||
output_feat_size % (vect_size / output_vect_size) != 0) {
return false;
}
if (primitive_util::IsIntegralType(input_shape.element_type())) {
TF_ASSIGN_OR_RETURN(bool supported_target_vectorization,
CudnnSupportsOptimizedIntegerConvolution(
compute_capability, *conv, vect_size));
if (!supported_target_vectorization) {
VLOG(3) << "Skipping re-vectorization of conv to vector size: "
<< vect_size << ": " << conv->ToString();
return false;
}
}
VLOG(1) << "Re-vectorizing conv channels from "
<< input_shape.dimensions(*input_vect_dim) << " to " << vect_size
<< ": " << conv->ToString();
XlaBuilder b(absl::StrCat(conv->name(), ".revectorized"));
b.SetOpMetadata(conv->metadata());
XlaOp filter = Parameter(&b, 1, conv->operand(1)->shape(), "filter");
absl::InlinedVector<XlaOp, 4> new_operands = {
RevectorizeInstr(Parameter(&b, 0, conv->operand(0)->shape(), "input"),
dnums->input_feature_dimension(), *input_vect_dim,
vect_size),
RevectorizeInstr(filter, dnums->kernel_input_feature_dimension(),
*kernel_vect_dim, vect_size),
};
if (conv->operand_count() > 2) {
new_operands.push_back(Parameter(&b, 2, conv->operand(2)->shape(), "bias"));
}
if (conv->operand_count() > 3) {
new_operands.push_back(RevectorizeInstr(
Parameter(&b, 3, conv->operand(3)->shape(), "side_input"),
dnums->input_feature_dimension(), *input_vect_dim, vect_size));
}
if (conv->operand_count() > 4) {
return InvalidArgument(
"Don't understand a conv with more than 4 arguments: %s",
conv->ToString());
}
const auto& debug_options = conv->GetModule()->config().debug_options();
bool use_reordering =
input_shape.element_type() == xla::S8 && vect_size == 32 &&
debug_options.xla_gpu_enable_cudnn_int8x32_convolution_reordering() &&
cudnn_version >= se::dnn::VersionInfo{8, 3, 0};
if (use_reordering) {
int64_t kernel_vect_size = kernel_shape.dimensions(*kernel_vect_dim);
if (kernel_vect_size == 4 || kernel_vect_size == 32) {
new_operands[1] = filter;
}
TF_RETURN_IF_ERROR(ReorderInt8NchwVect(conv, new_operands.data()));
dnums = &conv->convolution_dimension_numbers();
}
DimensionVector new_output_dims(output_shape.dimensions().begin(),
output_shape.dimensions().end());
new_output_dims[dnums->output_feature_dimension()] /=
(vect_size / output_vect_size);
new_output_dims[*output_vect_dim] = vect_size;
XlaOp new_conv = CustomCallWithConvDnums(
&b, conv->custom_call_target(), new_operands,
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(output_shape.element_type(), new_output_dims),
ShapeUtil::MakeShape(U8, {0})}),
{},
conv->raw_backend_config_string(), false,
{}, nullptr,
conv->window(),
*dnums);
XlaOp new_conv_result = GetTupleElement(new_conv, 0);
XlaOp new_conv_scratch = GetTupleElement(new_conv, 1);
XlaOp new_conv_result_unrevectorized = UnrevectorizeInstr(
new_conv_result, dnums->output_feature_dimension(), *output_vect_dim,
output_shape.dimensions(*output_vect_dim));
TF_ASSIGN_OR_RETURN(
HloComputation * new_conv_comp,
BuilderToHloComputation(
b, Tuple(&b, {new_conv_result_unrevectorized, new_conv_scratch}),
conv->parent()));
auto new_conv_comp_instrs = new_conv_comp->instructions();
auto new_conv_it =
absl::c_find_if(new_conv_comp_instrs, [](HloInstruction* instr) {
return instr->opcode() == HloOpcode::kCustomCall;
});
if (new_conv_it != new_conv_comp_instrs.end()) {
new_conv_comp->parent()->SetAndUniquifyInstrName(*new_conv_it,
conv->name());
}
VLOG(1) << "Re-vectorized conv to " << new_conv_comp->ToString();
TF_RETURN_IF_ERROR(conv->parent()->ReplaceWithNewInstruction(
conv, HloInstruction::CreateCall(conv->shape(), conv->operands(),
new_conv_comp)));
return true;
}
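// Tries to vectorize an unvectorized conv by splitting its input and output
// feature dimensions into (features / vect_size, vect_size). Returns false if
// the channel counts are not divisible by vect_size or the target does not
// support the vectorized kernel.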
static absl::StatusOr<bool> TryVectorizeConv(
const se::CudaComputeCapability& compute_capability,
const se::dnn::VersionInfo& cudnn_version, HloCustomCallInstruction* conv,
int64_t vect_size) {
const Shape& input_shape = conv->operand(0)->shape();
const Shape& output_shape = conv->shape().tuple_shapes(0);
const ConvolutionDimensionNumbers* dnums =
&conv->convolution_dimension_numbers();
int64_t in_channels =
input_shape.dimensions(dnums->input_feature_dimension());
int64_t out_channels =
output_shape.dimensions(dnums->output_feature_dimension());
if (in_channels % vect_size != 0 || out_channels % vect_size != 0) {
return false;
}
if (input_shape.dimensions_size() >
2 + dnums->input_spatial_dimensions_size()) {
return false;
}
if (primitive_util::IsIntegralType(input_shape.element_type())) {
TF_ASSIGN_OR_RETURN(bool supported_target_vectorization,
CudnnSupportsOptimizedIntegerConvolution(
compute_capability, *conv, vect_size));
if (!supported_target_vectorization) {
VLOG(3) << "Skipping vectorization of conv to vector size: " << vect_size
<< ": " << conv->ToString();
return false;
}
}
VLOG(1) << "Vectorizing conv channels by " << vect_size << ": "
<< conv->ToString();
XlaBuilder b(absl::StrCat(conv->name(), ".revectorized"));
b.SetOpMetadata(conv->metadata());
XlaOp filter = Parameter(&b, 1, conv->operand(1)->shape(), "filter");
absl::InlinedVector<XlaOp, 4> new_operands = {
SplitAtDim(Parameter(&b, 0, conv->operand(0)->shape(), "input"),
dnums->input_feature_dimension(), vect_size),
SplitAtDim(filter, dnums->kernel_input_feature_dimension(), vect_size),
};
if (conv->operand_count() > 2) {
new_operands.push_back(Parameter(&b, 2, conv->operand(2)->shape(), "bias"));
}
if (conv->operand_count() > 3) {
new_operands.push_back(
SplitAtDim(Parameter(&b, 3, conv->operand(3)->shape(), "side_input"),
dnums->output_feature_dimension(), vect_size));
}
if (conv->operand_count() > 4) {
return InvalidArgument(
"Don't understand a conv with more than 4 arguments: %s",
conv->ToString());
}
const auto& debug_options = conv->GetModule()->config().debug_options();
bool use_reordering =
input_shape.element_type() == xla::S8 && vect_size == 32 &&
debug_options.xla_gpu_enable_cudnn_int8x32_convolution_reordering() &&
cudnn_version >= se::dnn::VersionInfo{8, 3, 0};
if (use_reordering) {
new_operands[1] = filter;
TF_RETURN_IF_ERROR(ReorderInt8NchwVect(conv, new_operands.data()));
dnums = &conv->convolution_dimension_numbers();
}
Shape new_output_shape = SplitShapeAtDim(
output_shape, dnums->output_feature_dimension(), vect_size);
XlaOp new_conv = CustomCallWithConvDnums(
&b, conv->custom_call_target(), new_operands,
ShapeUtil::MakeTupleShape(
{new_output_shape, ShapeUtil::MakeShape(U8, {0})}),
{},
conv->raw_backend_config_string(), false,
{}, nullptr,
conv->window(),
VectorizeDnums(*dnums, use_reordering));
XlaOp new_conv_result = GetTupleElement(new_conv, 0);
XlaOp new_conv_scratch = GetTupleElement(new_conv, 1);
XlaOp conv_result_collapsed =
Collapse(new_conv_result, {dnums->output_feature_dimension(),
dnums->output_feature_dimension() + 1});
TF_ASSIGN_OR_RETURN(
HloComputation * new_conv_comp,
BuilderToHloComputation(
b, Tuple(&b, {conv_result_collapsed, new_conv_scratch}),
conv->parent()));
VLOG(1) << "Vectorized conv to: " << new_conv_comp->ToString();
TF_RETURN_IF_ERROR(conv->parent()->ReplaceWithNewInstruction(
conv, HloInstruction::CreateCall(conv->shape(), conv->operands(),
new_conv_comp)));
return true;
}
}
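// For each relevant conv: on sm75+ first try re-vectorizing to 32, then
// vectorizing to 32; if neither applies, fall back to vectorizing to 4.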
absl::StatusOr<bool> CudnnVectorizeConvolutions::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
for (HloCustomCallInstruction* conv : GetRelevantConvs(comp)) {
bool local_changed = false;
if (compute_capability_.IsAtLeast(7, 5)) {
TF_ASSIGN_OR_RETURN(
local_changed,
TryRevectorizeConv(compute_capability_, cudnn_version_, conv, 32));
if (!local_changed) {
TF_ASSIGN_OR_RETURN(
local_changed,
TryVectorizeConv(compute_capability_, cudnn_version_, conv, 32));
}
}
if (!local_changed) {
TF_ASSIGN_OR_RETURN(
local_changed,
TryVectorizeConv(compute_capability_, cudnn_version_, conv, 4));
}
changed |= local_changed;
}
}
return changed;
}
}
} | #include "xla/service/gpu/cudnn_vectorize_convolutions.h"
#include <cstdint>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "xla/service/call_inliner.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class CudnnVectorizeConvolutionsTest : public HloTestBase {
protected:
absl::StatusOr<bool> Run(std::pair<int, int> compute_capability,
HloModule* module) {
CudnnVectorizeConvolutions pass(
se::CudaComputeCapability{compute_capability.first,
compute_capability.second},
se::dnn::VersionInfo(8, 3, 0));
TF_ASSIGN_OR_RETURN(bool changed, RunHloPass(&pass, module));
CallInliner inliner;
TF_RETURN_IF_ERROR(RunHloPass(&inliner, module).status());
return changed;
}
};
TEST_F(CudnnVectorizeConvolutionsTest, VectorizeTo4) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,40] parameter(0)
filter = s8[2,2,40,44] parameter(1)
ROOT result = (s8[10,20,30,44], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward",
backend_config="{bar: 0}"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 10, 4}),
m::Reshape(m::Parameter(1))
.WithShape(S8, {2, 2, 10, 4, 44}))
.WithConvDnums("b01f?_01i?o->b01f?"))
.WithShape(S8, {10, 20, 30, 11, 4})),
m::Op())));
EXPECT_EQ(conv->raw_backend_config_string(), "{bar: 0}");
}
TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4UnsupportedFilterType) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,40] parameter(0)
filter = f32[2,2,40,44] parameter(1)
ROOT result = (s8[10,20,30,44], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward",
backend_config="{bar: 0}"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnVectorizeConvolutionsTest, VectorizeTo4NCHW) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,48,20,30] parameter(0)
filter = s8[48,44,2,2] parameter(1)
ROOT result = (s8[10,44,20,30], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=bf01_io01->bf01,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 12, 4, 20, 30}),
m::Reshape(m::Parameter(1))
.WithShape(S8, {12, 4, 44, 2, 2}))
.WithConvDnums("bf?01_i?o01->bf?01"))
.WithShape(S8, {10, 11, 4, 20, 30})),
m::Op())));
}
TEST_F(CudnnVectorizeConvolutionsTest, IncrementAllDnums) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[16,16,16,16] parameter(0)
filter = s8[16,16,3,3] parameter(1)
ROOT result = (s8[16,16,16,16], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=fb01_i01o->fb01,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {4, 4, 16, 16, 16}),
m::Reshape(m::Parameter(1))
.WithShape(S8, {4, 4, 16, 3, 3}))
.WithConvDnums("f?b01_i?01o->f?b01"))
.WithShape(S8, {4, 4, 16, 16, 16})),
m::Op())));
}
TEST_F(CudnnVectorizeConvolutionsTest, FilterDnums) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[1,20,9,9] parameter(0)
filter = s8[3,3,20,32] parameter(1)
ROOT result = (s8[1,32,9,9], u8[0]) custom-call(s8[1,20,9,9] input, s8[3,3,20,32] filter),
window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {1, 5, 4, 9, 9}),
m::Reshape(m::Parameter(1))
.WithShape(S8, {3, 3, 5, 4, 32}))
.WithConvDnums("bf?01_01i?o->bf?01"))
.WithShape(S8, {1, 8, 4, 9, 9})),
m::Op())));
}
TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,41] parameter(0)
filter = s8[2,2,41,44] parameter(1)
ROOT result = (s8[10,20,30,44], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_FALSE(changed);
}
TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4IfOutputIsS32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,41] parameter(0)
filter = s8[2,2,41,44] parameter(1)
ROOT result = (s32[10,20,30,44], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_FALSE(changed);
}
TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4IfOutputIsF32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,41] parameter(0)
filter = s8[2,2,41,44] parameter(1)
ROOT result = (f32[10,20,30,44], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_FALSE(changed);
}
TEST_F(CudnnVectorizeConvolutionsTest, VectorizeTo32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (s8[10,20,30,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 2, 32}),
m::Reshape(
m::Transpose(
m::Reshape(m::Parameter(1))
.WithShape(S8, {2, 2, 2, 8, 4, 16, 4, 2}))
.WithShape(S8, {2, 2, 2, 16, 2, 8, 4, 4})
.WithPredicate([](const HloInstruction* instr) {
return absl::c_equal(
instr->dimensions(),
std::vector<int64_t>{2, 0, 1, 5, 7, 3, 6,
4});
}))
.WithShape(S8, {128, 2, 2, 2, 32})))
.WithShape(S8, {10, 20, 30, 4, 32})),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
TEST_F(CudnnVectorizeConvolutionsTest, BiasAndSideInput) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
bias = f32[128] parameter(2)
side_input = s8[10,20,30,64] parameter(3)
ROOT result = (s8[10,20,30,128], u8[0]) custom-call(input, filter, bias, side_input),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 2, 32}),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(1))))
.WithShape(S8, {128, 2, 2, 2, 32}),
m::Reshape(
m::Transpose(m::Reshape(m::Parameter(2))
.WithShape(F32, {4, 4, 2, 4}))
.WithShape(F32, {4, 2, 4, 4})
.WithPredicate([](const HloInstruction* instr) {
return absl::c_equal(
instr->dimensions(),
std::vector<int64_t>{0, 2, 1, 3});
}))
.WithShape(F32, {128}),
m::Reshape(m::Parameter(3))
.WithShape(S8, {10, 20, 30, 2, 32})))
.WithShape(S8, {10, 20, 30, 4, 32})),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
TEST_F(CudnnVectorizeConvolutionsTest, InputNHWC_OutputNCHW) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
bias = f32[128] parameter(2)
side_input = s8[10,128,20,30] parameter(3)
ROOT result = (s8[10,128,20,30], u8[0]) custom-call(input, filter, bias, side_input),
window={size=2x2}, dim_labels=b01f_01io->bf01,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 2, 32}),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(1))))
.WithShape(S8, {128, 2, 2, 2, 32}),
m::Reshape(
m::Transpose(m::Reshape(m::Parameter(2))
.WithShape(F32, {4, 4, 2, 4}))
.WithShape(F32, {4, 2, 4, 4})
.WithPredicate([](const HloInstruction* instr) {
return absl::c_equal(
instr->dimensions(),
std::vector<int64_t>{0, 2, 1, 3});
}))
.WithShape(F32, {128}),
m::Reshape(m::Parameter(3))
.WithShape(S8, {10, 4, 32, 20, 30})))
.WithShape(S8, {10, 4, 32, 20, 30})),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (s8[10,20,30,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 0}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 16, 4}),
m::Reshape(m::Parameter(1))
.WithShape(S8, {2, 2, 16, 4, 128})))
.WithShape(S8, {10, 20, 30, 32, 4})),
m::Op())));
EXPECT_FALSE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
TEST_F(CudnnVectorizeConvolutionsTest, Vectorize4To32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,16,4] parameter(0)
filter = s8[3,5,16,192,4] parameter(1)
bias = f32[64] parameter(2)
side_input = s8[10,20,30,16,4] parameter(3)
ROOT result = (s8[10,20,30,48,4], u8[0]) custom-call(input, filter, bias, side_input),
window={size=3x5}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
auto conv_pat =
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Transpose(m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 2, 8, 4}))
.WithShape(S8, {10, 20, 30, 2, 8, 4}))
.WithShape(S8, {10, 20, 30, 2, 32}),
m::Reshape(
m::Transpose(m::Reshape(m::Parameter(1))
.WithShape(S8, {3, 5, 2, 8, 24, 4, 2, 4}))
.WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4})
.WithPredicate([](const HloInstruction* instr) {
return absl::c_equal(
instr->dimensions(),
std::vector<int64_t>{2, 0, 1, 4, 6, 3, 5, 7});
}))
.WithShape(S8, {192, 2, 3, 5, 32}),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(2)))),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(3))
.WithShape(S8, {10, 20, 30, 2, 8, 4}))
.WithShape(S8, {10, 20, 30, 2, 8, 4}))
.WithShape(S8, {10, 20, 30, 2, 32}))
.WithConvDnums("b01f?_oi01?->b01f?"))
.WithShape(S8, {10, 20, 30, 6, 32});
ASSERT_THAT(root, GmockMatch(m::Tuple(
m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape(
S8, {10, 20, 30, 6, 8, 4}))
.WithShape(S8, {10, 20, 30, 6, 8, 4}))
.WithShape(S8, {10, 20, 30, 48, 4}),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
TEST_F(CudnnVectorizeConvolutionsTest, Vectorize4To32NCHW) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,16,20,30,4] parameter(0)
filter = s8[16,128,2,2,4] parameter(1)
bias = f32[64] parameter(2)
side_input = s8[10,16,20,30,4] parameter(3)
ROOT result = (s8[10,32,20,30,4], u8[0]) custom-call(input, filter, bias, side_input),
window={size=2x2}, dim_labels=bf01_io01->bf01,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
auto conv_pat =
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Transpose(m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 2, 8, 20, 30, 4}))
.WithShape(S8, {10, 2, 20, 30, 8, 4}))
.WithShape(S8, {10, 2, 20, 30, 32}),
m::Reshape(
m::Transpose(m::Reshape(m::Parameter(1))
.WithShape(S8, {2, 8, 16, 4, 2, 2, 2, 4}))
.WithShape(S8, {2, 2, 2, 16, 2, 8, 4, 4})
.WithPredicate([](const HloInstruction* instr) {
return absl::c_equal(
instr->dimensions(),
std::vector<int64_t>{0, 5, 6, 2, 4, 1, 3, 7});
}))
.WithShape(S8, {128, 2, 2, 2, 32}),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(2)))),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(3))
.WithShape(S8, {10, 2, 8, 20, 30, 4}))
.WithShape(S8, {10, 2, 20, 30, 8, 4}))
.WithShape(S8, {10, 2, 20, 30, 32}))
.WithConvDnums("bf01_oi01->bf01"))
.WithShape(S8, {10, 4, 20, 30, 32});
ASSERT_THAT(root, GmockMatch(m::Tuple(
m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape(
S8, {10, 4, 20, 30, 8, 4}))
.WithShape(S8, {10, 4, 8, 20, 30, 4}))
.WithShape(S8, {10, 32, 20, 30, 4}),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
TEST_F(CudnnVectorizeConvolutionsTest, Vectorize4To32VectorDimFirst) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[4,10,20,30,16] parameter(0)
filter = s8[4,3,5,16,192] parameter(1)
bias = f32[64] parameter(2)
side_input = s8[4,10,20,30,16] parameter(3)
ROOT result = (s8[4,10,20,30,48], u8[0]) custom-call(input, filter, bias, side_input),
window={size=3x5}, dim_labels=?b01f_?01io->?b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
auto conv_pat =
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Transpose(m::Reshape(m::Parameter(0))
.WithShape(S8, {4, 10, 20, 30, 2, 8}))
.WithShape(S8, {8, 4, 10, 20, 30, 2}))
.WithShape(S8, {32, 10, 20, 30, 2}),
m::Reshape(
m::Transpose(m::Reshape(m::Parameter(1))
.WithShape(S8, {4, 3, 5, 2, 8, 24, 4, 2}))
.WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4})
.WithPredicate([](const HloInstruction* instr) {
return absl::c_equal(
instr->dimensions(),
std::vector<int64_t>{3, 1, 2, 5, 7, 4, 6, 0});
}))
.WithShape(S8, {192, 2, 3, 5, 32}),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(2)))),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(3))
.WithShape(S8, {4, 10, 20, 30, 2, 8}))
.WithShape(S8, {8, 4, 10, 20, 30, 2}))
.WithShape(S8, {32, 10, 20, 30, 2}))
.WithConvDnums("?b01f_oi01->?b01f"))
.WithShape(S8, {32, 10, 20, 30, 6});
ASSERT_THAT(root, GmockMatch(m::Tuple(
m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape(
S8, {8, 4, 10, 20, 30, 6}))
.WithShape(S8, {4, 10, 20, 30, 6, 8}))
.WithShape(S8, {4, 10, 20, 30, 48}),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
TEST_F(CudnnVectorizeConvolutionsTest, NoVectorize4To32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,16,4] parameter(0)
filter = s8[2,2,16,128,4] parameter(1)
bias = f32[10] parameter(2)
side_input = s8[10,20,30,16,4] parameter(3)
ROOT result = (s8[10,20,30,32,4], u8[0]) custom-call(input, filter, bias, side_input),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 0}, module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnVectorizeConvolutionsTest, Vectorize16To32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,4,16] parameter(0)
filter = s8[3,5,4,192,16] parameter(1)
ROOT result = (s8[10,20,30,12,16], u8[0]) custom-call(input, filter),
window={size=3x5}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
auto filter_pat =
m::Reshape(
m::Transpose(
m::Reshape(m::Parameter(1)).WithShape(S8, {3, 5, 2, 2, 192, 16}))
.WithShape(S8, {3, 5, 2, 192, 2, 16}))
.WithShape(S8, {3, 5, 2, 192, 32});
auto conv_pat =
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(
m::Transpose(m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 2, 2, 16}))
.WithShape(S8, {10, 20, 30, 2, 2, 16}))
.WithShape(S8, {10, 20, 30, 2, 32}),
m::Reshape(
m::Transpose(m::Reshape(filter_pat)
.WithShape(S8, {3, 5, 2, 24, 4, 2, 8, 4}))
.WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4}))
.WithShape(S8, {192, 2, 3, 5, 32}))
.WithConvDnums("b01f_oi01->b01f"))
.WithShape(S8, {10, 20, 30, 6, 32});
ASSERT_THAT(root, GmockMatch(m::Tuple(
m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape(
S8, {10, 20, 30, 6, 2, 16}))
.WithShape(S8, {10, 20, 30, 6, 2, 16}))
.WithShape(S8, {10, 20, 30, 12, 16}),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
TEST_F(CudnnVectorizeConvolutionsTest, VectorizeMixedTo32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,8,8] parameter(0)
filter = s8[3,5,2,192,32] parameter(1)
ROOT result = (s8[10,20,30,96,2], u8[0]) custom-call(input, filter),
window={size=3x5}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
auto conv_pat =
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Transpose(m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 2, 4, 8}))
.WithShape(S8, {10, 20, 30, 2, 4, 8}))
.WithShape(S8, {10, 20, 30, 2, 32}),
m::Reshape(
m::Transpose(m::Reshape(m::Parameter(1))
.WithShape(S8, {3, 5, 2, 24, 4, 2, 8, 4}))
.WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4}))
.WithShape(S8, {192, 2, 3, 5, 32}))
.WithConvDnums("b01f_oi01->b01f"))
.WithShape(S8, {10, 20, 30, 6, 32});
ASSERT_THAT(root, GmockMatch(m::Tuple(
m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape(
S8, {10, 20, 30, 6, 16, 2}))
.WithShape(S8, {10, 20, 30, 6, 16, 2}))
.WithShape(S8, {10, 20, 30, 96, 2}),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
}
}
} |
2,047 | cpp | tensorflow/tensorflow | triton_support | third_party/xla/xla/service/gpu/fusions/triton/triton_support.cc | third_party/xla/xla/service/gpu/fusions/triton/triton_support_test.cc | #ifndef XLA_SERVICE_GPU_TRITON_SUPPORT_H_
#define XLA_SERVICE_GPU_TRITON_SUPPORT_H_
#include <vector>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/instruction_fusion.h"
#include "xla/stream_executor/device_description.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
using CodegenDecision = FusionDecision;
namespace legacy_triton {
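// Support checks used by the legacy Triton GEMM fusion rewriter.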
bool IsDistributiveOverAddition(const HloInstruction& hlo);
std::vector<HloOpcode> TritonSupportedUnaryElementwiseUpToFloatNormalization(
PrimitiveType);
std::vector<HloOpcode> TritonSupportedBinaryElementwiseUpToFloatNormalization(
PrimitiveType);
std::vector<HloOpcode> TritonSupportedTernaryElementwiseUpToFloatNormalization(
PrimitiveType);
bool IsTritonSupportedDataType(PrimitiveType, const se::GpuComputeCapability&);
bool IsTritonSupportedElementwiseUpToFloatNormalization(HloOpcode,
PrimitiveType);
CodegenDecision CanTritonHandleGEMM(
const HloDotInstruction& dot, const se::GpuComputeCapability& gpu_version);
CodegenDecision IsTritonSupportedInstruction(
const HloInstruction& instr, const se::GpuComputeCapability& gpu_version);
CodegenDecision IsTritonSupportedDynamicSlice(
const HloDynamicSliceInstruction& instr);
}
CodegenDecision IsTritonSupportedInstruction(
const HloInstruction& instr, const se::GpuComputeCapability& gpu_version);
}
}
#endif
#include "xla/service/gpu/triton_support.h"
#include <cstdint>
#include <iterator>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/service/gpu/variant_visitor.h"
#include "xla/stream_executor/device_description.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/tensor_float_32_utils.h"
namespace xla {
namespace gpu {
namespace legacy_triton {
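// Ops y for which y(a + b) == y(a) + y(b) holds, exactly or closely enough
// for the rewrites that rely on this property.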
bool IsDistributiveOverAddition(const HloInstruction& hlo) {
if (hlo.opcode() == HloOpcode::kMultiply ||
hlo.opcode() == HloOpcode::kNegate ||
hlo.opcode() == HloOpcode::kBitcast ||
hlo.opcode() == HloOpcode::kReshape || hlo.opcode() == HloOpcode::kCopy ||
hlo.opcode() == HloOpcode::kTranspose ||
hlo.opcode() == HloOpcode::kConvert ||
hlo.opcode() == HloOpcode::kBroadcast ||
hlo.opcode() == HloOpcode::kSlice) {
return true;
}
return false;
}
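// Output types the Triton GEMM emitter can produce, gated on GPU generation:
// F8E5M2 needs Ampere+, F8E4M3FN needs Hopper+, and BF16 on ROCm requires
// bf16 support.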
bool IsTritonSupportedDotOutputType(
const PrimitiveType t, const se::GpuComputeCapability& gpu_version) {
switch (t) {
case F16:
case F32:
return true;
case F8E5M2:
return std::visit(VariantVisitor{[](const se::CudaComputeCapability& cc) {
return cc.IsAtLeastAmpere();
},
[](const se::RocmComputeCapability& cc) {
return false;
}},
gpu_version);
case F8E4M3FN:
return std::visit(VariantVisitor{[](const se::CudaComputeCapability& cc) {
return cc.IsAtLeastHopper();
},
[](const se::RocmComputeCapability& cc) {
return false;
}},
gpu_version);
case BF16:
return std::visit(VariantVisitor{[](const se::CudaComputeCapability& cc) {
return true;
},
[](const se::RocmComputeCapability& cc) {
return cc.has_bf16_dtype_support();
}},
gpu_version);
default:
return false;
}
}
bool IsTritonSupportedDataType(PrimitiveType type,
const se::GpuComputeCapability& gpu_version) {
if (IsTritonSupportedDotOutputType(type, gpu_version)) {
return true;
}
switch (type) {
case PRED:
case S8:
case S16:
case S32:
return true;
default:
return false;
}
}
std::vector<HloOpcode> TritonSupportedUnaryElementwiseUpToFloatNormalization(
PrimitiveType element_type) {
std::vector<HloOpcode> ret = {HloOpcode::kConvert};
if (element_type == PrimitiveType::PRED) {
ret.push_back(HloOpcode::kNot);
return ret;
}
ret.push_back(HloOpcode::kAbs);
ret.push_back(HloOpcode::kNegate);
if (element_type == PrimitiveType::F32 ||
element_type == PrimitiveType::BF16 ||
element_type == PrimitiveType::F64) {
absl::c_copy(std::vector<HloOpcode>{HloOpcode::kCos, HloOpcode::kExp,
HloOpcode::kExpm1, HloOpcode::kFloor,
HloOpcode::kCeil, HloOpcode::kLog,
HloOpcode::kLog1p, HloOpcode::kRsqrt,
HloOpcode::kSin, HloOpcode::kSqrt,
HloOpcode::kCbrt, HloOpcode::kTan,
HloOpcode::kTanh, HloOpcode::kErf},
std::back_inserter(ret));
}
return ret;
}
std::vector<HloOpcode> TritonSupportedBinaryElementwiseUpToFloatNormalization(
PrimitiveType element_type) {
if (element_type == PrimitiveType::PRED) {
return {HloOpcode::kAnd, HloOpcode::kOr, HloOpcode::kXor,
HloOpcode::kCompare};
}
std::vector<HloOpcode> ret = {HloOpcode::kAdd, HloOpcode::kCompare,
HloOpcode::kMaximum, HloOpcode::kMinimum,
HloOpcode::kMultiply, HloOpcode::kSubtract};
if (element_type == PrimitiveType::F32 ||
element_type == PrimitiveType::BF16 ||
element_type == PrimitiveType::F64) {
ret.push_back(HloOpcode::kAtan2);
ret.push_back(HloOpcode::kDivide);
ret.push_back(HloOpcode::kPower);
}
return ret;
}
std::vector<HloOpcode> TritonSupportedTernaryElementwiseUpToFloatNormalization(
PrimitiveType element_type) {
return {HloOpcode::kSelect, HloOpcode::kClamp};
}
bool IsTritonSupportedElementwiseUpToFloatNormalization(
HloOpcode opcode, PrimitiveType element_type) {
return absl::c_linear_search(
TritonSupportedUnaryElementwiseUpToFloatNormalization(
element_type),
opcode) ||
absl::c_linear_search(
TritonSupportedBinaryElementwiseUpToFloatNormalization(
element_type),
opcode) ||
absl::c_linear_search(
TritonSupportedTernaryElementwiseUpToFloatNormalization(
element_type),
opcode);
}
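// Checks that an elementwise op (or constant) uses only data types and
// opcodes the Triton emitter handles.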
CodegenDecision CanTritonHandleElementwise(
const HloInstruction& instr, const se::GpuComputeCapability& gpu_version) {
if (!IsTritonSupportedDataType(instr.shape().element_type(), gpu_version)) {
return "Unsupported output data type.";
}
for (const HloInstruction* operand : instr.operands()) {
if (!IsTritonSupportedDataType(operand->shape().element_type(),
gpu_version)) {
return "Unsupported input data type.";
}
}
if (instr.opcode() == HloOpcode::kConstant) {
return CodegenDecision{};
} else if (!IsTritonSupportedElementwiseUpToFloatNormalization(
instr.opcode(), instr.operand(0)->shape().element_type())) {
return "Unsupported elementwise operation.";
}
return CodegenDecision{};
}
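// Precision-config algorithms the Triton emitter implements: TF32 on CUDA
// only; the BF16-based algorithms on CUDA, or on ROCm with bf16 support.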
bool IsDotAlgorithmSupportedByTriton(
PrecisionConfig::Algorithm algorithm,
const se::GpuComputeCapability& gpu_version) {
auto cuda_compute_capability =
std::get_if<se::CudaComputeCapability>(&gpu_version);
auto rocm_compute_capability =
std::get_if<se::RocmComputeCapability>(&gpu_version);
switch (algorithm) {
case PrecisionConfig::ALG_DOT_TF32_TF32_F32:
if (cuda_compute_capability) {
return true;
}
return false;
case PrecisionConfig::ALG_DOT_BF16_BF16_F32:
case PrecisionConfig::ALG_DOT_BF16_BF16_F32_X3:
case PrecisionConfig::ALG_DOT_BF16_BF16_F32_X6:
if (cuda_compute_capability) {
return true;
}
if (rocm_compute_capability) {
return rocm_compute_capability->has_bf16_dtype_support();
}
return false;
case PrecisionConfig::ALG_DOT_F16_F16_F32:
case PrecisionConfig::ALG_DOT_F32_F32_F32:
default:
return false;
}
}
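// Decides whether this dot can be lowered by the Triton GEMM emitter: checks
// the algorithm/precision settings, the operand and output types, and that
// there is at most one batch dimension.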
CodegenDecision CanTritonHandleGEMM(
const HloDotInstruction& dot, const se::GpuComputeCapability& gpu_version) {
auto cuda_compute_capability =
std::get_if<se::CudaComputeCapability>(&gpu_version);
auto rocm_compute_capability =
std::get_if<se::RocmComputeCapability>(&gpu_version);
CHECK(cuda_compute_capability || rocm_compute_capability);
if (dot.precision_config().algorithm() == PrecisionConfig::ALG_UNSET) {
if (!tsl::tensor_float_32_execution_enabled() ||
absl::c_any_of(dot.precision_config().operand_precision(),
[](int x) { return x != PrecisionConfig::DEFAULT; })) {
return "Having non-default operand precisions or TensorFloat-32 disabled "
"for Dot op with unset algorithm.";
}
} else {
if (!IsDotAlgorithmSupportedByTriton(dot.precision_config().algorithm(),
gpu_version)) {
return "Unsupported algorithm on the current device(s).";
}
}
if (!IsTritonSupportedDotOutputType(dot.shape().element_type(),
gpu_version)) {
return "Unsupported output data type for Dot op.";
}
if (!IsTritonSupportedDataType(dot.operand(0)->shape().element_type(),
gpu_version) ||
!IsTritonSupportedDataType(dot.operand(1)->shape().element_type(),
gpu_version)) {
return "Unsupported input data type for Dot op.";
}
const DotDimensionNumbers& dim_numbers = dot.dot_dimension_numbers();
if (dim_numbers.lhs_batch_dimensions().size() > 1) {
return "Multiple batch dimensions.";
}
return CodegenDecision{};
}
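// Triton only handles a row reduction (over the last dimension) of a single
// operand whose init value is a constant (possibly behind a BF16->F32
// convert) and whose reduction computation is itself Triton-supported.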
CodegenDecision CanTritonHandleReduce(
const HloReduceInstruction& reduce,
const se::GpuComputeCapability& gpu_version) {
if (!IsTritonSupportedDataType(reduce.shape().element_type(), gpu_version)) {
return "Unsupported output data type for Reduce op.";
}
for (const HloInstruction* operand : reduce.operands()) {
if (!IsTritonSupportedDataType(operand->shape().element_type(),
gpu_version)) {
return "Unsupported input data type for Reduce op.";
}
}
  bool is_triton_supported_reduction_computation = absl::c_all_of(
      reduce.to_apply()->instructions(), [&](const HloInstruction* instr) {
        return IsTritonSupportedInstruction(*instr, gpu_version);
      });
if (!is_triton_supported_reduction_computation) {
return "Unsupported reduction computation by Triton.";
}
if (reduce.dimensions().size() == 1 &&
reduce.dimensions().front() == reduce.operand(0)->shape().rank() - 1 &&
reduce.operand_count() == 2) {
const HloInstruction* operand = reduce.operand(1);
if (operand->opcode() == HloOpcode::kConvert) {
if (operand->operand(0)->opcode() == HloOpcode::kConstant &&
operand->operand(0)->shape().element_type() == BF16 &&
operand->shape().element_type() == F32) {
return CodegenDecision{};
}
} else if (operand->opcode() == HloOpcode::kConstant) {
return CodegenDecision{};
}
return "Reduction init value should be a constant or a convert of a "
"constant.";
}
return "Reduction is not a row-reduction of a single operand.";
}
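// True if either dot operand consists entirely of batch and contracting
// dimensions, leaving no non-contracting dimension to tile.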
bool NoNonContractingDimension(const HloDotInstruction& dot) {
const DotDimensionNumbers& dim_numbers = dot.dot_dimension_numbers();
if (dim_numbers.lhs_batch_dimensions().size() +
dim_numbers.lhs_contracting_dimensions().size() ==
dot.operand(0)->shape().rank() ||
dim_numbers.rhs_batch_dimensions().size() +
dim_numbers.rhs_contracting_dimensions().size() ==
dot.operand(1)->shape().rank()) {
return true;
}
return false;
}
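// Dynamic slice is supported only with S8/S16/S32 indices and only when
// slicing the major-most (physical) dimension; every other dimension must be
// taken in full.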
CodegenDecision IsTritonSupportedDynamicSlice(
const HloDynamicSliceInstruction& instr) {
for (const HloInstruction* index_operand : instr.index_operands()) {
switch (index_operand->shape().element_type()) {
case S8:
case S16:
case S32:
break;
default:
return CodegenDecision(
"Dynamic slice is only supported with S8, S16, or S32 indices.");
}
}
const HloInstruction* input = instr.operand(0);
Layout in_layout = input->shape().layout();
int64_t majormost_dim_id =
in_layout.minor_to_major(in_layout.minor_to_major_size() - 1);
for (int i = 0; i < input->shape().dimensions_size(); ++i) {
if (i == majormost_dim_id) {
continue;
} else if (input->shape().dimensions(i) != instr.slice_sizes(i)) {
return CodegenDecision(
"Unsupported dynamic slice on non-major-most dimension.");
}
}
return CodegenDecision{};
}
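// Top-level support query of the legacy Triton emitter: elementwise ops go
// through CanTritonHandleElementwise, dot/reduce through their dedicated
// checks, and a fixed set of data-movement opcodes is always accepted.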
CodegenDecision IsTritonSupportedInstruction(
const HloInstruction& instr, const se::GpuComputeCapability& gpu_version) {
if (instr.IsElementwise()) {
return CanTritonHandleElementwise(instr, gpu_version);
}
switch (instr.opcode()) {
case HloOpcode::kDot: {
auto* dot = Cast<HloDotInstruction>(&instr);
if (NoNonContractingDimension(*dot)) {
return "No non-contracting dimensions.";
}
return CanTritonHandleGEMM(*dot, gpu_version);
}
case HloOpcode::kReduce: {
return CanTritonHandleReduce(*Cast<HloReduceInstruction>(&instr),
gpu_version);
}
case HloOpcode::kTuple: {
if (instr.IsRoot()) {
return CodegenDecision{};
}
return "Only supports root tuples.";
}
case HloOpcode::kDynamicSlice: {
return IsTritonSupportedDynamicSlice(
*Cast<HloDynamicSliceInstruction>(&instr));
}
case HloOpcode::kBitcast:
case HloOpcode::kTranspose:
case HloOpcode::kSlice:
case HloOpcode::kReshape:
case HloOpcode::kPad:
case HloOpcode::kConcatenate:
case HloOpcode::kParameter:
case HloOpcode::kBroadcast:
return CodegenDecision{};
default:
break;
}
return "Unsupported opcode.";
}
}
namespace {
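// Per-element-type opcode allowlists backing the support query defined after
// this anonymous namespace.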
absl::flat_hash_set<HloOpcode> TritonSupportedUnaryElementwiseOps(
PrimitiveType element_type) {
if (element_type == PrimitiveType::PRED) {
return {HloOpcode::kConvert, HloOpcode::kNot};
}
absl::flat_hash_set<HloOpcode> ret = {HloOpcode::kConvert, HloOpcode::kAbs,
HloOpcode::kNegate};
if (element_type == PrimitiveType::F32 ||
element_type == PrimitiveType::F64) {
absl::flat_hash_set<HloOpcode> additional_opcodes{
HloOpcode::kCos, HloOpcode::kExp, HloOpcode::kExpm1,
HloOpcode::kFloor, HloOpcode::kCeil, HloOpcode::kLog,
HloOpcode::kLog1p, HloOpcode::kRsqrt, HloOpcode::kSin,
HloOpcode::kSqrt, HloOpcode::kCbrt, HloOpcode::kTan,
HloOpcode::kTanh, HloOpcode::kErf};
ret.insert(additional_opcodes.begin(), additional_opcodes.end());
}
if (element_type == PrimitiveType::BF16 ||
element_type == PrimitiveType::F16) {
absl::flat_hash_set<HloOpcode> additional_opcodes{HloOpcode::kFloor,
HloOpcode::kCeil};
ret.insert(additional_opcodes.begin(), additional_opcodes.end());
}
return ret;
}
absl::flat_hash_set<HloOpcode> TritonSupportedBinaryElementwiseOps(
PrimitiveType element_type) {
if (element_type == PrimitiveType::PRED) {
return {HloOpcode::kAnd, HloOpcode::kOr, HloOpcode::kXor,
HloOpcode::kCompare};
}
absl::flat_hash_set<HloOpcode> ret = {
HloOpcode::kAdd, HloOpcode::kCompare, HloOpcode::kMaximum,
HloOpcode::kMinimum, HloOpcode::kMultiply, HloOpcode::kSubtract};
if (element_type == PrimitiveType::F32 ||
element_type == PrimitiveType::F64) {
absl::flat_hash_set<HloOpcode> additional_opcodes{
HloOpcode::kAtan2, HloOpcode::kDivide, HloOpcode::kPower};
ret.insert(additional_opcodes.begin(), additional_opcodes.end());
}
return ret;
}
absl::flat_hash_set<HloOpcode> TritonSupportedTernaryElementwiseOps(
PrimitiveType element_type) {
return {HloOpcode::kSelect, HloOpcode::kClamp};
}
bool IsTritonSupportedElementwise(HloOpcode opcode,
PrimitiveType element_type) {
return TritonSupportedUnaryElementwiseOps(element_type).contains(opcode) ||
TritonSupportedBinaryElementwiseOps(element_type).contains(opcode) ||
TritonSupportedTernaryElementwiseOps(element_type).contains(opcode);
}
}
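// Newer support query: validates input/output element types, then consults
// the elementwise allowlists above, then a small set of directly supported
// opcodes; everything else is rejected.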
CodegenDecision IsTritonSupportedInstruction(
const HloInstruction& instr, const se::GpuComputeCapability& gpu_version) {
bool output_type_is_supported = legacy_triton::IsTritonSupportedDataType(
instr.shape().element_type(), gpu_version);
if (!output_type_is_supported) {
return "Unsupported output data type.";
}
bool input_types_are_supported =
absl::c_all_of(instr.operands(), [&](const HloInstruction* operand) {
return legacy_triton::IsTritonSupportedDataType(
operand->shape().element_type(), gpu_version);
});
if (!input_types_are_supported) {
return "Unsupported input data type.";
}
if (instr.IsElementwise()) {
if (!IsTritonSupportedElementwise(instr.opcode(),
instr.shape().element_type())) {
return "Unsupported elementwise operation.";
}
return CodegenDecision{};
}
switch (instr.opcode()) {
case HloOpcode::kReduce: {
return legacy_triton::CanTritonHandleReduce(
*Cast<HloReduceInstruction>(&instr), gpu_version);
}
case HloOpcode::kTranspose:
case HloOpcode::kSlice:
case HloOpcode::kParameter:
case HloOpcode::kBroadcast:
return CodegenDecision{};
default:
VLOG(1) << "Unsupported instruction: " << instr.ToString();
break;
}
return "Unsupported opcode.";
}
}
} | #include "xla/service/gpu/triton_support.h"
#include <cstdint>
#include <string>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/ir_emitter_triton.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/gpu/triton_test_utils.h"
#include "xla/stream_executor/device_description.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::Not;
using ::testing::status::IsOk;
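// Returns every XLA primitive type except the invalid and composite ones
// (TUPLE, OPAQUE_TYPE, TOKEN), so the suites also exercise unsupported types.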
auto AllXlaDataTypes() {
std::vector<xla::PrimitiveType> xla_data_types;
std::vector<xla::PrimitiveType> to_filter_out = {PRIMITIVE_TYPE_INVALID,
TUPLE, OPAQUE_TYPE, TOKEN};
const tsl::protobuf::EnumDescriptor* xla_type_descriptor =
tsl::protobuf::GetEnumDescriptor<xla::PrimitiveType>();
for (int enum_ix = 0; enum_ix < xla_type_descriptor->value_count();
++enum_ix) {
xla::PrimitiveType xla_type = static_cast<xla::PrimitiveType>(
xla_type_descriptor->value(enum_ix)->number());
if (!absl::c_linear_search(to_filter_out, xla_type)) {
xla_data_types.push_back(xla_type);
}
}
return ::testing::ValuesIn(xla_data_types);
}
auto AllDevicesToTest() {
using cc = se::GpuComputeCapability;
#ifdef TENSORFLOW_USE_ROCM
se::RocmComputeCapability example_rocm_compute_capability =
TestGpuDeviceInfo::AMDMI210DeviceInfo().rocm_compute_capability();
return ::testing::Values(cc(example_rocm_compute_capability));
#else
return ::testing::Values(cc(se::CudaComputeCapability::Ampere()),
cc(se::CudaComputeCapability::Hopper()));
#endif
}
auto AllTestCombinationsForOpcodes(std::vector<HloOpcode>&& opcodes) {
return ::testing::Combine(AllXlaDataTypes(), ::testing::ValuesIn(opcodes),
AllDevicesToTest());
}
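// Fixture that cross-checks the support query against the compiler:
// RunSupportTest compiles the tested instruction with TritonWrapper and
// expects success when IsTritonSupportedInstruction claims support, and a
// clean failure otherwise (unless skip_failure_branch_to_avoid_crash is set
// for cases known to crash instead of erroring out).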
class TritonSupportTest : public TritonSupportTestBase {
public:
void RunSupportTest(TestedInstruction ti,
std::vector<int64_t> output_tile_sizes,
se::GpuComputeCapability cc,
bool skip_failure_branch_to_avoid_crash = false) {
BlockLevelParameters block_level_parameters =
FromOutputTileSizes(std::move(output_tile_sizes));
const se::DeviceDescription dev_info =
std::holds_alternative<se::CudaComputeCapability>(cc)
? TestGpuDeviceInfo::RTXA6000DeviceInfo(cc)
: TestGpuDeviceInfo::AMDMI210DeviceInfo();
if (IsTritonSupportedInstruction(ti.Instruction(), cc)) {
EXPECT_THAT(
TritonWrapper("test_fn", &ti.TritonFusion(), cc, dev_info,
block_level_parameters, &llvm_module_, mlir_context_),
IsOk());
} else {
if (!skip_failure_branch_to_avoid_crash) {
EXPECT_THAT(
TritonWrapper("test_fn", &ti.TritonFusion(), cc, dev_info,
block_level_parameters, &llvm_module_, mlir_context_),
Not(IsOk()));
}
}
}
};
class TritonSupportTestWithParam
: public TritonSupportTest,
public ::testing::WithParamInterface<
std::tuple<PrimitiveType, HloOpcode, se::GpuComputeCapability>> {};
using BitcastOrReshapeTest = TritonSupportTestWithParam;
TEST_P(BitcastOrReshapeTest, IsTritonSupportedBitcastOrReshape) {
auto [data_type, opcode, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
parameter_0 = $0[1,16,4]{2,1,0} parameter(0)
ROOT bitcast_or_reshape = $0[64]{0} $1(parameter_0)
})";
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
RunSupportTest(std::move(ti), {16}, cc);
}
INSTANTIATE_TEST_SUITE_P(BitcastOrReshapeTestSuite, BitcastOrReshapeTest,
AllTestCombinationsForOpcodes({HloOpcode::kBitcast,
HloOpcode::kReshape}),
TritonSupportTestTypeOpcodeAndDeviceToString);
using UnaryElementwiseTest = TritonSupportTestWithParam;
TEST_P(UnaryElementwiseTest, IsTritonSupportedUnaryElementwise) {
auto [data_type, opcode, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
parameter_0 = $0[33,68]{1,0} parameter(0)
unary = $0[33,68]{1,0} $1(parameter_0)
ROOT convert = f32[33,68]{1,0} convert(unary)
})";
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
RunSupportTest(std::move(ti), {1, 32}, cc);
}
INSTANTIATE_TEST_SUITE_P(
UnaryElementwiseTestSuite, UnaryElementwiseTest,
::testing::Combine(::testing::Values(S8, S16, S32, F16, F32, BF16),
::testing::Values(HloOpcode::kConvert, HloOpcode::kAbs,
HloOpcode::kNegate),
AllDevicesToTest()),
TritonSupportTestTypeOpcodeAndDeviceToString);
INSTANTIATE_TEST_SUITE_P(
UnaryPREDTestSuite, UnaryElementwiseTest,
::testing::Combine(::testing::Values(PRED),
::testing::Values(HloOpcode::kConvert, HloOpcode::kNot),
AllDevicesToTest()),
TritonSupportTestTypeOpcodeAndDeviceToString);
INSTANTIATE_TEST_SUITE_P(
UnaryMathTestSuite, UnaryElementwiseTest,
::testing::Combine(::testing::Values(F16, F32, BF16),
::testing::Values(HloOpcode::kCeil, HloOpcode::kCos,
HloOpcode::kExp, HloOpcode::kExpm1,
HloOpcode::kFloor, HloOpcode::kLog,
HloOpcode::kLog1p, HloOpcode::kRsqrt,
HloOpcode::kSin, HloOpcode::kSqrt,
HloOpcode::kCbrt, HloOpcode::kTan,
HloOpcode::kTanh, HloOpcode::kErf),
AllDevicesToTest()),
TritonSupportTestTypeOpcodeAndDeviceToString);
using BinaryElementwiseTest = TritonSupportTestWithParam;
TEST_P(BinaryElementwiseTest, IsTritonSupportedBinaryElementwise) {
auto [data_type, opcode, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
parameter_0 = $0[11,63]{1,0} parameter(0)
parameter_1 = $0[11,63]{1,0} parameter(1)
ROOT binary = $0[11,63]{1,0} $1(parameter_0, parameter_1)
})";
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
bool skip_failure_branch_to_avoid_crash = false;
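  // Dividing 16-bit values is unsupported, and compiling it currently crashes
  // instead of failing cleanly, so skip the failure branch for that case.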
if (primitive_util::BitWidth(data_type) == 16 &&
opcode == HloOpcode::kDivide) {
skip_failure_branch_to_avoid_crash = true;
}
RunSupportTest(std::move(ti), {1, 32}, cc,
skip_failure_branch_to_avoid_crash);
}
INSTANTIATE_TEST_SUITE_P(
BinaryElementwiseTestSuite, BinaryElementwiseTest,
::testing::Combine(::testing::Values(S8, S16, S32, F16, F32, BF16),
::testing::Values(HloOpcode::kAdd, HloOpcode::kMultiply,
HloOpcode::kMaximum,
HloOpcode::kMinimum,
HloOpcode::kSubtract),
AllDevicesToTest()),
TritonSupportTestTypeOpcodeAndDeviceToString);
INSTANTIATE_TEST_SUITE_P(BinaryPREDTestSuite, BinaryElementwiseTest,
::testing::Combine(::testing::Values(PRED),
::testing::Values(HloOpcode::kAnd,
HloOpcode::kOr,
HloOpcode::kXor),
AllDevicesToTest()),
TritonSupportTestTypeOpcodeAndDeviceToString);
INSTANTIATE_TEST_SUITE_P(
BinaryMathTestSuite, BinaryElementwiseTest,
::testing::Combine(::testing::Values(F16, F32, BF16),
::testing::Values(HloOpcode::kAtan2, HloOpcode::kDivide,
HloOpcode::kPower),
AllDevicesToTest()),
TritonSupportTestTypeOpcodeAndDeviceToString);
using CompareTest = TritonSupportTestWithParam;
TEST_P(CompareTest, IsTritonSupportedCompare) {
auto [data_type, opcode, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
parameter_0 = $0[11,63]{1,0} parameter(0)
parameter_1 = $0[11,63]{1,0} parameter(1)
compare = pred[11,63]{1,0} $1(parameter_0, parameter_1), direction=GE
ROOT convert = f32[11,63]{1,0} convert(compare)
})";
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
RunSupportTest(std::move(ti), {1, 32}, cc);
}
INSTANTIATE_TEST_SUITE_P(
CompareTestSuite, CompareTest,
::testing::Combine(::testing::Values(PRED, S8, S16, S32, F16, F32, BF16),
::testing::Values(HloOpcode::kCompare),
AllDevicesToTest()),
TritonSupportTestTypeOpcodeAndDeviceToString);
using TernaryElementwiseTest = TritonSupportTestWithParam;
TEST_P(TernaryElementwiseTest, IsTritonSupportedTernaryElementwise) {
auto [data_type, opcode, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
parameter_0 = $0[13,63]{1,0} parameter(0)
parameter_1 = $0[13,63]{1,0} parameter(1)
parameter_2 = pred[13,63]{1,0} parameter(2)
ternary = $0[13,63]{1,0} $1(parameter_2, parameter_0, parameter_1)
ROOT convert = f32[13,63]{1,0} convert(ternary)
})";
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
RunSupportTest(std::move(ti), {1, 32}, cc);
}
INSTANTIATE_TEST_SUITE_P(
TernaryElementwiseTestSuite, TernaryElementwiseTest,
::testing::Combine(::testing::Values(PRED, S8, S16, S32, F16, F32, BF16),
::testing::Values(HloOpcode::kSelect),
AllDevicesToTest()),
TritonSupportTestTypeOpcodeAndDeviceToString);
using ReduceTest = TritonSupportTestWithParam;
TEST_P(ReduceTest, IsTritonSupportedReduction) {
GTEST_SKIP() << "TODO(b/348565795): this test is currently broken.";
auto [data_type, opcode, cc] = GetParam();
bool dtype_is_complex = data_type == C64 || data_type == C128;
const std::string kHloTestTemplate =
absl::Substitute(R"(
add {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT add = $0[] add(Arg_0, Arg_1)
}
ENTRY triton_computation {
parameter_0 = $0[125,127]{1,0} parameter(0)
constant_0 = $0[] constant($1)
ROOT reduce = $0[125]{0} reduce(parameter_0, constant_0),
dimensions={1}, to_apply=add
})",
"$0", dtype_is_complex ? "(0, 0)" : "0");
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
RunSupportTest(std::move(ti), {1}, cc);
}
TEST_P(
ReduceTest,
UnsupportedReduceWithMoreThanOneReduceDimensionsFailsGracefullyWithTriton) {
auto [data_type, opcode, cc] = GetParam();
bool dtype_is_complex = data_type == C64 || data_type == C128;
const std::string kHloTestTemplate =
absl::Substitute(R"(
add {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT add = $0[] add(Arg_0, Arg_1)
}
ENTRY triton_computation {
parameter_0 = $0[2,125,127]{2,1,0} parameter(0)
constant_0 = $0[] constant($1)
ROOT reduce = $0[2]{0} reduce(parameter_0, constant_0),
dimensions={1,2}, to_apply=add
})",
"$0", dtype_is_complex ? "(0, 0)" : "0");
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc));
RunSupportTest(std::move(ti), {1}, cc);
}
TEST_P(ReduceTest,
UnsupportedReduceWithNonLastReduceDimensionFailsGracefullyWithTriton) {
auto [data_type, opcode, cc] = GetParam();
bool dtype_is_complex = data_type == C64 || data_type == C128;
const std::string kHloTestTemplate =
absl::Substitute(R"(
add {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT add = $0[] add(Arg_0, Arg_1)
}
ENTRY triton_computation {
parameter_0 = $0[125,127]{1,0} parameter(0)
constant_0 = $0[] constant($1)
ROOT reduce = $0[127]{0} reduce(parameter_0, constant_0), dimensions={0}, to_apply=add
})",
"$0", dtype_is_complex ? "(0, 0)" : "0");
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc));
RunSupportTest(std::move(ti), {1}, cc);
}
TEST_P(ReduceTest,
UnsupportedReduceWithMoreThanOneOperandsFailsGracefullyWithTriton) {
auto [data_type, opcode, cc] = GetParam();
bool dtype_is_complex = data_type == C64 || data_type == C128;
const std::string kHloTestTemplate =
absl::Substitute(R"(
add {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
Arg_2 = $0[] parameter(2)
Arg_3 = $0[] parameter(3)
add_0 = $0[] add(Arg_0, Arg_2)
add_1 = $0[] add(Arg_1, Arg_3)
ROOT pair = ($0[], $0[]) tuple(add_0, add_1)
}
ENTRY triton_computation {
parameter_0 = $0[125,127] parameter(0)
constant_0 = $0[] constant($1)
tuple = ($0[125]{0}, $0[125]{0}) reduce(
parameter_0, parameter_0, constant_0, constant_0),
dimensions={1}, to_apply=add
ROOT reduce = $0[125]{0} get-tuple-element(tuple), index=0
})",
"$0", dtype_is_complex ? "(0, 0)" : "0");
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc));
RunSupportTest(std::move(ti), {1}, cc);
}
TEST_P(ReduceTest,
UnsupportedReduceWithNonConstReduceValueFailsGracefullyWithTriton) {
auto [data_type, opcode, cc] = GetParam();
const std::string kHloTestTemplate = R"(
add {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT add = $0[] add(Arg_0, Arg_1)
}
ENTRY triton_computation {
parameter_0 = $0[125,127]{1,0} parameter(0)
init = $0[] parameter(1)
ROOT reduce = $0[125]{0} reduce(parameter_0, init), dimensions={1}, to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc));
RunSupportTest(std::move(ti), {1}, cc);
}
TEST_P(ReduceTest, UnsupportedReductionComputationFailsGracefullyWithTriton) {
auto [data_type, opcode, cc] = GetParam();
bool dtype_is_complex = data_type == C64 || data_type == C128;
const std::string kHloTestTemplate =
absl::Substitute(R"(
custom_call {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT custom_call = $0[] custom-call(Arg_0, Arg_1), custom_call_target="foo"
}
ENTRY triton_computation {
parameter_0 = $0[125,127]{1,0} parameter(0)
constant_0 = $0[] constant($1)
ROOT reduce = $0[125]{0} reduce(parameter_0, constant_0),
dimensions={1}, to_apply=custom_call
})",
"$0", dtype_is_complex ? "(0, 0)" : "0");
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc));
RunSupportTest(std::move(ti), {1}, cc);
}
INSTANTIATE_TEST_SUITE_P(ReduceTestSuite, ReduceTest,
AllTestCombinationsForOpcodes({HloOpcode::kReduce}),
TritonSupportTestTypeOpcodeAndDeviceToString);
}
}
} |
2,048 | cpp | tensorflow/tensorflow | ir_emitter_triton | null | null | #ifndef XLA_SERVICE_GPU_IR_EMITTER_TRITON_H_
#define XLA_SERVICE_GPU_IR_EMITTER_TRITON_H_
#include <cstdint>
#include <functional>
#include <optional>
#include <string>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Module.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Value.h"
#include "mlir/Pass/PassManager.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/gpu/model/tiled_hlo_instruction.h"
#include "xla/service/gpu/triton_fusion_analysis.h"
#include "xla/service/hlo_module_config.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/launch_dim.h"
#include "triton/Dialect/Triton/IR/Dialect.h"
#include "triton/Dialect/TritonNvidiaGPU/Transforms/Passes.h"
namespace xla {
namespace gpu {
namespace mt = ::mlir::triton;
struct TritonWrapperResult {
int64_t shmem_bytes = 0;
std::optional<se::ClusterDim> cluster_dim;
};
absl::Status EmitGeneric(mlir::OpBuilder b, absl::string_view libdevice_path,
const se::DeviceDescription& device_info,
const HloFusionInstruction* fusion,
mlir::triton::FuncOp fn,
const BlockLevelParameters& block_level_parameters);
absl::StatusOr<LaunchDimensions> GetMatMulLaunchDimensions(
const TritonFusionAnalysis& analysis, const HloFusionAdaptor& fusion,
const TritonGemmConfig& config);
absl::Status EmitMatMul(mlir::OpBuilder b, absl::string_view libdevice_path,
const se::DeviceDescription& device_info,
const HloFusionInstruction* fusion,
mlir::triton::FuncOp fn,
const BlockLevelParameters& block_level_parameters);
absl::Status EmitSoftMax(mlir::OpBuilder b, absl::string_view libdevice_path,
const se::DeviceDescription& device_info,
const HloFusionInstruction* fusion,
mlir::triton::FuncOp fn,
const BlockLevelParameters& block_level_parameters);
using TritonIrEmitter = std::function<absl::Status(
mlir::OpBuilder, absl::string_view, const se::DeviceDescription&,
const HloFusionInstruction*, mlir::triton::FuncOp,
const BlockLevelParameters&)>;
void LoadMlirDialectsForTriton(mlir::MLIRContext& mlir_context);
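// Compiles the given fusion through Triton into `llvm_module` and reports
// resource usage. A minimal call (a sketch; the surrounding setup is assumed)
// looks like:
//   TF_ASSIGN_OR_RETURN(
//       TritonWrapperResult result,
//       TritonWrapper("triton_fn", fusion, cc, device_info,
//                     block_level_parameters, llvm_module, mlir_context));
//   int64_t shmem = result.shmem_bytes;  // shared memory used by the kernel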
absl::StatusOr<TritonWrapperResult> TritonWrapper(
absl::string_view fn_name, const HloFusionInstruction* fusion,
const se::GpuComputeCapability& cc,
const se::DeviceDescription& device_info,
const BlockLevelParameters& block_level_parameters,
llvm::Module* llvm_module, mlir::MLIRContext& mlir_context);
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> CreateTritonModule(
absl::string_view fn_name, const HloFusionInstruction* fusion,
const se::DeviceDescription& device_info,
const BlockLevelParameters& block_level_parameters,
mlir::MLIRContext& mlir_context);
absl::StatusOr<TritonWrapperResult> CompileTritonToLLVM(
const HloModuleConfig& hlo_config, absl::string_view hlo_module_name,
const se::GpuComputeCapability& cc,
const se::DeviceDescription& device_info,
const BlockLevelParameters& block_level_parameters,
mlir::ModuleOp triton_module, llvm::Module* llvm_module,
mlir::MLIRContext& mlir_context);
absl::Status CreateTritonPipeline(
mlir::OpPassManager& pm, const se::GpuComputeCapability& cc,
const BlockLevelParameters& block_level_parameters,
mt::nvidia_gpu::ClusterInfo& out_cluster_info);
std::string GetLibdevicePath(const HloModuleConfig& hlo_config,
const se::DeviceDescription& device_info);
namespace ir_emitter_triton_internal {
struct MakeTensorPtrOpAndBoundaryChecks {
mt::MakeTensorPtrOp op;
llvm::SmallVector<int32_t> boundary_checks;
};
MakeTensorPtrOpAndBoundaryChecks CreateMakeTensorPtrOp(
mlir::ImplicitLocOpBuilder& b, mlir::Value pid,
const TiledHloInstruction& tiled_hlo, mlir::Value argument_block);
}
}
}
#endif
#include "xla/service/gpu/ir_emitter_triton.h"
#include <array>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <system_error>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Linker/Linker.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Triple.h"
#include "mlir/Conversion/AffineToStandard/AffineToStandard.h"
#include "mlir/Conversion/ArithToLLVM/ArithToLLVM.h"
#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h"
#include "mlir/Conversion/IndexToLLVM/IndexToLLVM.h"
#include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/Extensions/InlinerExtension.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
#include "mlir/Dialect/LLVMIR/NVVMDialect.h"
#include "mlir/Dialect/Math/IR/Math.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/ExecutionEngine/OptUtils.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/ValueRange.h"
#include "mlir/IR/Verifier.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Support/TypeID.h"
#include "mlir/Target/LLVMIR/Dialect/Builtin/BuiltinToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Export.h"
#include "mlir/Transforms/Passes.h"
#include "xla/autotuning.pb.h"
#include "xla/comparison_util.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/mlir_hlo/mhlo/transforms/map_mhlo_to_scalar_op.h"
#include "xla/primitive_util.h"
#include "xla/service/algorithm_util.h"
#include "xla/service/dump.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/fusions/mlir/elemental_hlo_to_mlir.h"
#include "xla/service/gpu/fusions/mlir/ir/xla_gpu_ops.h"
#include "xla/service/gpu/fusions/mlir/passes.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/model/symbolic_tile_analysis.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/gpu/model/tiled_hlo_instruction.h"
#include "xla/service/gpu/target_util.h"
#include "xla/service/gpu/triton_fusion_analysis.h"
#include "xla/service/gpu/triton_tiling_propagation.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/instruction_fusion.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/translate/hlo_to_mhlo/hlo_function_importer.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/tensor_float_32_utils.h"
#include "triton/Conversion/TritonGPUToLLVM/Passes.h"
#include "triton/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.h"
#include "triton/Dialect/Triton/IR/Dialect.h"
#include "triton/Dialect/Triton/IR/Types.h"
#include "triton/Dialect/TritonGPU/IR/Dialect.h"
#include "triton/Dialect/TritonNvidiaGPU/Transforms/Passes.h"
namespace xla {
namespace gpu {
namespace ma = ::mlir::arith;
namespace mm = ::mlir::math;
namespace ml = ::mlir::LLVM;
namespace mn = ::mlir::NVVM;
namespace mt = ::mlir::triton;
using ::llvm::SmallVector;
using mlir::ArrayRef;
using mlir::ImplicitLocOpBuilder;
using ::mlir::ShapedType;
using ::mlir::Type;
using ::mlir::Value;
using mlir::ValueRange;
namespace {
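// Maps an XLA primitive type to the corresponding MLIR type used by the
// Triton emitter; unhandled types produce an Unimplemented error.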
absl::StatusOr<Type> TritonType(mlir::OpBuilder b, PrimitiveType t) {
switch (t) {
case F64:
return b.getF64Type();
case F32:
return b.getF32Type();
case F16:
return b.getF16Type();
case BF16:
return b.getBF16Type();
case S64:
return b.getI64Type();
case S32:
return b.getI32Type();
case S16:
return b.getI16Type();
case PRED:
return b.getI1Type();
case S8:
return b.getI8Type();
case F8E5M2:
return b.getFloat8E5M2Type();
    case F8E4M3FN:
      // F8E4M3FN is the non-UZ OCP format; map it to the matching MLIR type.
      return b.getFloat8E4M3FNType();
default:
return absl::UnimplementedError(
absl::StrCat("This type is not supported yet: ",
primitive_util::LowercasePrimitiveTypeName(t)));
}
}
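// Triton stores i1 values in memory as i8; all other types are stored as-is.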
Type StorageType(mlir::OpBuilder b, Type t) {
if (t.isInteger(1)) {
return b.getI8Type();
}
return t;
}
template <typename T>
T ScalarConstantValue(const HloInstruction& instr, PrimitiveType dst_type) {
CHECK(hlo_query::IsScalarConstant(&instr));
absl::StatusOr<Literal> converted = instr.literal().Convert(dst_type);
TF_CHECK_OK(converted.status());
return converted.value().GetFirstElement<T>();
}
template <typename T>
ma::ConstantOp CreateConst(ImplicitLocOpBuilder b, Type type, T value) {
if (mlir::isa<mlir::IntegerType>(type)) {
return b.create<ma::ConstantOp>(b.getIntegerAttr(type, value));
}
if (mlir::isa<mlir::FloatType>(type)) {
return b.create<ma::ConstantOp>(
b.getFloatAttr(type, static_cast<double>(value)));
}
LOG(FATAL) << "Constant type not supported: " << llvm_ir::DumpToString(type);
}
template <typename T>
ma::ConstantOp CreateConst(ImplicitLocOpBuilder& b, Type type, T value,
ArrayRef<int64_t> shape) {
auto tensor_type = mlir::RankedTensorType::get(shape, type);
if (auto int_type = mlir::dyn_cast<mlir::IntegerType>(type)) {
return b.create<ma::ConstantOp>(mlir::DenseElementsAttr::get(
tensor_type, mlir::APInt(int_type.getIntOrFloatBitWidth(), value)));
}
if (auto float_type = mlir::dyn_cast<mlir::FloatType>(type)) {
return b.create<ma::ConstantOp>(mlir::DenseElementsAttr::get(
tensor_type, b.getFloatAttr(type, static_cast<double>(value))));
}
LOG(FATAL) << "Constant type not supported: " << llvm_ir::DumpToString(type);
}
Value ZerosLike(ImplicitLocOpBuilder& b, Value x) {
if (auto src_shaped_ty = mlir::dyn_cast<ShapedType>(x.getType())) {
Type src_ty = src_shaped_ty.getElementType();
return CreateConst(b, src_ty, 0, src_shaped_ty.getShape());
}
return CreateConst(b, x.getType(), 0);
}
Value OnesLike(ImplicitLocOpBuilder& b, Value x) {
if (auto src_shaped_ty = mlir::dyn_cast<ShapedType>(x.getType())) {
Type src_ty = src_shaped_ty.getElementType();
return CreateConst(b, src_ty, 1, src_shaped_ty.getShape());
}
return CreateConst(b, x.getType(), 1);
}
bool IsFp8Type(Type t) {
return t.isFloat8E5M2() || t.isFloat8E4M3FN() || t.isFloat8E5M2FNUZ() ||
t.isFloat8E4M3FNUZ() || t.isFloat8E4M3B11FNUZ();
}
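// Casts `value` (scalar or tensor) to `dst_element_ty`. BF16 casts are
// bridged through F32, FP8 conversions use tt.fp_to_fp (with RTNE rounding
// when FP8 is the destination), and the remaining integer/float cases pick
// the matching arith op based on bit widths and signedness.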
Value Cast(ImplicitLocOpBuilder& b, Value value, Type dst_element_ty) {
Type src_ty = value.getType();
Type src_element_ty = src_ty;
Type fp32_ty = b.getF32Type();
Type dst_ty = dst_element_ty;
if (auto src_shaped_ty = mlir::dyn_cast<ShapedType>(src_ty)) {
src_element_ty = src_shaped_ty.getElementType();
dst_ty = src_shaped_ty.clone(src_shaped_ty.getShape(), dst_element_ty);
fp32_ty = src_shaped_ty.clone(src_shaped_ty.getShape(), b.getF32Type());
}
if (src_ty == dst_ty) {
return value;
}
if (src_element_ty.isBF16()) {
return Cast(b, b.create<ma::ExtFOp>(fp32_ty, value), dst_element_ty);
}
if (dst_element_ty.isBF16()) {
if (!src_element_ty.isInteger(8)) {
return b.create<ma::TruncFOp>(dst_ty, Cast(b, value, b.getF32Type()));
}
}
auto src_fp_element_ty = mlir::dyn_cast<mlir::FloatType>(src_element_ty);
auto dst_fp_element_ty = mlir::dyn_cast<mlir::FloatType>(dst_element_ty);
if (src_fp_element_ty && dst_fp_element_ty) {
if (IsFp8Type(src_element_ty)) {
return b.create<mt::FpToFpOp>(dst_ty, value);
}
if (IsFp8Type(dst_element_ty)) {
return b.create<mt::FpToFpOp>(
dst_ty, value,
mt::RoundingModeAttr::get(b.getContext(), mt::RoundingMode::RTNE));
}
if (src_fp_element_ty.getFPMantissaWidth() >
dst_fp_element_ty.getFPMantissaWidth()) {
return b.create<ma::TruncFOp>(dst_ty, value);
} else {
return b.create<ma::ExtFOp>(dst_ty, value);
}
}
if (mlir::isa<mlir::IntegerType>(src_element_ty) &&
mlir::isa<mlir::IntegerType>(dst_element_ty)) {
if (src_element_ty.getIntOrFloatBitWidth() <
dst_element_ty.getIntOrFloatBitWidth()) {
if (src_element_ty.isInteger(1)) {
return b.create<ma::ExtUIOp>(dst_ty, value);
}
return b.create<ma::ExtSIOp>(dst_ty, value);
}
return b.create<ma::TruncIOp>(dst_ty, value);
}
if (mlir::isa<mlir::IntegerType>(src_element_ty) && dst_fp_element_ty) {
if (src_element_ty.isInteger(1)) {
return b.create<ma::UIToFPOp>(dst_ty, value);
}
return b.create<ma::SIToFPOp>(dst_ty, value);
}
if (src_fp_element_ty && mlir::isa<mlir::IntegerType>(dst_element_ty)) {
if (dst_element_ty.isInteger(1)) {
return b.create<ma::CmpFOp>(ma::CmpFPredicate::UNE, value,
ZerosLike(b, value));
}
return b.create<ma::FPToSIOp>(dst_ty, value);
}
LOG(FATAL) << "Type conversion not supported: "
<< llvm_ir::DumpToString(src_element_ty) << " -> "
<< llvm_ir::DumpToString(dst_element_ty);
}
Value Subtract(ImplicitLocOpBuilder& b, ValueRange values) {
if (mlir::isa<mlir::IntegerType>(mlir::getElementTypeOrSelf(values[0]))) {
return b.create<ma::SubIOp>(values[0], values[1]);
} else {
return b.create<ma::SubFOp>(values[0], values[1]);
}
}
Value Compare(ImplicitLocOpBuilder& b, ValueRange values,
mlir::mhlo::ComparisonDirection direction) {
const Type type = mlir::getElementTypeOrSelf(values[0]);
if (mlir::isa<mlir::IntegerType>(type)) {
    return b.create<ma::CmpIOp>(
        mlir::mhlo::impl::getCmpPredicate<ma::CmpIPredicate>(
            direction,
            /*is_signed=*/!type.isInteger(1))
            .value(),
        values[0], values[1]);
  }
  return b.create<ma::CmpFOp>(
      mlir::mhlo::impl::getCmpPredicate<ma::CmpFPredicate>(direction,
                                                           /*is_signed=*/true)
          .value(),
      values[0], values[1]);
}
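// HLO maximum semantics: floats lower directly to arith.maximumf; other
// types fall back to a compare-and-select sequence (whose NaN checks are
// vacuous for integers).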
Value Maximum(ImplicitLocOpBuilder& b, const se::DeviceDescription& device_info,
ValueRange values) {
if (mlir::isa<mlir::FloatType>(mlir::getElementTypeOrSelf(values[0]))) {
return b.create<ma::MaximumFOp>(values);
}
Value lhs_is_nan =
Compare(b, {values[0], values[0]}, mlir::mhlo::ComparisonDirection::NE);
Value rhs_is_not_nan =
Compare(b, {values[1], values[1]}, mlir::mhlo::ComparisonDirection::EQ);
Value lhs_is_ge = Compare(b, values, mlir::mhlo::ComparisonDirection::GE);
return b.create<ma::SelectOp>(
b.create<ma::OrIOp>(lhs_is_nan,
b.create<ma::AndIOp>(rhs_is_not_nan, lhs_is_ge)),
values[0], values[1]);
}
Value Minimum(ImplicitLocOpBuilder& b, const se::DeviceDescription& device_info,
ValueRange values) {
if (mlir::isa<mlir::FloatType>(mlir::getElementTypeOrSelf(values[0]))) {
return b.create<ma::MinimumFOp>(values);
}
Value lhs_is_nan =
Compare(b, {values[0], values[0]}, mlir::mhlo::ComparisonDirection::NE);
Value rhs_is_not_nan =
Compare(b, {values[1], values[1]}, mlir::mhlo::ComparisonDirection::EQ);
Value lhs_is_le = Compare(b, values, mlir::mhlo::ComparisonDirection::LE);
return b.create<ma::SelectOp>(
b.create<ma::OrIOp>(lhs_is_nan,
b.create<ma::AndIOp>(rhs_is_not_nan, lhs_is_le)),
values[0], values[1]);
}
Value Splat(ImplicitLocOpBuilder& b, Value value, ArrayRef<int64_t> shape) {
auto type = mlir::RankedTensorType::get(shape, value.getType());
return b.create<mt::SplatOp>(type, value);
}
using TensorValue = mlir::TypedValue<mlir::RankedTensorType>;
Value Broadcast(ImplicitLocOpBuilder& b, TensorValue value,
ArrayRef<int64_t> shape) {
return b.create<mt::BroadcastOp>(value.getType().clone(shape), value);
}
Value Range(ImplicitLocOpBuilder& b, int32_t limit) {
auto type = mlir::RankedTensorType::get(limit, b.getI32Type());
return b.create<mt::MakeRangeOp>(type, 0, limit);
}
Value AddPtr(ImplicitLocOpBuilder& b, Value ptr, Value offset) {
return b.create<mt::AddPtrOp>(ptr.getType(), ptr, offset);
}
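// Lowers an elementwise HLO instruction. F32/F64 ops with a libdevice
// counterpart become extern elementwise calls against the device's math
// library; everything else maps to arith/math dialect ops below.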
absl::StatusOr<Value> EmitElementwise(ImplicitLocOpBuilder& b,
absl::string_view libdevice_path,
const se::DeviceDescription& device_info,
const HloInstruction& hlo,
ValueRange inputs) {
if (mlir::getElementTypeOrSelf(inputs[0]).isF32() ||
mlir::getElementTypeOrSelf(inputs[0]).isF64()) {
auto dev_fn_id = GetTargetDeviceFunctionID(hlo.opcode());
if (dev_fn_id.ok()) {
llvm::Triple triple("nvptx64-unknown-unknown");
if (std::holds_alternative<se::RocmComputeCapability>(
device_info.gpu_compute_capability())) {
triple.setTriple("amdgcn-unknown-unknown");
}
      return b.create<mt::ExternElementwiseOp>(
          inputs[0].getType(), inputs, "libdevice", libdevice_path,
          ObtainDeviceFunctionName(dev_fn_id.value(),
                                   hlo.shape().element_type(), triple),
          /*pure=*/true);
}
}
const bool is_integer =
mlir::isa<mlir::IntegerType>(mlir::getElementTypeOrSelf(inputs[0]));
switch (hlo.opcode()) {
case HloOpcode::kCopy:
return inputs[0];
case HloOpcode::kAbs:
if (is_integer) {
return b.create<mm::AbsIOp>(inputs[0]);
}
return b.create<mm::AbsFOp>(inputs[0]);
case HloOpcode::kCeil:
return b.create<mm::CeilOp>(inputs[0]);
case HloOpcode::kFloor:
return b.create<mm::FloorOp>(inputs[0]);
case HloOpcode::kNot:
return b.create<ma::XOrIOp>(inputs[0], OnesLike(b, inputs[0]));
case HloOpcode::kNegate:
return Subtract(b, {ZerosLike(b, inputs[0]), inputs[0]});
case HloOpcode::kConvert: {
TF_ASSIGN_OR_RETURN(Type dst_ty,
TritonType(b, hlo.shape().element_type()));
return Cast(b, inputs[0], dst_ty);
}
case HloOpcode::kAdd:
if (is_integer) {
return b.create<ma::AddIOp>(inputs[0], inputs[1]);
}
return b.create<ma::AddFOp>(inputs[0], inputs[1]);
case HloOpcode::kSubtract:
return Subtract(b, inputs);
case HloOpcode::kMultiply:
if (is_integer) {
return b.create<ma::MulIOp>(inputs[0], inputs[1]);
}
return b.create<ma::MulFOp>(inputs[0], inputs[1]);
case HloOpcode::kMaximum:
return Maximum(b, device_info, inputs);
case HloOpcode::kMinimum:
return Minimum(b, device_info, inputs);
case HloOpcode::kClamp:
return Maximum(
b, device_info,
{Minimum(b, device_info, {inputs[1], inputs[2]}), inputs[0]});
case HloOpcode::kAnd:
return b.create<ma::AndIOp>(inputs[0], inputs[1]);
case HloOpcode::kOr:
return b.create<ma::OrIOp>(inputs[0], inputs[1]);
case HloOpcode::kXor:
return b.create<ma::XOrIOp>(inputs[0], inputs[1]);
case HloOpcode::kDivide:
if (is_integer) {
return b.create<ma::DivSIOp>(inputs[0], inputs[1]);
}
return b.create<ma::DivFOp>(inputs[0], inputs[1]);
case HloOpcode::kCompare:
return Compare(
b, inputs,
mlir::mhlo::symbolizeComparisonDirection(
ComparisonDirectionToString(hlo.comparison_direction()))
.value());
case HloOpcode::kSelect:
return b.create<ma::SelectOp>(
Compare(b, {inputs[0], ZerosLike(b, inputs[0])},
mlir::mhlo::ComparisonDirection::NE),
inputs[1], inputs[2]);
default:
return absl::InvalidArgumentError(
absl::StrCat("Unsupported elementwise operation ", hlo.ToString()));
}
}
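// Emits the load of a parameter tile. An offset-less tt.make_tensor_ptr or a
// plain pointer yields a scalar load that is splatted; tensor pointers are
// loaded with boundary checks and zero padding where required.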
Value EmitParameterLoad(ImplicitLocOpBuilder& b, Value pointer,
ArrayRef<int32_t> boundary_checks) {
if (auto make_tensor_ptr = pointer.getDefiningOp<mt::MakeTensorPtrOp>()) {
if (make_tensor_ptr.getOffsets().empty()) {
return Splat(b,
b.create<mt::LoadOp>(make_tensor_ptr.getBase(),
mt::CacheModifier::NONE,
mt::EvictionPolicy::NORMAL,
                                        /*isVolatile=*/false),
{});
}
}
if (mt::isTensorPointerType(pointer.getType())) {
std::optional<mt::PaddingOption> padding;
if (!boundary_checks.empty()) {
padding = mt::PaddingOption::PAD_ZERO;
}
return b.create<mt::LoadOp>(pointer, boundary_checks, padding,
mt::CacheModifier::NONE,
mt::EvictionPolicy::NORMAL,
                                /*isVolatile=*/false);
}
return Splat(b,
b.create<mt::LoadOp>(pointer, mt::CacheModifier::NONE,
mt::EvictionPolicy::NORMAL,
                                    /*isVolatile=*/false),
{});
}
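// Materializes a scalar HLO constant, converting integers through u64/s64 and
// floats through f64 before emitting the typed constant op.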
absl::StatusOr<Value> EmitConstant(ImplicitLocOpBuilder& b,
const HloInstruction& constant) {
TF_ASSIGN_OR_RETURN(Type ty, TritonType(b, constant.shape().element_type()));
if (constant.shape().IsInteger()) {
if (constant.shape().element_type() == U64) {
return CreateConst(b, ty, ScalarConstantValue<uint64_t>(constant, U64));
} else {
return CreateConst(b, ty, ScalarConstantValue<int64_t>(constant, S64));
}
}
return CreateConst(b, ty, ScalarConstantValue<double>(constant, F64));
}
struct DimProperties {
DimProperties(int64_t index, Value pid, int block_size, int split_value)
: index(index),
pid(pid),
block_size(block_size),
split_value(split_value) {}
int64_t index;
Value pid;
int block_size;
int split_value;
};
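// Expands `input` to the tiled shape of `broadcast`: scalar inputs are
// splatted, and tensor inputs receive tt.expand_dims for each dimension newly
// materialized by the broadcast before the final tt.broadcast.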
absl::StatusOr<Value> EmitBroadcast(
ImplicitLocOpBuilder& b, const TritonFusionAnalysis* analysis,
TritonFusionAnalysis::Scope scope,
absl::Span<const DimProperties> tiled_dimensions,
const HloInstruction& broadcast, Value input) {
TF_RET_CHECK(analysis != nullptr);
std::vector<int64_t> out_shape;
for (const DimProperties& dim : tiled_dimensions) {
const TensorIterationSpec::DimIterationSpec* spec =
analysis->IterSpec(scope, &broadcast, dim.index);
if (spec != nullptr && spec->at(0).stride > 0) {
out_shape.push_back(dim.block_size);
}
}
auto tensor_input = mlir::dyn_cast<TensorValue>(input);
if (!tensor_input) {
return Splat(b, input, out_shape);
}
if (tensor_input.getType().getRank() == out_shape.size()) {
return input;
}
Value expanded_input = tensor_input;
int dim_idx = 0;
for (const DimProperties& dim : tiled_dimensions) {
if (analysis->IterSpec(scope, &broadcast, dim.index) != nullptr &&
analysis->IterSpec(scope, &broadcast, dim.index)->at(0).stride > 0) {
if (analysis->IterSpec(scope, broadcast.operand(0), dim.index) ==
nullptr) {
expanded_input = b.create<mt::ExpandDimsOp>(expanded_input, dim_idx);
}
++dim_idx;
}
}
return Broadcast(b, mlir::cast<TensorValue>(expanded_input), out_shape);
}
absl::StatusOr<Value> EmitScope(
ImplicitLocOpBuilder& b, absl::string_view libdevice_path,
const se::DeviceDescription& device_info,
const TritonFusionAnalysis* analysis, TritonFusionAnalysis::Scope scope, | #include "xla/service/gpu/ir_emitter_triton.h"
#include <cstdlib>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "absl/types/span.h"
#include "llvm/IR/LLVMContext.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Pass/PassManager.h"
#include "xla/autotuning.pb.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/gpu/tests/gpu_codegen_test.h"
#include "xla/service/gpu/triton_test_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/xla.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class TritonTest : public GpuCodegenTest {
public:
stream_executor::CudaComputeCapability GetCudaComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
const stream_executor::GpuComputeCapability& GpuComputeComp() {
return device_desc().gpu_compute_capability();
}
stream_executor::GpuComputeCapability CudaAmpereOrRocm() {
if (std::holds_alternative<stream_executor::RocmComputeCapability>(
GpuComputeComp())) {
return stream_executor::GpuComputeCapability{
device_desc().rocm_compute_capability()};
} else {
return stream_executor::GpuComputeCapability{
stream_executor::CudaComputeCapability{
stream_executor::CudaComputeCapability::AMPERE, 0}};
}
}
protected:
const stream_executor::DeviceDescription& device_desc() {
return backend().default_stream_executor()->GetDeviceDescription();
}
};
class TritonGemmTest : public TritonTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = TritonTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_cublas_fallback(false);
debug_options.set_xla_gpu_enable_split_k_autotuning(false);
debug_options.set_xla_gpu_gemm_rewrite_size_threshold(0);
return debug_options;
}
void MatchHloModule(HloModule& module, absl::string_view pattern) {
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_result,
RunFileCheck(module.ToString(), pattern));
EXPECT_TRUE(filecheck_result);
}
};
class TritonGemmTestWithSplitK : public TritonGemmTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = TritonGemmTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_split_k_autotuning(true);
return debug_options;
}
};
class TritonGemmTestWithoutTritonGemmAny : public TritonGemmTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = TritonGemmTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_triton_gemm_any(false);
return debug_options;
}
};
TEST_F(TritonTest, TestGemm) {
const std::string kHloText = R"(
HloModule t, is_scheduled=true
triton_gemm_r {
parameter_0 = s8[80,115]{1,0} parameter(0)
convert.3 = f32[80,115]{1,0} convert(parameter_0)
parameter_1 = f32[137,115]{1,0} parameter(1)
ROOT r.1 = f32[80,137]{1,0} dot(convert.3, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p1 = f32[137,115]{1,0} parameter(1)
p0 = s8[80,115]{1,0} parameter(0)
ROOT triton_gemm_r = f32[80,137]{1,0} fusion(p0, p1), kind=kCustom,
calls=triton_gemm_r,
backend_config={"fusion_backend_config": {kind: "__triton_gemm",
triton_gemm_config: {"block_m":16,"block_n":64,"block_k":32,
"split_k":1,"num_stages":1,"num_warps":2,
"num_ctas":1}}}
})";
TF_EXPECT_OK(
CreateTritonIrAndFileCheckForDot(this, kHloText, "triton_gemm_r", R"(
CHECK: tt.func @triton_fn(%[[LHS:.*]]: !tt.ptr<i8> {tt.divisibility = 16 : i32}, %[[RHS:.*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[OUT:.*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}) {
CHECK-DAG: %[[ZERO_KN:.*]] = arith.constant dense<0.000000e+00> : tensor<32x64xf32>
CHECK-DAG: %[[ZERO_MK:.*]] = arith.constant dense<0.000000e+00> : tensor<16x32xf32>
CHECK-DAG: %[[ZERO_MN:.*]] = arith.constant dense<0.000000e+00> : tensor<16x64xf32>
CHECK-DAG: %[[SIZE_K:.*]] = arith.constant 115 : i32
CHECK-DAG: %[[SIZE_M:.*]] = arith.constant 137 : i64
CHECK-DAG: %[[C1:.*]] = arith.constant 1 : i64
CHECK-DAG: %[[C0:.*]] = arith.constant 0 : i32
CHECK-DAG: %[[C80:.*]] = arith.constant 80 : i64
CHECK-DAG: %[[TILE_SIZE_K:.*]] = arith.constant 32 : i32
CHECK-DAG: %[[TILE_SIZE_N:.*]] = arith.constant 64 : i32
CHECK-DAG: %[[TILE_SIZE_M:.*]] = arith.constant 16 : i32
CHECK-DAG: %[[NUM_TILES_M:.*]] = arith.constant 5 : i32
CHECK-DAG: %[[GROUP_M:.*]] = arith.constant 8 : i32
CHECK-DAG: %[[WIDTH:.*]] = arith.constant 24 : i32
CHECK: %[[PID_NC:.*]] = tt.get_program_id x
CHECK: %[[GROUP_ID:.*]] = arith.divsi %[[PID_NC]], %[[WIDTH]]
CHECK: %[[FIRST_PID_M:.*]] = arith.muli %[[GROUP_ID]], %[[GROUP_M]]
CHECK: %[[MAX_M:.*]] = arith.subi %[[NUM_TILES_M]], %[[FIRST_PID_M]]
CHECK: %[[CMP:.*]] = arith.cmpi slt, %[[MAX_M]], %[[GROUP_M]]
CHECK: %[[GROUP_SIZE:.*]] = arith.select %[[CMP]], %[[MAX_M]], %[[GROUP_M]]
CHECK: %[[PID_M:.*]] = arith.remsi %[[PID_NC]], %[[GROUP_SIZE]]
CHECK: %[[TILE_INDEX_M:.*]] = arith.addi %[[FIRST_PID_M]], %[[PID_M]] : i32
CHECK: %[[TMP:.*]] = arith.remsi %[[PID_NC]], %[[WIDTH]] : i32
CHECK: %[[TILE_INDEX_N:.*]] = arith.divsi %[[TMP]], %[[GROUP_SIZE]] : i32
CHECK: %[[TILE_OFFSET_M_LHS:.*]] = arith.muli %[[TILE_INDEX_M]], %[[TILE_SIZE_M]]
CHECK: %[[LHS_PTR:.*]] = tt.make_tensor_ptr %[[LHS]]
CHECK: %[[LHS_TILE_PTR:.*]] = tt.advance %[[LHS_PTR]], [%[[TILE_OFFSET_M_LHS]], %[[C0]]]
CHECK: %[[TILE_OFFSET_N_RHS:.*]] = arith.muli %[[TILE_INDEX_N]], %[[TILE_SIZE_N]]
CHECK: %[[RHS_PTR:.*]] = tt.make_tensor_ptr %[[RHS]]
CHECK: %[[RHS_TILE_PTR:.*]] = tt.advance %[[RHS_PTR]], [%[[C0]], %[[TILE_OFFSET_N_RHS]]]
CHECK: %[[FOR:.*]]:3 = scf.for %[[BLOCK_K:.*]] = %[[C0]] to %[[SIZE_K]] step %[[TILE_SIZE_K]]
CHECK-SAME: iter_args(%[[LHS_ITER_PTR:.*]] = %[[LHS_TILE_PTR]], %[[RHS_ITER_PTR:.*]] = %[[RHS_TILE_PTR]], %[[ACC:.*]] = %[[ZERO_MN]])
CHECK: %[[LHS_TILE:.*]] = tt.load %[[LHS_ITER_PTR]] {boundaryCheck = array<i32: 1>
CHECK: %[[LHS_ITER_PTR_NEXT:.*]] = tt.advance %[[LHS_ITER_PTR]], [%[[C0]], %[[TILE_SIZE_K]]]
CHECK: %[[RHS_TILE:.*]] = tt.load %[[RHS_ITER_PTR]] {boundaryCheck = array<i32: 0, 1>
CHECK: %[[RHS_ITER_PTR_NEXT:.*]] = tt.advance %[[RHS_ITER_PTR]], [%[[TILE_SIZE_K]], %[[C0]]]
CHECK: %[[CONVERTED:.*]] = arith.sitofp %[[LHS_TILE]] : tensor<16x32xi8> to tensor<16x32xf32>
CHECK: %[[TILE_K_LIMIT:.*]] = arith.subi %[[SIZE_K]], %[[BLOCK_K]] : i32
CHECK: %[[K_TILE_IOTA:.*]] = tt.make_range {end = 32 : i32, start = 0 : i32} : tensor<32xi32>
CHECK: %[[K_OFFSETS_1K:.*]] = tt.expand_dims %[[K_TILE_IOTA]] {axis = 0 : i32} : tensor<32xi32> -> tensor<1x32xi32>
CHECK: %[[TILE_K_LIMIT_1K:.*]] = tt.splat %[[TILE_K_LIMIT]] : i32 -> tensor<1x32xi32>
CHECK: %[[LHS_INBOUNDS_1K:.*]] = arith.cmpi slt, %[[K_OFFSETS_1K]], %[[TILE_K_LIMIT_1K]] : tensor<1x32xi32>
CHECK: %[[LHS_INBOUNDS_MK:.*]] = tt.broadcast %[[LHS_INBOUNDS_1K]] : tensor<1x32xi1> -> tensor<16x32xi1>
CHECK: %[[LHS_MASKED:.*]] = arith.select %[[LHS_INBOUNDS_MK]], %[[CONVERTED]], %[[ZERO_MK]]
CHECK: %[[K_OFFSETS_K1:.*]] = tt.expand_dims %[[K_TILE_IOTA]] {axis = 1 : i32} : tensor<32xi32> -> tensor<32x1xi32>
CHECK: %[[TILE_K_LIMIT_K1:.*]] = tt.splat %[[TILE_K_LIMIT]] : i32 -> tensor<32x1xi32>
CHECK: %[[RHS_INBOUNDS_K1:.*]] = arith.cmpi slt, %[[K_OFFSETS_K1]], %[[TILE_K_LIMIT_K1]] : tensor<32x1xi32>
CHECK: %[[RHS_INBOUNDS_KN:.*]] = tt.broadcast %[[RHS_INBOUNDS_K1]] : tensor<32x1xi1> -> tensor<32x64xi1>
CHECK: %[[RHS_MASKED:.*]] = arith.select %[[RHS_INBOUNDS_KN]], %[[RHS_TILE]], %[[ZERO_KN]] : tensor<32x64xi1>, tensor<32x64xf32>
CHECK: %[[ACC_NEXT:.*]] = tt.dot %[[LHS_MASKED]], %[[RHS_MASKED]], %[[ACC]]
CHECK: scf.yield %[[LHS_ITER_PTR_NEXT]], %[[RHS_ITER_PTR_NEXT]], %[[ACC_NEXT]] : !tt.ptr<tensor<16x32xi8>>, !tt.ptr<tensor<32x64xf32>>, tensor<16x64xf32>
CHECK: }
CHECK: %[[OUT_PTR:.*]] = tt.make_tensor_ptr %[[OUT]], [%[[C80]], %[[SIZE_M]]], [%[[SIZE_M]], %[[C1]]], [%[[C0]], %[[C0]]] {order = array<i32: 1, 0>} : <tensor<16x64xf32>>
CHECK: %[[OUT_OFFSET:.*]] = tt.advance %[[OUT_PTR]], [%[[TILE_OFFSET_M_LHS]], %[[TILE_OFFSET_N_RHS]]] : <tensor<16x64xf32>>
CHECK: tt.store %[[OUT_OFFSET]], %[[FOR]]#2 {boundaryCheck = array<i32: 1>} : !tt.ptr<tensor<16x64xf32>>
CHECK: tt.return
CHECK: }
)"));
}
TEST_F(TritonTest, TestGemmWithTrivialNonContractingDimension) {
const std::string kHloText = R"(
HloModule t, is_scheduled=true
triton_dot {
param_0.1 = f32[137,115]{1,0} parameter(0)
param_1.1 = f32[1,115]{1,0} parameter(1)
ROOT dot = f32[137,1]{1,0} dot(param_0.1, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = f32[137,115]{1,0} parameter(0)
p1 = f32[1,115]{1,0} parameter(1)
ROOT custom-call = f32[137,1]{1,0} fusion(p0, p1), kind=kCustom,
calls=triton_dot,
backend_config={"fusion_backend_config": {kind: "__triton_gemm",
triton_gemm_config: {"block_m":16,"block_n":16,"block_k":32,
"split_k":1,"num_stages":1,"num_warps":2,
"num_ctas":1}}}
})";
TF_EXPECT_OK(
CreateTritonIrAndFileCheckForDot(this, kHloText, "triton_dot", R"(
CHECK: tt.func @triton_fn(%[[LHS:.*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[RHS:.*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[OUT:.*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}) {
CHECK-DAG: %[[ZERO_KN:.*]] = arith.constant dense<0.000000e+00> : tensor<32x16xf32>
CHECK-DAG: %[[ZERO_MK:.*]] = arith.constant dense<0.000000e+00> : tensor<16x32xf32>
CHECK-DAG: %[[ZERO_MN:.*]] = arith.constant dense<0.000000e+00> : tensor<16x16xf32>
CHECK-DAG: %[[SIZE_K:.*]] = arith.constant 115 : i32
CHECK-DAG: %[[SIZE_M:.*]] = arith.constant 137 : i64
CHECK-DAG: %[[C1:.*]] = arith.constant 1 : i64
CHECK-DAG: %[[C0:.*]] = arith.constant 0 : i32
CHECK-DAG: %[[C115:.*]] = arith.constant 115 : i64
CHECK-DAG: %[[TILE_SIZE_K:.*]] = arith.constant 32 : i32
CHECK-DAG: %[[TILE_SIZE_M:.*]] = arith.constant 16 : i32
CHECK-DAG: %[[C8:.*]] = arith.constant 8 : i32
CHECK-DAG: %[[NUM_TILES_M:.*]] = arith.constant 9 : i32
CHECK: %[[PID_NC:.*]] = tt.get_program_id x : i32
CHECK: %[[GROUP_ID:.*]] = arith.divsi %[[PID_NC]], %[[C8]]
CHECK: %[[FIRST_PID_M:.*]] = arith.muli %[[GROUP_ID]], %[[C8]]
CHECK: %[[MAX_M:.*]] = arith.subi %[[NUM_TILES_M]], %[[FIRST_PID_M]]
CHECK: %[[CMP:.*]] = arith.cmpi slt, %[[MAX_M]], %[[C8]]
CHECK: %[[GROUP_SIZE:.*]] = arith.select %[[CMP]], %[[MAX_M]], %[[C8]]
CHECK: %[[PID_M:.*]] = arith.remsi %[[PID_NC]], %[[GROUP_SIZE]]
CHECK: %[[TILE_INDEX_M:.*]] = arith.addi %[[FIRST_PID_M]], %[[PID_M]]
CHECK: %[[TMP:.*]] = arith.remsi %[[PID_NC]], %[[C8]]
CHECK: %[[TILE_INDEX_N:.*]] = arith.divsi %[[TMP]], %[[GROUP_SIZE]]
CHECK: %[[TILE_OFFSET_M_LHS:.*]] = arith.muli %[[TILE_INDEX_M]], %[[TILE_SIZE_M]]
CHECK: %[[LHS_PTR:.*]] = tt.make_tensor_ptr %[[LHS]]
CHECK: %[[LHS_TILE_PTR:.*]] = tt.advance %[[LHS_PTR]], [%[[TILE_OFFSET_M_LHS]], %[[C0]]]
CHECK: %[[TILE_OFFSET_N_RHS:.*]] = arith.muli %[[TILE_INDEX_N]], %[[TILE_SIZE_M]]
CHECK: %[[RHS_PTR:.*]] = tt.make_tensor_ptr %[[RHS]]
CHECK: %[[RHS_TILE_PTR:.*]] = tt.advance %[[RHS_PTR]], [%[[C0]], %[[TILE_OFFSET_N_RHS]]]
CHECK: %[[FOR:.*]]:3 = scf.for %[[BLOCK_K:.*]] = %[[C0]] to %[[SIZE_K]] step %[[TILE_SIZE_K]]
CHECK-SAME: iter_args(%[[LHS_ITER_PTR:.*]] = %[[LHS_TILE_PTR]], %[[RHS_ITER_PTR:.*]] = %[[RHS_TILE_PTR]], %[[ACC:.*]] = %[[ZERO_MN]])
CHECK: %[[LHS_TILE:.*]] = tt.load %[[LHS_ITER_PTR]] {boundaryCheck = array<i32: 0, 1>
CHECK: %[[LHS_ITER_PTR_NEXT:.*]] = tt.advance %[[LHS_ITER_PTR]], [%[[C0]], %[[TILE_SIZE_K]]]
CHECK: %[[RHS_TILE:.*]] = tt.load %[[RHS_ITER_PTR]] {boundaryCheck = array<i32: 0, 1>
CHECK: %[[RHS_ITER_PTR_NEXT:.*]] = tt.advance %[[RHS_ITER_PTR]], [%[[TILE_SIZE_K]], %[[C0]]]
CHECK: %[[TILE_K_LIMIT:.*]] = arith.subi %[[SIZE_K]], %[[BLOCK_K]] : i32
CHECK: %[[K_TILE_IOTA:.*]] = tt.make_range {end = 32 : i32, start = 0 : i32} : tensor<32xi32>
CHECK: %[[K_OFFSETS_1K:.*]] = tt.expand_dims %[[K_TILE_IOTA]] {axis = 0 : i32} : tensor<32xi32> -> tensor<1x32xi32>
CHECK: %[[TILE_K_LIMIT_1K:.*]] = tt.splat %[[TILE_K_LIMIT]] : i32 -> tensor<1x32xi32>
CHECK: %[[LHS_INBOUNDS_1K:.*]] = arith.cmpi slt, %[[K_OFFSETS_1K]], %[[TILE_K_LIMIT_1K]] : tensor<1x32xi32>
CHECK: %[[LHS_INBOUNDS_MK:.*]] = tt.broadcast %[[LHS_INBOUNDS_1K]] : tensor<1x32xi1> -> tensor<16x32xi1>
CHECK: %[[LHS_MASKED:.*]] = arith.select %[[LHS_INBOUNDS_MK]], %[[LHS_TILE]], %[[ZERO_MK]]
CHECK: %[[K_OFFSETS_K1:.*]] = tt.expand_dims %[[K_TILE_IOTA]] {axis = 1 : i32} : tensor<32xi32> -> tensor<32x1xi32>
CHECK: %[[TILE_K_LIMIT_K1:.*]] = tt.splat %[[TILE_K_LIMIT]] : i32 -> tensor<32x1xi32>
CHECK: %[[RHS_INBOUNDS_K1:.*]] = arith.cmpi slt, %[[K_OFFSETS_K1]], %[[TILE_K_LIMIT_K1]] : tensor<32x1xi32>
CHECK: %[[RHS_INBOUNDS_KN:.*]] = tt.broadcast %[[RHS_INBOUNDS_K1]] : tensor<32x1xi1> -> tensor<32x16xi1>
CHECK: %[[RHS_MASKED:.*]] = arith.select %[[RHS_INBOUNDS_KN]], %[[RHS_TILE]], %[[ZERO_KN]] : tensor<32x16xi1>, tensor<32x16xf32>
CHECK: %[[ACC_NEXT:.*]] = tt.dot %[[LHS_MASKED]], %[[RHS_MASKED]], %[[ACC]]
CHECK: scf.yield %[[LHS_ITER_PTR_NEXT]], %[[RHS_ITER_PTR_NEXT]], %[[ACC_NEXT]] : !tt.ptr<tensor<16x32xf32>>, !tt.ptr<tensor<32x16xf32>>, tensor<16x16xf32>
CHECK: }
CHECK: %[[OUT_PTR:.*]] = tt.make_tensor_ptr %[[OUT]], [%[[SIZE_M]], %[[C1]]], [%[[C1]], %[[C1]]], [%[[C0]], %[[C0]]] {order = array<i32: 1, 0>} : <tensor<16x16xf32>>
CHECK: %[[OUT_OFFSET:.*]] = tt.advance %[[OUT_PTR]], [%[[TILE_OFFSET_M_LHS]], %[[TILE_OFFSET_N_RHS]]] : <tensor<16x16xf32>>
CHECK: tt.store %[[OUT_OFFSET]], %[[FOR]]#2 {boundaryCheck = array<i32: 0, 1>} : !tt.ptr<tensor<16x16xf32>>
CHECK: tt.return
CHECK: }
)"));
}
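// The TestSoftmaxEmitter* tests below exercise the generic Triton emitter on
// row-reduction fusions tiled with FromOutputTileSizes({1, 127}): each Triton
// program handles one row of 127 elements, loaded as a boundary-checked
// 128-element (next power of two) tile, reduced with tt.reduce, and stored
// back with a boundary check.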
TEST_F(TritonTest, TestSoftmaxEmitterWithSingleParameter) {
const std::string kHloText = R"(
HloModule t
add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
triton_softmax_computation {
parameter_0 = f32[125,127]{1,0} parameter(0)
multiply_0 = f32[125,127]{1,0} multiply(parameter_0, parameter_0)
constant_0 = f32[] constant(0)
reduce_0 = f32[125]{0} reduce(multiply_0, constant_0), dimensions={1}, to_apply=add
broadcast_4 = f32[125,127]{1,0} broadcast(reduce_0), dimensions={0}
ROOT multiply = f32[125,127]{1,0} multiply(multiply_0, broadcast_4)
}
ENTRY main {
param_0 = f32[125,127]{1,0} parameter(0)
ROOT triton_softmax = f32[125,127]{1,0} fusion(param_0), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}}
})";
TF_EXPECT_OK(CreateTritonIrAndFileCheck(this, kHloText,
FromOutputTileSizes({1, 127}),
"triton_softmax_computation", R"(
CHECK: #[[MAP:.*]] = affine_map<(d0) -> (d0 * 127)>
CHECK: tt.func @triton_fn(%[[P0:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[P1:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}) {
CHECK: %[[PID:.*]] = tt.get_program_id x : i32
CHECK: arith.index_castui %[[PID]] : i32 to index
CHECK: tt.addptr %[[P0]]
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.load
CHECK-SAME: {boundaryCheck = array<i32: 0>, padding = 1 : i32} : !tt.ptr<tensor<128xf32>>
CHECK: tt.reduce
CHECK-NEXT: ^bb0(%[[ARG2:[^:]*]]: f32, %[[ARG3:[^:]*]]: f32):
CHECK-NEXT: %[[ADD:.*]] = arith.addf %[[ARG2]], %[[ARG3]] : f32
CHECK-NEXT: tt.reduce.return %[[ADD]] : f32
CHECK-NEXT: }) : (tensor<128xf32>) -> f32
CHECK: tt.splat
CHECK: arith.mulf
CHECK-SAME: tensor<128xf32>
CHECK: tt.addptr %[[P1]]
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.store
CHECK-SAME: {boundaryCheck = array<i32: 0>} : !tt.ptr<tensor<128xf32>>
CHECK: tt.return
CHECK: }
)"));
}
TEST_F(TritonTest, TestSoftmaxEmitterWithSingleScalarParameter) {
const std::string kHloText = R"(
HloModule t
add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
triton_softmax_computation {
parameter_0 = f32[] parameter(0)
broadcast_1 = f32[125,127]{1,0} broadcast(parameter_0), dimensions={}
multiply_0 = f32[125,127]{1,0} multiply(broadcast_1, broadcast_1)
constant_0 = f32[] constant(0)
reduce_0 = f32[125]{0} reduce(multiply_0, constant_0), dimensions={1}, to_apply=add
broadcast_4 = f32[125,127]{1,0} broadcast(reduce_0), dimensions={0}
ROOT multiply = f32[125,127]{1,0} multiply(multiply_0, broadcast_4)
}
ENTRY main {
param_0 = f32[] constant(42)
ROOT triton_softmax = f32[125,127]{1,0} fusion(param_0), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}}
})";
TF_EXPECT_OK(CreateTritonIrAndFileCheck(this, kHloText,
FromOutputTileSizes({1, 127}),
"triton_softmax_computation", R"(
CHECK: #[[MAP:.*]] = affine_map<(d0) -> (d0 * 127)>
CHECK: tt.func @triton_fn(%[[P0:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[P1:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}) {
CHECK-DAG: %[[PID:.*]] = tt.get_program_id x : i32
CHECK-DAG: arith.index_castui %[[PID]] : i32 to index
CHECK-DAG: %[[ZERO_OFFSET:.*]] = arith.constant 0 : i64
CHECK-DAG: %[[ARG_0:.*]] = tt.addptr %[[P0]], %[[ZERO_OFFSET]] : !tt.ptr<f32>, i64
CHECK: tt.load %[[ARG_0]] : !tt.ptr<f32>
CHECK-NEXT: tt.splat
CHECK: tt.reduce
CHECK-NEXT: ^bb0(%[[ARG2:[^:]*]]: f32, %[[ARG3:[^:]*]]: f32):
CHECK-NEXT: %[[ADD:.*]] = arith.addf %[[ARG2]], %[[ARG3]] : f32
CHECK-NEXT: tt.reduce.return %[[ADD]] : f32
CHECK-NEXT: }) : (tensor<128xf32>) -> f32
CHECK: tt.splat
CHECK: arith.mulf
CHECK-SAME: tensor<128xf32>
CHECK: tt.addptr %[[P1]]
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.store
CHECK-SAME: {boundaryCheck = array<i32: 0>} : !tt.ptr<tensor<128xf32>>
CHECK: tt.return
CHECK: }
)"));
}
TEST_F(TritonTest, TestSoftmaxEmitterWithMultipleParameters) {
const std::string kHloText = R"(
HloModule t
add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
triton_softmax_computation {
param_0 = f32[125,127]{1,0} parameter(0)
param_1 = f32[127]{0} parameter(1)
broadcast_0 = f32[125,127]{1,0} broadcast(param_1), dimensions={1}
multiply_0 = f32[125,127]{1,0} multiply(param_0, broadcast_0)
constant_0 = f32[] constant(0)
reduce_0 = f32[125]{0} reduce(multiply_0, constant_0), dimensions={1}, to_apply=add
broadcast_4 = f32[125,127]{1,0} broadcast(reduce_0), dimensions={0}
ROOT multiply = f32[125,127]{1,0} multiply(multiply_0, broadcast_4)
}
ENTRY main {
param_0 = f32[125,127]{1,0} parameter(0)
param_1 = f32[127]{0} parameter(1)
ROOT triton_softmax = f32[125,127]{1,0} fusion(param_0, param_1), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}}
}
)";
TF_EXPECT_OK(CreateTritonIrAndFileCheck(this, kHloText,
FromOutputTileSizes({1, 127}),
"triton_softmax_computation", R"(
CHECK: #[[MAP:.*]] = affine_map<(d0) -> (d0 * 127)>
CHECK: tt.func @triton_fn(%[[P0:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[P1:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[P2:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}) {
CHECK-DAG: %[[PID:.*]] = tt.get_program_id x : i32
CHECK-DAG: %[[PID_INDEX:.*]] = arith.index_castui %[[PID]] : i32 to index
CHECK-DAG: %[[C127_i64:.*]] = arith.constant 127 : i64
CHECK-DAG: %[[ZERO_OFFSET:.*]] = arith.constant 0 : i64
CHECK: %[[ROW_OFFSET_INDEX:.*]] = xla_gpu.apply_indexing #[[MAP]](%[[PID_INDEX]]
CHECK: %[[ROW_OFFSET:.*]] = arith.index_castui %[[ROW_OFFSET_INDEX]] : index to i64
CHECK: %[[ARG0:.*]] = tt.addptr %[[P0]], %[[ROW_OFFSET]] : !tt.ptr<f32>, i64
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.load
CHECK-SAME: {boundaryCheck = array<i32: 0>, padding = 1 : i32} : !tt.ptr<tensor<128xf32>>
CHECK: %[[ARG1:.*]] = tt.addptr %[[P1]], %[[ZERO_OFFSET]] : !tt.ptr<f32>, i64
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.load
CHECK-SAME: {boundaryCheck = array<i32: 0>, padding = 1 : i32} : !tt.ptr<tensor<128xf32>>
CHECK: tt.reduce
CHECK-NEXT: ^bb0(%[[ARG3:[^:]*]]: f32, %[[ARG4:[^:]*]]: f32):
CHECK-NEXT: %[[ADD:.*]] = arith.addf %[[ARG3]], %[[ARG4]] : f32
CHECK-NEXT: tt.reduce.return %[[ADD]] : f32
CHECK-NEXT: }) : (tensor<128xf32>) -> f32
CHECK: tt.addptr %[[P2]]
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.store
CHECK-SAME: {boundaryCheck = array<i32: 0>} : !tt.ptr<tensor<128xf32>>
CHECK: tt.return
CHECK: }
)"));
}
TEST_F(TritonTest, TestSoftmaxEmitterWithMultipleParametersOrderSwapped) {
const std::string kHloText = R"(
HloModule t
add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
triton_softmax_computation {
param_0 = f32[125,127]{1,0} parameter(1)
param_1 = f32[127]{0} parameter(0)
broadcast_0 = f32[125,127]{1,0} broadcast(param_1), dimensions={1}
multiply_0 = f32[125,127]{1,0} multiply(param_0, broadcast_0)
constant_0 = f32[] constant(0)
reduce_0 = f32[125]{0} reduce(multiply_0, constant_0), dimensions={1}, to_apply=add
broadcast_4 = f32[125,127]{1,0} broadcast(reduce_0), dimensions={0}
ROOT multiply = f32[125,127]{1,0} multiply(multiply_0, broadcast_4)
}
ENTRY main {
param_0 = f32[125,127]{1,0} parameter(1)
param_1 = f32[127]{0} parameter(0)
ROOT triton_softmax = f32[125,127]{1,0} fusion(param_1, param_0), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}}
}
)";
TF_EXPECT_OK(CreateTritonIrAndFileCheck(this, kHloText,
FromOutputTileSizes({1, 127}),
"triton_softmax_computation", R"(
CHECK: #[[MAP:.*]] = affine_map<(d0) -> (d0 * 127)>
CHECK: tt.func @triton_fn(%[[P0:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[P1:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[P2:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}) {
CHECK-DAG: %[[PID:.*]] = tt.get_program_id x : i32
CHECK-DAG: %[[PID_INDEX:.*]] = arith.index_castui %[[PID]] : i32 to index
CHECK-DAG: %[[C127_i64:.*]] = arith.constant 127 : i64
CHECK-DAG: %[[ZERO_OFFSET:.*]] = arith.constant 0 : i64
CHECK: %[[ROW_OFFSET_INDEX:.*]] = xla_gpu.apply_indexing #[[MAP]](%[[PID_INDEX]]
CHECK: %[[ROW_OFFSET:.*]] = arith.index_castui %[[ROW_OFFSET_INDEX]] : index to i64
CHECK: %[[ARG1:.*]] = tt.addptr %[[P1]], %[[ROW_OFFSET]] : !tt.ptr<f32>, i64
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.load
CHECK-SAME: {boundaryCheck = array<i32: 0>, padding = 1 : i32} : !tt.ptr<tensor<128xf32>>
CHECK: %[[ARG0:.*]] = tt.addptr %[[P0]], %[[ZERO_OFFSET]] : !tt.ptr<f32>, i64
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.load
CHECK-SAME: {boundaryCheck = array<i32: 0>, padding = 1 : i32} : !tt.ptr<tensor<128xf32>>
CHECK: tt.reduce
CHECK-NEXT: ^bb0(%[[ARG3:[^:]*]]: f32, %[[ARG4:[^:]*]]: f32):
CHECK-NEXT: %[[ADD:.*]] = arith.addf %[[ARG3]], %[[ARG4]] : f32
CHECK-NEXT: tt.reduce.return %[[ADD]] : f32
CHECK-NEXT: }) : (tensor<128xf32>) -> f32
CHECK: tt.splat
CHECK: tt.addptr %[[P2]]
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.store
CHECK-SAME: {boundaryCheck = array<i32: 0>} : !tt.ptr<tensor<128xf32>>
CHECK: tt.return
CHECK: }
)"));
}
TEST_F(TritonTest,
TestSoftmaxEmitterWithAdditionalParameterEnteringAfterDiamond) {
const std::string kHloText = R"(
HloModule t
add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
triton_softmax_computation {
param_0 = f32[125,127]{1,0} parameter(0)
constant_0 = f32[] constant(0)
reduce_0 = f32[125]{0} reduce(param_0, constant_0), dimensions={1}, to_apply=add
broadcast_4 = f32[125,127]{1,0} broadcast(reduce_0), dimensions={0}
param_1 = f32[127]{0} parameter(1)
broadcast_0 = f32[125,127]{1,0} broadcast(param_1), dimensions={1}
ROOT multiply_0 = f32[125,127]{1,0} multiply(broadcast_4, broadcast_0)
}
ENTRY main {
param_0 = f32[125,127]{1,0} parameter(0)
param_1 = f32[127]{0} parameter(1)
ROOT triton_softmax = f32[125,127]{1,0} fusion(param_0, param_1), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}}
}
)";
TF_EXPECT_OK(CreateTritonIrAndFileCheck(this, kHloText,
FromOutputTileSizes({1, 127}),
"triton_softmax_computation", R"(
CHECK: #[[MAP:.*]] = affine_map<(d0) -> (d0 * 127)>
CHECK: tt.func @triton_fn(%[[P0:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[P1:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[P2:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}) {
CHECK-DAG: %[[PID:.*]] = tt.get_program_id x : i32
CHECK-DAG: %[[PID_INDEX:.*]] = arith.index_castui %[[PID]] : i32 to index
CHECK-DAG: %[[C127_i64:.*]] = arith.constant 127 : i64
CHECK-DAG: %[[ZERO_OFFSET:.*]] = arith.constant 0 : i64
CHECK: %[[ROW_OFFSET_INDEX:.*]] = xla_gpu.apply_indexing #[[MAP]](%[[PID_INDEX]]
CHECK: %[[ROW_OFFSET:.*]] = arith.index_castui %[[ROW_OFFSET_INDEX]] : index to i64
CHECK: %[[ARG0:.*]] = tt.addptr %[[P0]], %[[ROW_OFFSET]] : !tt.ptr<f32>, i64
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.load
CHECK-SAME: {boundaryCheck = array<i32: 0>, padding = 1 : i32} : !tt.ptr<tensor<128xf32>>
CHECK: tt.reduce
CHECK-NEXT: ^bb0(%[[ARG3:[^:]*]]: f32, %[[ARG4:[^:]*]]: f32):
CHECK-NEXT: %[[ADD:.*]] = arith.addf %[[ARG3]], %[[ARG4]] : f32
CHECK-NEXT: tt.reduce.return %[[ADD]] : f32
CHECK-NEXT: }) : (tensor<128xf32>) -> f32
CHECK: %[[ARG1:.*]] = tt.addptr %[[P1]], %[[ZERO_OFFSET]] : !tt.ptr<f32>, i64
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.load
CHECK-SAME: {boundaryCheck = array<i32: 0>, padding = 1 : i32} : !tt.ptr<tensor<128xf32>>
CHECK: tt.addptr %[[P2]]
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.store
CHECK-SAME: {boundaryCheck = array<i32: 0>} : !tt.ptr<tensor<128xf32>>
CHECK: tt.return
CHECK: }
)"));
}
TEST_F(TritonTest,
TestSoftmaxEmitterWithMultipleParametersAlongTiledDimension) {
const std::string kHloText = R"(
HloModule t
add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
triton_softmax_computation {
param_0 = f32[125,127]{1,0} parameter(0)
param_1 = f32[127]{0} parameter(1)
param_2 = f32[125]{0} parameter(2)
broadcast_0 = f32[125,127]{1,0} broadcast(param_1), dimensions={1}
multiply_0 = f32[125,127]{1,0} multiply(param_0, broadcast_0)
broadcast_1 = f32[125,127]{1,0} broadcast(param_2), dimensions={0}
multiply_1 = f32[125,127]{1,0} multiply(multiply_0, broadcast_1)
constant_0 = f32[] constant(0)
reduce_0 = f32[125]{0} reduce(multiply_1, constant_0), dimensions={1}, to_apply=add
broadcast_4 = f32[125,127]{1,0} broadcast(reduce_0), dimensions={0}
ROOT multiply = f32[125,127]{1,0} multiply(multiply_1, broadcast_4)
}
ENTRY main {
param_0 = f32[125,127]{1,0} parameter(1)
param_1 = f32[127]{0} parameter(0)
param_2 = f32[125]{0} parameter(2)
ROOT triton_softmax = f32[125,127]{1,0} fusion(param_0, param_1, param_2), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}}
}
)";
TF_EXPECT_OK(CreateTritonIrAndFileCheck(this, kHloText,
FromOutputTileSizes({1, 127}),
"triton_softmax_computation", R"(
CHECK: #[[MAP:.*]] = affine_map<(d0) -> (d0 * 127)>
CHECK: tt.func @triton_fn(%[[P0:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[P1:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[P2:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[P3:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}) {
CHECK-DAG: %[[C127_i64:.*]] = arith.constant 127 : i64
CHECK-DAG: %[[ZERO_OFFSET:.*]] = arith.constant 0 : i64
CHECK-DAG: %[[PID:.*]] = tt.get_program_id x : i32
CHECK-DAG: %[[PID_INDEX:.*]] = arith.index_castui %[[PID]] : i32 to index
CHECK: %[[ROW_OFFSET_INDEX:.*]] = xla_gpu.apply_indexing #[[MAP]](%[[PID_INDEX]]
CHECK: %[[ROW_OFFSET:.*]] = arith.index_castui %[[ROW_OFFSET_INDEX]] : index to i64
CHECK: %[[ARG0:.*]] = tt.addptr %[[P0]], %[[ROW_OFFSET]] : !tt.ptr<f32>, i64
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.load
CHECK-SAME: {boundaryCheck = array<i32: 0>, padding = 1 : i32} : !tt.ptr<tensor<128xf32>>
CHECK: %[[ARG1:.*]] = tt.addptr %[[P1]], %[[ZERO_OFFSET]] : !tt.ptr<f32>, i64
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: |
2,049 | cpp | tensorflow/tensorflow | triton_tiling_propagation | third_party/xla/xla/service/gpu/triton_tiling_propagation.cc | third_party/xla/xla/service/gpu/triton_tiling_propagation_test.cc | #ifndef XLA_SERVICE_GPU_TRITON_TILING_PROPAGATION_H_
#define XLA_SERVICE_GPU_TRITON_TILING_PROPAGATION_H_
#include <cstdint>
#include <optional>
#include <string>
#include <tuple>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/instruction_fusion.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
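// Describes the physical iteration over a tensor: for every logical
// dimension, an ordered list of (stride, count, slice) fragments. Two specs
// are considered physically equivalent when they describe the same memory
// traversal once trivial size-1 dimensions are ignored.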
class TensorIterationSpec {
public:
struct IterationSpecFragment {
int64_t stride;
int64_t count;
int64_t slice_start;
int64_t sliced_count;
std::vector<int64_t> subfragments;
bool is_sliced() const { return count != sliced_count; }
auto ToTuple() const {
return std::make_tuple(stride, count, slice_start, sliced_count,
subfragments);
}
bool operator==(const IterationSpecFragment& other) const {
return ToTuple() == other.ToTuple();
}
template <typename H>
friend H AbslHashValue(H h, const IterationSpecFragment& fragment) {
return H::combine(std::move(h), fragment.ToTuple());
}
bool IsPhysicallyEquivalent(const IterationSpecFragment& other) const {
return stride == other.stride && count == other.count &&
slice_start == other.slice_start &&
sliced_count == other.sliced_count;
}
std::string ToString() const;
};
using DimIterationSpec = std::vector<IterationSpecFragment>;
const DimIterationSpec& operator[](const int dimension) const {
return dim_iteration_specs_.at(dimension);
}
DimIterationSpec& operator[](const int dimension) {
return dim_iteration_specs_[dimension];
}
const DimIterationSpec* Find(int dimension) const;
std::vector<int> GetDimensions() const;
void RemoveEmptyDimensions() {
absl::erase_if(dim_iteration_specs_,
[](const auto& it) { return it.second.empty(); });
}
bool operator==(const TensorIterationSpec& other) const {
return dim_iteration_specs_ == other.dim_iteration_specs_;
}
template <typename H>
friend H AbslHashValue(H h, const TensorIterationSpec& spec) {
return H::combine(std::move(h), spec.dim_iteration_specs_);
}
bool IsPhysicallyEquivalent(const TensorIterationSpec& other) const;
std::string ToString() const;
private:
absl::flat_hash_map<int, DimIterationSpec> dim_iteration_specs_;
};
namespace triton_fusion {
class DimensionOrder {
public:
static DimensionOrder FromDotOperandOrOutput(
const HloInstruction& hlo, int split_k_dimension_index = -1);
class Fragment {
public:
explicit Fragment(int dst_dim_number, int64_t count)
: dst_dim_number_(dst_dim_number),
count_(count),
slice_start_(0),
sliced_count_(count) {}
std::string ToString() const;
int dst_dim_number() const { return dst_dim_number_; }
int64_t full_count() const { return count_; }
int64_t slice_start() const { return slice_start_; }
int64_t sliced_count() const { return sliced_count_; }
bool is_sliced() const { return count_ != sliced_count_; }
void set_slice(int64_t start, int64_t count) {
slice_start_ = start;
sliced_count_ = count;
}
void set_count(int64_t count) { count_ = count; }
private:
const int dst_dim_number_;
int64_t count_;
int64_t slice_start_;
int64_t sliced_count_;
};
using Fragments = std::vector<Fragment>;
using FragmentOrders = absl::flat_hash_map<int, std::vector<int>>;
const Fragments& TensorFragmentsOrder() const {
return tensor_fragments_order_;
}
Fragments& TensorFragmentsOrder() { return tensor_fragments_order_; }
const FragmentOrders& DimFragmentsOrders() const {
return dim_fragments_orders_;
}
FragmentOrders& DimFragmentsOrders() { return dim_fragments_orders_; }
std::string ToString() const;
TensorIterationSpec ToTensorIterationSpec() const;
bool IsPhysicallyEquivalent(const DimensionOrder& other) const {
return ToTensorIterationSpec().IsPhysicallyEquivalent(
other.ToTensorIterationSpec());
}
private:
Fragments tensor_fragments_order_;
FragmentOrders dim_fragments_orders_;
};
inline constexpr int kNoDimensionIndex = -1;
struct DotProperties {
const int noncontracting_dimension;
const int splittable_dimension_index;
};
inline constexpr int kNoSplitRequirement = 1;
struct DotRequirements {
explicit DotRequirements(int64_t splittable_dimension_major_part_size)
: splittable_dimension_major_part_size(
splittable_dimension_major_part_size) {
CHECK_GE(splittable_dimension_major_part_size, 1);
}
int64_t splittable_dimension_major_part_size;
};
using DotRequirementsOrError = std::variant<DotRequirements, FusionDecision>;
DotRequirementsOrError CombineDotRequirements(
DotRequirements a, DotRequirementsOrError b_or_error);
enum class TransformDirection { kInputToOutput, kOutputToInput };
using DimOrderMap = absl::flat_hash_map<const HloInstruction*, DimensionOrder>;
using DimOrderMapOrError = std::variant<DimOrderMap, FusionDecision>;
struct DimOrdersAndReqs {
DimOrderMap dim_orders;
DotRequirements requirements;
};
using DimOrdersAndReqsOrError = std::variant<DimOrdersAndReqs, FusionDecision>;
DimOrdersAndReqsOrError GetPropagatedDimOrdersAndRequirements(
const HloInstruction& hlo, const DimensionOrder& src_dim_order,
TransformDirection direction, const DotProperties& properties);
DimOrdersAndReqsOrError
GetPropagatedDimOrdersAndRequirementsIfProfitablyFusible(
const HloInstruction& hlo, TransformDirection transform_direction,
const std::optional<int>& src_operand_index,
const DimensionOrder& src_dim_order,
const se::GpuComputeCapability& gpu_version,
const DotProperties& properties);
}
}
}
#endif
#include "xla/service/gpu/triton_tiling_propagation.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <list>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/layout.h"
#include "xla/permutation_util.h"
#include "xla/service/gpu/triton_support.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
namespace {
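// Drops dimensions whose whole iteration spec is a single fragment of
// count 1; such dimensions do not influence the physical layout and are
// ignored when comparing specs for physical equivalence.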
absl::flat_hash_map<int, TensorIterationSpec::DimIterationSpec>
FilterTrivialDims(
const absl::flat_hash_map<int, TensorIterationSpec::DimIterationSpec>&
dim_iter_specs) {
absl::flat_hash_map<int, TensorIterationSpec::DimIterationSpec>
non_trivial_dim_iteration_specs;
for (const auto& [dim, dim_spec] : dim_iter_specs) {
if (dim_spec.size() == 1 && dim_spec[0].count == 1) {
continue;
}
non_trivial_dim_iteration_specs[dim] = dim_spec;
}
return non_trivial_dim_iteration_specs;
}
}
const TensorIterationSpec::DimIterationSpec* TensorIterationSpec::Find(
const int dimension) const {
if (auto it = dim_iteration_specs_.find(dimension);
it != dim_iteration_specs_.end()) {
return &it->second;
}
return nullptr;
}
std::vector<int> TensorIterationSpec::GetDimensions() const {
std::vector<int> result;
result.reserve(dim_iteration_specs_.size());
for (const auto& [dim, _] : dim_iteration_specs_) {
result.push_back(dim);
}
return result;
}
bool TensorIterationSpec::IsPhysicallyEquivalent(
const TensorIterationSpec& other) const {
const absl::flat_hash_map<int, DimIterationSpec>
non_trivial_dim_iteration_specs = FilterTrivialDims(dim_iteration_specs_);
const absl::flat_hash_map<int, DimIterationSpec>
other_non_trivial_dim_iteration_specs =
FilterTrivialDims(other.dim_iteration_specs_);
if (non_trivial_dim_iteration_specs.size() !=
other_non_trivial_dim_iteration_specs.size()) {
return false;
}
for (const auto& pair : non_trivial_dim_iteration_specs) {
int dimension = pair.first;
const DimIterationSpec& dim_iter_spec = pair.second;
auto other_it = other_non_trivial_dim_iteration_specs.find(dimension);
if (other_it == other_non_trivial_dim_iteration_specs.end()) {
return false;
}
const DimIterationSpec& other_dim_iter_spec = other_it->second;
if (dim_iter_spec.size() != other_dim_iter_spec.size()) {
return false;
}
for (size_t i = 0; i < dim_iter_spec.size(); i++) {
if (!dim_iter_spec[i].IsPhysicallyEquivalent(other_dim_iter_spec[i])) {
return false;
}
}
}
return true;
}
std::string TensorIterationSpec::IterationSpecFragment::ToString() const {
return absl::StrCat("{stride=", stride, ", count=", count,
", slice_start=", slice_start,
", sliced_count=", sliced_count, ", subfragments=[",
absl::StrJoin(subfragments, ", "), "]}");
}
std::string TensorIterationSpec::ToString() const {
return absl::StrCat(
"{",
absl::StrJoin(dim_iteration_specs_, ", ",
[&](std::string* s, const auto& kv) {
absl::StrAppend(
s, kv.first, ": ", "[",
absl::StrJoin(kv.second, ", ",
[&](std::string* ss, const auto& v) {
absl::StrAppend(ss, v.ToString());
}),
"]");
}),
"}");
}
namespace triton_fusion {
using Fragment = DimensionOrder::Fragment;
using Fragments = DimensionOrder::Fragments;
using FragmentOrders = DimensionOrder::FragmentOrders;
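// Builds a dimension order by walking the tensor's layout minor-to-major and
// emitting one fragment per physical dimension. A split-K batch dimension is
// folded back into the contracting dimension it was split from, i.e. it is
// assigned the same destination dimension as the fragment preceding it.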
DimensionOrder DimensionOrder::FromDotOperandOrOutput(
const HloInstruction& hlo, const int split_k_dimension_index) {
DimensionOrder dim_order;
dim_order.tensor_fragments_order_.reserve(hlo.shape().rank());
for (const int i : hlo.shape().layout().minor_to_major()) {
int target_dim_number = i;
if (i == split_k_dimension_index) {
CHECK(!dim_order.tensor_fragments_order_.empty())
<< "The split-K batch dimension has be preceded by the contracting "
"dimension it originates from by construction.";
target_dim_number =
dim_order.tensor_fragments_order_.back().dst_dim_number();
}
dim_order.dim_fragments_orders_[target_dim_number].push_back(
dim_order.tensor_fragments_order_.size());
dim_order.tensor_fragments_order_.push_back(
Fragment{target_dim_number, hlo.shape().dimensions(i)});
}
return dim_order;
}
std::string DimensionOrder::Fragment::ToString() const {
return absl::StrCat(dst_dim_number_, ":", count_, ":", slice_start_, "-",
sliced_count_);
}
std::string DimensionOrder::ToString() const {
std::string ret = absl::StrJoin(tensor_fragments_order_, " - ",
[](std::string* out, const Fragment& f) {
absl::StrAppend(out, f.ToString(), " ");
});
absl::StrAppend(&ret, "|");
for (const auto& [dim, fragments] : dim_fragments_orders_) {
absl::StrAppend(&ret, dim, ":", absl::StrJoin(fragments, ","), " ");
}
return ret;
}
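// Converts the fragment order into per-logical-dimension iteration specs.
// Strides accumulate as the running product of fragment sizes; consecutive
// fragments mapping to the same logical dimension are merged into a single
// spec entry (their counts multiplied, subfragment sizes recorded), slicing
// is only permitted on the major-most fragment, and trivial count-1 entries
// are filtered out at the end.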
TensorIterationSpec DimensionOrder::ToTensorIterationSpec() const {
const Fragments& dim_fragments = TensorFragmentsOrder();
TensorIterationSpec tensor_spec;
int64_t accumulated_stride = 1;
int last_dim = -1;
for (int dim_order_index = 0; dim_order_index < dim_fragments.size();
++dim_order_index) {
const DimensionOrder::Fragment& fragment = dim_fragments[dim_order_index];
VLOG(6) << fragment.ToString();
TensorIterationSpec::DimIterationSpec& dim_spec =
tensor_spec[fragment.dst_dim_number()];
if (last_dim == fragment.dst_dim_number()) {
if (!dim_spec.empty() && !dim_spec.back().subfragments.empty() &&
dim_spec.back().subfragments.back() == 1) {
dim_spec.back().subfragments.pop_back();
}
if (fragment.full_count() > 1) {
CHECK(!dim_spec.empty());
CHECK(!dim_spec.back().is_sliced())
<< "Only the major-most fragment can have an offset.";
dim_spec.back().slice_start =
fragment.slice_start() * dim_spec.back().count;
dim_spec.back().sliced_count =
fragment.sliced_count() * dim_spec.back().count;
dim_spec.back().count *= fragment.full_count();
dim_spec.back().subfragments.push_back(fragment.sliced_count());
}
} else {
dim_spec.push_back(TensorIterationSpec::IterationSpecFragment{
accumulated_stride,
fragment.full_count(),
fragment.slice_start(),
fragment.sliced_count(),
{fragment.sliced_count()}});
}
accumulated_stride *= fragment.full_count();
last_dim = fragment.dst_dim_number();
}
for (int dim_idx : tensor_spec.GetDimensions()) {
TensorIterationSpec::DimIterationSpec& dim_spec = tensor_spec[dim_idx];
if (dim_spec.size() <= 1) continue;
TensorIterationSpec::DimIterationSpec filtered_dim_spec;
absl::c_copy_if(dim_spec, std::back_inserter(filtered_dim_spec),
[](const TensorIterationSpec::IterationSpecFragment& f) {
return f.count != 1;
});
tensor_spec[dim_idx] = filtered_dim_spec;
}
tensor_spec.RemoveEmptyDimensions();
return tensor_spec;
}
namespace {
std::optional<int> LogicalIndexOfLabeledDimension(
const Shape& shape, const DimensionOrder& dim_order, const int label) {
auto fragment_it = dim_order.TensorFragmentsOrder().cbegin();
for (int dim : shape.layout().minor_to_major()) {
const int64_t dim_size = shape.dimensions()[dim];
int64_t fragments_size = 1;
while (fragments_size < dim_size) {
fragments_size *= fragment_it->full_count();
if (fragment_it->dst_dim_number() == label) {
return dim;
}
++fragment_it;
}
}
return std::nullopt;
}
using Int64OrError = std::variant<int64_t, FusionDecision>;
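// Two split-size requirements are compatible iff they are equal or one of
// them is kNoSplitRequirement; anything else is a conflicting split.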
Int64OrError CombineSplitDimMajorPartSizeReqs(int64_t a, int64_t b) {
if (a == b || b == kNoSplitRequirement) {
return a;
}
if (a == kNoSplitRequirement) {
return b;
}
return FusionDecision("Conflicting splits of splittable dimension");
}
}
DotRequirementsOrError CombineDotRequirements(
DotRequirements a, DotRequirementsOrError b_or_error) {
if (std::holds_alternative<FusionDecision>(b_or_error)) {
return b_or_error;
}
const DotRequirements& b = std::get<DotRequirements>(b_or_error);
Int64OrError combined_size_req =
CombineSplitDimMajorPartSizeReqs(a.splittable_dimension_major_part_size,
b.splittable_dimension_major_part_size);
if (std::holds_alternative<FusionDecision>(combined_size_req)) {
return std::get<FusionDecision>(combined_size_req);
}
return DotRequirements(std::get<int64_t>(combined_size_req));
}
namespace {
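// Verifies that a dimension order is expressible as a Triton tile: within
// each logical dimension the fragments must form contiguous, monotonically
// increasing groups (no transpose inside a dimension), only the major-most
// fragment may be sliced, and at most one extra group (a split) is allowed,
// and only on the designated splittable dimension. On success returns the
// required major-part size of that split.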
DotRequirementsOrError GetRequirementsIfSupportedOrder(
const DimensionOrder& order, const DotProperties& properties) {
VLOG(8) << order.ToString();
int64_t split_dim_major_part = kNoSplitRequirement;
const Fragments& tensor_dim_fragments = order.TensorFragmentsOrder();
for (const auto& [dim_index, dim_fragments] : order.DimFragmentsOrders()) {
CHECK(!dim_fragments.empty());
for (int i = 0; i < dim_fragments.size() - 1; ++i) {
if (tensor_dim_fragments[dim_fragments[i]].is_sliced()) {
return "Sliced non-major-most fragment.";
}
}
int group_counter = 0;
int last_seen_group_last_fragment_index = -1;
auto fragment_it = dim_fragments.cbegin();
while (true) {
if (fragment_it == dim_fragments.cend()) {
break;
}
int64_t grouped_size = tensor_dim_fragments[*fragment_it].full_count();
while ((fragment_it + 1) != dim_fragments.cend() &&
*(fragment_it + 1) == *fragment_it + 1) {
++fragment_it;
grouped_size *= tensor_dim_fragments[*fragment_it].full_count();
}
if (grouped_size == 1) {
++fragment_it;
continue;
}
if (last_seen_group_last_fragment_index > *fragment_it) {
return "Transpose within a dimension.";
}
++group_counter;
if (group_counter > 1) {
const int splittable_dimension_index =
properties.splittable_dimension_index;
if (dim_index == splittable_dimension_index) {
if (group_counter == 2) {
if (split_dim_major_part != kNoSplitRequirement &&
split_dim_major_part != grouped_size) {
return "Conflicting splits of splittable dimension";
}
split_dim_major_part = grouped_size;
} else if (group_counter > 2) {
return "2nd split of a splittable dimension.";
}
} else {
return "Unsupported split of a dimension.";
}
}
last_seen_group_last_fragment_index = *fragment_it;
++fragment_it;
}
}
return DotRequirements(split_dim_major_part);
}
DotRequirementsOrError GetRequirementsIfSupportedOrders(
const HloInstruction& hlo, const DimOrderMap& dim_orders,
const DotProperties& properties) {
const DotRequirements empty_requirements(kNoSplitRequirement);
auto get_requirements =
[&](const HloInstruction& instr) -> DotRequirementsOrError {
if (auto it = dim_orders.find(&instr); it != dim_orders.end()) {
return GetRequirementsIfSupportedOrder(it->second, properties);
}
return empty_requirements;
};
DotRequirements requirements = empty_requirements;
for (const HloInstruction* operand : hlo.operands()) {
DotRequirementsOrError requirements_or_error =
CombineDotRequirements(requirements, get_requirements(*operand));
if (std::holds_alternative<FusionDecision>(requirements_or_error)) {
return requirements_or_error;
}
requirements = std::get<DotRequirements>(requirements_or_error);
}
return CombineDotRequirements(requirements, get_requirements(hlo));
}
DimOrderMap GetPropagatedDimOrdersForElementwise(
const HloInstruction& hlo, TransformDirection direction,
const DimensionOrder& src_dim_order) {
if (direction == TransformDirection::kOutputToInput) {
DimOrderMap map;
for (const HloInstruction* operand : hlo.operands()) {
map.insert({operand, src_dim_order});
}
return map;
}
return {{&hlo, src_dim_order}};
}
const HloInstruction& GetSourceHlo(const HloInstruction& hlo,
TransformDirection direction) {
CHECK_GE(hlo.operand_count(), 1);
if (direction == TransformDirection::kOutputToInput) {
return hlo;
}
return *hlo.operand(0);
}
using ConstInstructionVector = absl::InlinedVector<const HloInstruction*, 2>;
ConstInstructionVector GetDestHlos(const HloInstruction& hlo,
TransformDirection direction) {
if (direction == TransformDirection::kInputToOutput) {
return {&hlo};
}
ConstInstructionVector hlos;
hlos.reserve(hlo.operands().size());
for (const HloInstruction* operand : hlo.operands()) {
hlos.push_back(operand);
}
return hlos;
}
const HloInstruction& GetDestHlo(const HloInstruction& hlo,
TransformDirection direction) {
CHECK_EQ(hlo.operand_count(), 1);
if (direction == TransformDirection::kInputToOutput) {
return hlo;
}
return *hlo.operand(0);
}
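// Propagates a dimension order through a bitcast by matching source fragment
// sizes against destination dimension sizes, splitting a fragment when the
// destination dimension is smaller and spanning several destination
// dimensions when it is larger; any size relationship that is not an exact
// divisor, and any slicing across such a split, makes the bitcast
// unsupported. Trailing size-1 destination dimensions get size-1 fragments.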
DimOrderMapOrError GetPropagatedDimOrdersForBitcast(
const HloInstruction& hlo, const TransformDirection direction,
const DimensionOrder& src_dim_order, const DotProperties& properties) {
const HloInstruction& dst = GetDestHlo(hlo, direction);
const Shape& dst_shape = dst.shape();
const Fragments& src_fragments_order = src_dim_order.TensorFragmentsOrder();
DimOrderMap dst_dim_orders;
DimensionOrder& dst_dim_order =
dst_dim_orders.insert({&dst, DimensionOrder()}).first->second;
Fragments& dst_fragments_order = dst_dim_order.TensorFragmentsOrder();
int64_t dst_remaining_size = 1;
absl::flat_hash_map<const Fragment*, std::vector<int>> src_to_dst;
auto dst_dim_it = dst_shape.layout().minor_to_major().cbegin();
const auto dst_dim_end = dst_shape.layout().minor_to_major().cend();
for (auto src_dim = src_fragments_order.cbegin();
src_dim != src_fragments_order.cend(); ++src_dim) {
auto add_new_fragment = [&](const Fragment& fragment) {
dst_fragments_order.push_back(fragment);
src_to_dst[&*src_dim].push_back(dst_fragments_order.size() - 1);
};
if (dst_remaining_size >= src_dim->full_count()) {
if (dst_remaining_size % src_dim->full_count()) {
return "Unsupported bitcast";
}
add_new_fragment(*src_dim);
dst_remaining_size /= src_dim->full_count();
} else {
int64_t src_remaining_size = src_dim->full_count();
if (dst_remaining_size > 1) {
if (src_remaining_size % dst_remaining_size || (src_dim->is_sliced())) {
return "Unsupported bitcast";
}
add_new_fragment(
Fragment{src_dim->dst_dim_number(), dst_remaining_size});
src_remaining_size /= dst_remaining_size;
dst_remaining_size = 1;
}
while (src_remaining_size > 1) {
CHECK(dst_dim_it != dst_dim_end);
int64_t dst_dim_size = dst_shape.dimensions(*dst_dim_it);
int64_t new_fragment_size = dst_dim_size;
if (dst_dim_size > src_remaining_size) {
if (dst_dim_size % src_remaining_size) {
return "Unsupported bitcast";
}
dst_remaining_size = dst_dim_size / src_remaining_size;
new_fragment_size = src_remaining_size;
}
if (src_dim->is_sliced()) {
return "Unsupported bitcast";
}
add_new_fragment(
Fragment{src_dim->dst_dim_number(), new_fragment_size});
src_remaining_size /= new_fragment_size;
++dst_dim_it;
}
}
}
CHECK_EQ(dst_remaining_size, 1);
while (dst_dim_it != dst_dim_end) {
if (dst_shape.dimensions(*dst_dim_it) != 1) {
return "Unsupported bitcast";
}
if (!dst_fragments_order.empty()) {
dst_fragments_order.push_back(
Fragment{dst_fragments_order.back().dst_dim_number(), 1});
src_to_dst[&src_fragments_order.back()].push_back(
dst_fragments_order.size() - 1);
}
++dst_dim_it;
}
FragmentOrders& dst_dim_fragment_orders = dst_dim_order.DimFragmentsOrders();
for (const auto& [dim_index, dim_sequence] :
src_dim_order.DimFragmentsOrders()) {
std::vector<int>& dst = dst_dim_fragment_orders[dim_index];
dst.reserve(dim_sequence.size());
for (const int src : dim_sequence) {
std::copy(src_to_dst[&src_fragments_order[src]].cbegin(),
src_to_dst[&src_fragments_order[src]].cend(),
std::back_inserter(dst));
}
}
return dst_dim_orders;
}
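// Propagates a dimension order through rank-altering ops by relabeling and
// regrouping fragments between the source and destination shapes; slices
// that collapse to an effective scalar are rejected up front.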
DimOrderMapOrError GetPropagatedDimOrdersForDimAlteringOp(
const HloInstruction& hlo, const TransformDirection direction,
const DimensionOrder& src_dim_order, const DotProperties& properties) {
std::list<Fragment> new_fragments;
const HloInstruction& src = GetSourceHlo(hlo, direction);
Fragments src_fragments_order = src_dim_order.TensorFragmentsOrder();
if (hlo.opcode() == HloOpcode::kSlice &&
ShapeUtil::IsEffectiveScalar(hlo.shape())) {
return FusionDecision("Slice to scalar is not implemented yet.");
}
std::vector<std::vector<Fragment*>> src_physical;
src_physical.reserve(src.shape().rank());
if (src_fragments_order.size() < src.shape().rank()) { | #include "xla/service/gpu/triton_tiling_propagation.h"
#include <vector>
#include <gtest/gtest.h>
#include "xla/tests/hlo_test_base.h"
namespace xla::gpu {
namespace {
using TritonTilingPropagationTest = HloTestBase;
using triton_fusion::DimensionOrder;
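// Test helper: builds a DimensionOrder directly from a fragment list,
// registering each fragment under its destination (logical) dimension.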
DimensionOrder FromFragments(DimensionOrder::Fragments fragments) {
DimensionOrder dim_order;
DimensionOrder::Fragments& tensor_fragments_order =
dim_order.TensorFragmentsOrder();
DimensionOrder::FragmentOrders& dim_fragments_orders =
dim_order.DimFragmentsOrders();
for (const DimensionOrder::Fragment& fragment : fragments) {
tensor_fragments_order.push_back(fragment);
dim_fragments_orders[fragment.dst_dim_number()].push_back(
tensor_fragments_order.size());
}
return dim_order;
}
TEST_F(
TritonTilingPropagationTest,
DimensionOrdersRemainPhysicallyEquivalentAfterInsertingTrivialDimensions) {
DimensionOrder::Fragment fragment_1(0, 97);
DimensionOrder::Fragment fragment_2(0, 1);
DimensionOrder dimension_order_1 = FromFragments({fragment_1, fragment_2});
DimensionOrder::Fragment fragment_3(0, 97);
DimensionOrder::Fragment fragment_4(1, 1);
DimensionOrder dimension_order_2 = FromFragments({fragment_3, fragment_4});
EXPECT_TRUE(dimension_order_1.IsPhysicallyEquivalent(dimension_order_2));
}
TEST_F(
TritonTilingPropagationTest,
IterationSpecsRemainPhysicallyEquivalentAfterInsertingTrivialDimensions) {
TensorIterationSpec::IterationSpecFragment fragment_1 = {
1, 97, 0, 97,
{97}};
TensorIterationSpec spec_1;
spec_1[0].push_back(fragment_1);
TensorIterationSpec::IterationSpecFragment fragment_2 = {
1, 97, 0, 97,
{97}};
TensorIterationSpec::IterationSpecFragment fragment_3 = {
97, 1, 0, 1,
{1}};
TensorIterationSpec spec_2;
spec_2[0].push_back(fragment_2);
spec_2[1].push_back(fragment_3);
EXPECT_TRUE(spec_1.IsPhysicallyEquivalent(spec_2));
}
TEST_F(TritonTilingPropagationTest,
DimensionsShouldNotBeRemovedByToTensorIterationSpec) {
DimensionOrder::Fragment fragment_0(0, 97);
DimensionOrder::Fragment fragment_1(1, 1);
DimensionOrder dimension_order = FromFragments({fragment_0, fragment_1});
TensorIterationSpec spec = dimension_order.ToTensorIterationSpec();
const TensorIterationSpec::DimIterationSpec* dim_spec_0 = spec.Find(0);
EXPECT_NE(dim_spec_0, nullptr);
EXPECT_EQ(dim_spec_0->size(), 1);
EXPECT_EQ(dim_spec_0->at(0).count, 97);
const TensorIterationSpec::DimIterationSpec* dim_spec_1 = spec.Find(1);
EXPECT_NE(dim_spec_1, nullptr);
EXPECT_EQ(dim_spec_1->size(), 1);
EXPECT_EQ(dim_spec_1->at(0).count, 1);
}
}
} |
2,050 | cpp | tensorflow/tensorflow | reduction_degenerate_dim_remover | third_party/xla/xla/service/gpu/transforms/reduction_degenerate_dim_remover.cc | third_party/xla/xla/service/gpu/transforms/reduction_degenerate_dim_remover_test.cc | #ifndef XLA_SERVICE_GPU_REDUCTION_DEGENERATE_DIM_REMOVER_H_
#define XLA_SERVICE_GPU_REDUCTION_DEGENERATE_DIM_REMOVER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
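// Rewrites reductions whose inputs carry degenerate (size-1) dimensions into
// reductions over shapes with those dimensions dropped, bracketed by
// bitcasts, which canonicalizes the reductions for later GPU codegen.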
class ReductionDegenerateDimRemover : public HloModulePass {
public:
absl::string_view name() const override {
return "reduction-degenerate-dim-remover";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/reduction_degenerate_dim_remover.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
class ReductionDegenerateDimRemoverVisitor : public DfsHloRewriteVisitor {
public:
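// Bitcasts every input to a shape without degenerate dimensions, emits a
// canonical reduce with the reduced-dimension indices remapped accordingly,
// and bitcasts the result back to the original output shape (per tuple
// element for variadic reductions). A reduce whose only degenerate
// dimensions are the reduced ones degenerates to a plain bitcast.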
absl::Status HandleReduce(HloInstruction *hlo) override {
auto instr = Cast<HloReduceInstruction>(hlo);
absl::InlinedVector<HloInstruction *, 2> input_reshapes;
absl::InlinedVector<Shape, 2> canonical_reduce_shapes;
int idx = -1;
std::vector<int64_t> updated_reduced_dimensions;
for (HloInstruction *reduced_op : instr->inputs()) {
idx++;
const Shape &input_shape = reduced_op->shape();
const Shape &reduce_shape = instr->shape().IsTuple()
? instr->shape().tuple_shapes(idx)
: instr->shape();
if (!ShapeUtil::HasDegenerateDimensions(reduced_op->shape())) {
return absl::OkStatus();
}
Shape canonical_input_shape =
ShapeUtil::DropDegenerateDimensions(input_shape);
Shape canonical_reduce_shape =
ShapeUtil::DropDegenerateDimensions(reduce_shape);
auto reduced_dimensions = instr->dimensions();
int64_t shift = 0;
for (int dim = 0; dim < input_shape.rank(); dim++) {
if (input_shape.dimensions(dim) == 1) {
shift++;
} else {
if (absl::c_linear_search(reduced_dimensions, dim) && idx == 0) {
updated_reduced_dimensions.push_back(dim - shift);
}
}
}
if (updated_reduced_dimensions.empty()) {
std::unique_ptr<HloInstruction> reshape =
HloInstruction::CreateBitcast(reduce_shape, reduced_op);
return ReplaceWithNewInstruction(instr, std::move(reshape));
}
input_reshapes.push_back(instr->parent()->AddInstruction(
HloInstruction::CreateBitcast(canonical_input_shape, reduced_op)));
canonical_reduce_shapes.push_back(canonical_reduce_shape);
}
Shape canonical_reduce_shape =
ShapeUtil::MakeMaybeTupleShape(canonical_reduce_shapes);
const Shape &orig_reduce_shape = instr->shape();
std::unique_ptr<HloInstruction> new_reduce = HloInstruction::CreateReduce(
canonical_reduce_shape, input_reshapes, instr->init_values(),
updated_reduced_dimensions, instr->to_apply());
instr->SetupDerivedInstruction(new_reduce.get());
if (canonical_reduce_shape != instr->shape()) {
HloInstruction *wrapped_reduce =
instr->parent()->AddInstruction(std::move(new_reduce));
absl::InlinedVector<HloInstruction *, 2> out;
if (!canonical_reduce_shape.IsTuple()) {
new_reduce =
HloInstruction::CreateBitcast(orig_reduce_shape, wrapped_reduce);
} else {
for (int oidx = 0; oidx < instr->input_count(); oidx++) {
HloInstruction *gte = instr->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(wrapped_reduce, oidx));
out.push_back(
instr->parent()->AddInstruction(HloInstruction::CreateBitcast(
orig_reduce_shape.tuple_shapes(oidx), gte)));
}
new_reduce = HloInstruction::CreateTuple(out);
}
}
return ReplaceWithNewInstruction(instr, std::move(new_reduce));
}
};
absl::StatusOr<bool> ReductionDegenerateDimRemover::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
TF_ASSIGN_OR_RETURN(bool changed,
ReductionDegenerateDimRemoverVisitor().RunOnModule(
module, execution_threads));
return changed;
}
}
} | #include "xla/service/gpu/reduction_degenerate_dim_remover.h"
#include <optional>
#include "absl/strings/string_view.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class ReductionDegenerateDimRemoverTest : public HloTestBase {
public:
void CheckDegenerateDimRemover(absl::string_view hlo,
std::optional<absl::string_view> expected) {
RunAndFilecheckHloRewrite(hlo, gpu::ReductionDegenerateDimRemover{},
expected);
}
};
TEST_F(ReductionDegenerateDimRemoverTest, ReductionWithDegenerateDimensions) {
const char* hlo = R"(
HloModule ReduceWithDegenerateDimensions
add {
accum = f32[] parameter(0)
op = f32[] parameter(1)
ROOT out = f32[] add(accum, op)
}
ENTRY main {
input = f32[1,3,1,4,1,5,1] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[1,1,1,1] reduce(input, zero), dimensions={1,3,5}, to_apply=add
}
)";
CheckDegenerateDimRemover(hlo, R"(
)");
}
TEST_F(ReductionDegenerateDimRemoverTest,
ReductionWithDegenerateDimensionsVariadic) {
const char* hlo = R"(
HloModule ReduceWithDegenerateDimensions
argmax {
running_max = f32[] parameter(0)
running_max_idx = u32[] parameter(1)
current_value = f32[] parameter(2)
current_value_idx = u32[] parameter(3)
current = (f32[], u32[]) tuple(running_max, running_max_idx)
potential = (f32[], u32[]) tuple(current_value, current_value_idx)
cmp_code = pred[] compare(current_value, running_max), direction=GT
new_max = f32[] select(cmp_code, current_value, running_max)
new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
input = f32[1,3,1,4,1,5,1] parameter(0)
idxs = u32[1,3,1,4,1,5,1] parameter(1)
zero = f32[] constant(0)
zero_idx = u32[] constant(0)
ROOT out = (f32[1,1,1,1], u32[1,1,1,1]) reduce(input, idxs, zero, zero_idx), dimensions={1,3,5}, to_apply=argmax
}
)";
CheckDegenerateDimRemover(hlo, R"(
)");
}
TEST_F(ReductionDegenerateDimRemoverTest, DegenerateWithEmptyDimension) {
const char* hlo = R"(
HloModule ReduceWithDegenerateDimensions
add {
accum = f32[] parameter(0)
op = f32[] parameter(1)
ROOT out = f32[] add(accum, op)
}
ENTRY main {
input = f32[1,3,1,4,1,5,1] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[3,4,5,1] reduce(input, zero), dimensions={0,2,4}, to_apply=add
}
)";
CheckDegenerateDimRemover(hlo,
R"(
)");
}
}
} |
2,051 | cpp | tensorflow/tensorflow | gpu_windowed_einsum_handler | null | null | #ifndef XLA_SERVICE_GPU_GPU_WINDOWED_EINSUM_HANDLER_H_
#define XLA_SERVICE_GPU_GPU_WINDOWED_EINSUM_HANDLER_H_
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla::gpu {
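// Post-partitioning pass that tunes windowed einsum while loops (decomposed
// all-gather/reduce-scatter + dot, named windowed_dot_general_body_{ag,rs}):
// it assigns the dots to separate operation queues so they can overlap with
// the collective permutes, moves FP8 dequantization into the loop body, and
// caches all-gather results so a matching all-gather outside the loop can
// reuse them.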
class GpuWindowedEinsumHandler : public HloModulePass {
public:
absl::string_view name() const override {
return "gpu-windowed-einsum-handler";
}
struct WindowedEinsumAgLoops {
explicit WindowedEinsumAgLoops(HloInstruction* loop) : loop(loop) {}
HloInstruction* loop;
bool consumed = false;
};
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
constexpr static const char* kWindowedEinsumRsLoopName =
"windowed_dot_general_body_rs";
constexpr static const char* kWindowedEinsumAgLoopName =
"windowed_dot_general_body_ag";
private:
std::vector<WindowedEinsumAgLoops> all_ag_loops_;
};
}
#endif
#include "xla/service/gpu/gpu_windowed_einsum_handler.h"
#include <array>
#include <cstdint>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/shape_inference.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
namespace m = match;
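// Tries to move an FP8 dequantization (convert followed by a multiply or
// divide with a broadcast scalar scale) from outside a windowed einsum while
// loop into its body: the loop then carries the narrow FP8 operands plus
// their scales, and the dots inside the body consume freshly dequantized
// values.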
absl::Status ShiftDequantizationF8(const HloComputation* comp,
const std::array<HloInstruction*, 2>& gte) {
HloInstruction* while_instr = comp->WhileCallInstruction();
if (!while_instr) {
return absl::OkStatus();
}
HloInstruction* param_tuple = while_instr->mutable_operand(0);
std::array<HloInstruction*, 2> binaries, operands, scales;
for (int k = 0; k < 2; ++k) {
if (!Match(param_tuple->mutable_operand(k),
m::AnyOf<HloInstruction>(
m::Divide(&binaries[k], m::Convert(m::Op(&operands[k])),
m::Broadcast(m::Op(&scales[k]))),
m::MultiplyAnyOrder(&binaries[k],
m::Convert(m::Op(&operands[k])),
m::Broadcast(m::Op(&scales[k])))))) {
VLOG(5) << "Unable to identify FP8 dequantization pattern.";
return absl::OkStatus();
}
}
std::array<PrimitiveType, 2> operand_types{
operands[0]->shape().element_type(), operands[1]->shape().element_type()};
if (!((operand_types[0] == F8E4M3FN && operand_types[1] == F8E4M3FN) ||
(operand_types[0] == F8E4M3FN && operand_types[1] == F8E5M2) ||
(operand_types[0] == F8E5M2 && operand_types[1] == F8E4M3FN))) {
VLOG(5) << "Unsupported types.";
return absl::OkStatus();
}
for (int k = 0; k < 2; ++k) {
if (binaries[k]->shape().element_type() != BF16 &&
binaries[k]->shape().element_type() != F16 &&
binaries[k]->shape().element_type() != F32) {
VLOG(5) << "Unsupported types.";
return absl::OkStatus();
}
}
if (!ShapeUtil::IsScalar(scales[0]->shape()) ||
!ShapeUtil::IsScalar(scales[1]->shape())) {
VLOG(5) << "Scaling factors must be scalars.";
return absl::OkStatus();
}
HloComputation* while_body = while_instr->while_body();
HloComputation* while_condition = while_instr->while_condition();
HloInstruction* while_root = while_body->root_instruction();
std::array<HloInstruction*, 2> dots, dyn_slices{nullptr, nullptr},
coll_perms{nullptr, nullptr};
if (Match(
while_root,
m::Tuple(m::CollectivePermute(
&coll_perms[1], m::CollectivePermute(
&coll_perms[0], m::Op().Is(gte[0]))),
m::Op().Is(gte[1]),
m::DynamicUpdateSlice(
m::DynamicUpdateSlice().WithOperand(
1, m::Dot(&dots[0], m::Op().Is(gte[0]),
m::Op().Is(gte[1]))),
m::Dot(&dots[1], m::Op(), m::Op().Is(gte[1])), m::Op(),
m::Op(), m::Op()),
m::Op(), m::Op()))) {
VLOG(5) << "Identified all-gather windowed einsum pattern.";
} else if (Match(
while_root,
m::Tuple(m::Op().Is(gte[0]), m::Op().Is(gte[1]),
m::AddAnyOrder(
m::Dot(&dots[0], m::DynamicSlice(&dyn_slices[0]),
m::Op().Is(gte[1])),
m::Op()),
m::CollectivePermute(m::AddAnyOrder(
m::Dot(&dots[1], m::DynamicSlice(&dyn_slices[1]),
m::Op().Is(gte[1])),
m::Op())),
m::Op()))) {
VLOG(5) << "Identified reduce-scatter windowed einsum pattern.";
} else {
VLOG(5) << "Unable to identify valid windowed einsum pattern.";
return absl::OkStatus();
}
for (int k = 0; k < 2; ++k) {
TF_RETURN_IF_ERROR(
param_tuple->ReplaceOperandWithDifferentShape(k, operands[k]));
ShapeUtil::UpdateTupleShape(operands[k]->shape(), k,
param_tuple->mutable_shape());
param_tuple->AppendOperand(scales[k]);
ShapeUtil::AppendShapeToTuple(scales[k]->shape(),
param_tuple->mutable_shape());
}
for (HloComputation* while_comp : {while_body, while_condition}) {
while_comp->ReplaceParameter(
0, HloInstruction::CreateParameter(
0, param_tuple->shape(),
while_comp->parameter_instruction(0)->name()));
}
HloInstruction* body_param = while_body->parameter_instruction(0);
for (int k = 0; k < 2; ++k) {
TF_ASSIGN_OR_RETURN(HloInstruction * operand_f8,
MakeGetTupleElementHlo(body_param, k));
if (while_root->operand(k) == gte[k]) {
TF_RETURN_IF_ERROR(
while_root->ReplaceOperandWithDifferentShape(k, operand_f8));
ShapeUtil::UpdateTupleShape(operand_f8->shape(), k,
while_root->mutable_shape());
}
TF_ASSIGN_OR_RETURN(
HloInstruction * operand_scale,
MakeGetTupleElementHlo(
body_param, body_param->shape().tuple_shapes_size() - 2 + k));
while_root->AppendOperand(operand_scale);
ShapeUtil::AppendShapeToTuple(operand_scale->shape(),
while_root->mutable_shape());
HloInstruction* operand_f32 =
MakeConvertToHlo(operand_f8, gte[k]->shape().element_type());
HloInstruction* broadcast_scale =
MakeBroadcastHlo(operand_scale, {}, operand_f32->shape());
TF_ASSIGN_OR_RETURN(
HloInstruction * operand_scaled,
MakeBinaryHlo(binaries[k]->opcode(), operand_f32, broadcast_scale));
for (int l = 0; l < 2; ++l) {
if (dots[l]->operand(k) == gte[k]) {
TF_RETURN_IF_ERROR(dots[l]->ReplaceOperandWith(k, operand_scaled));
}
if (dyn_slices[l] && dyn_slices[l]->operand(0) == gte[k]) {
TF_RETURN_IF_ERROR(
dyn_slices[l]->ReplaceOperandWith(0, operand_scaled));
}
}
if (coll_perms[0] && coll_perms[0]->operand(0) == gte[k]) {
std::array<HloInstruction*, 2> coll_perms_f8{nullptr, nullptr};
coll_perms_f8[0] =
while_body->AddInstruction(coll_perms[0]->CloneWithNewOperands(
operand_f8->shape(), {operand_f8}));
coll_perms_f8[1] =
while_body->AddInstruction(coll_perms[1]->CloneWithNewOperands(
coll_perms_f8[0]->shape(), {coll_perms_f8[0]}));
HloInstruction* coll_perm0_f32 =
MakeConvertToHlo(coll_perms_f8[0], gte[k]->shape().element_type());
TF_ASSIGN_OR_RETURN(HloInstruction * x_scaled,
MakeBinaryHlo(binaries[k]->opcode(), coll_perm0_f32,
broadcast_scale));
TF_RETURN_IF_ERROR(dots[1]->ReplaceOperandWith(0, x_scaled));
TF_RETURN_IF_ERROR(
while_root->ReplaceOperandWithDifferentShape(0, coll_perms_f8[1]));
ShapeUtil::UpdateTupleShape(coll_perms_f8[1]->shape(), 0,
while_root->mutable_shape());
}
}
TF_RETURN_IF_ERROR(
while_instr->ReplaceAllUsesWithDifferentShape(while_instr->AddInstruction(
while_instr->CloneWithNewShape(while_root->shape()))));
TF_RETURN_IF_ERROR(while_instr->parent()->RemoveInstruction(while_instr));
if (coll_perms[0]) {
TF_RETURN_IF_ERROR(while_body->RemoveInstruction(coll_perms[1]));
TF_RETURN_IF_ERROR(while_body->RemoveInstruction(coll_perms[0]));
}
TF_RETURN_IF_ERROR(while_body->RemoveInstruction(gte[0]));
TF_RETURN_IF_ERROR(while_body->RemoveInstruction(gte[1]));
VLOG(5) << "FP8 dequantization moved into while loop.";
return absl::OkStatus();
}
int64_t NumberOfInstructionsInComp(const HloComputation* comp, HloOpcode op) {
int64_t total_count = 0;
for (const HloInstruction* inst : comp->instructions()) {
if (inst->opcode() == op) {
++total_count;
}
}
return total_count;
}
absl::Status UpdateDotAndConsumerConfig(HloInstruction* dot,
int64_t stream_id) {
auto dot_gpu_config = dot->backend_config<gpu::GpuBackendConfig>();
HloInstruction* updater = dot->users()[0];
auto updater_gpu_config = updater->backend_config<gpu::GpuBackendConfig>();
dot_gpu_config->set_operation_queue_id(stream_id);
updater_gpu_config->mutable_wait_on_operation_queues()->Add(stream_id);
TF_RETURN_IF_ERROR(dot->set_backend_config(dot_gpu_config.value()));
TF_RETURN_IF_ERROR(updater->set_backend_config(updater_gpu_config.value()));
return absl::OkStatus();
}
absl::Status SetForceDelayForInstruction(HloInstruction* instr,
bool force_delay) {
auto gpu_config = instr->backend_config<gpu::GpuBackendConfig>();
gpu_config->set_force_earliest_schedule(force_delay);
TF_RETURN_IF_ERROR(instr->set_backend_config(gpu_config.value()));
return absl::OkStatus();
}
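// For windowed einsum loops containing more than one dot, give each dot its
// own operation queue (making its consumer wait on that queue) and mark the
// loop's collective permutes, and in the all-gather case the dots as well,
// with force_earliest_schedule so communication and compute can overlap.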
absl::StatusOr<bool> HandleRsWindowedEinsumLoop(HloComputation* comp,
int64_t stream_id) {
bool changed = false;
if (NumberOfInstructionsInComp(comp, HloOpcode::kDot) <= 1) {
return changed;
}
for (auto inst : comp->MakeInstructionPostOrder()) {
HloInstruction* matched_dot;
std::array<HloInstruction*, 2> gte;
if (Match(inst,
m::Dot(&matched_dot,
m::DynamicSlice().WithOperand(
0, m::GetTupleElement(>e[0], m::Parameter(), 0)),
m::GetTupleElement(>e[1], m::Parameter(), 1)))) {
TF_RETURN_IF_ERROR(ShiftDequantizationF8(comp, gte));
TF_RETURN_IF_ERROR(UpdateDotAndConsumerConfig(matched_dot, stream_id));
++stream_id;
changed = true;
}
HloInstruction* matched_cp;
if (Match(inst, m::CollectivePermute(
&matched_cp, m::GetTupleElement(m::Parameter(), 2)))) {
TF_RETURN_IF_ERROR(
SetForceDelayForInstruction(matched_cp, true));
changed = true;
}
}
return changed;
}
absl::StatusOr<bool> HandleAgWindowedEinsumLoop(HloComputation* comp,
int64_t stream_id) {
bool changed = false;
if (NumberOfInstructionsInComp(comp, HloOpcode::kDot) <= 1) {
return changed;
}
for (auto inst : comp->MakeInstructionPostOrder()) {
HloInstruction* matched_dot;
std::array<HloInstruction*, 2> gte;
if (Match(inst, m::Dot(&matched_dot,
m::GetTupleElement(>e[0], m::Parameter(), 0),
m::GetTupleElement(>e[1], m::Parameter(), 1)))) {
TF_RETURN_IF_ERROR(ShiftDequantizationF8(comp, gte));
TF_RETURN_IF_ERROR(UpdateDotAndConsumerConfig(matched_dot, stream_id));
++stream_id;
TF_RETURN_IF_ERROR(
SetForceDelayForInstruction(matched_dot, true));
changed = true;
}
HloInstruction* matched_cp;
if (Match(inst, m::CollectivePermute(
&matched_cp, m::GetTupleElement(m::Parameter(), 0)))) {
TF_RETURN_IF_ERROR(
SetForceDelayForInstruction(matched_cp, true));
changed = true;
}
}
return changed;
}
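// Returns the index at which the all-gather activation cache buffer will be
// appended to the tuple of while_loop, i.e. one past its current last
// element.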
static int64_t GetAgActivationCacheIndex(const HloInstruction* while_loop) {
const HloInstruction* loop_tuple = while_loop->operand(0);
const Shape& tuple_shape = loop_tuple->shape();
CHECK(tuple_shape.IsTuple());
return tuple_shape.tuple_shapes_size();
}
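// Extends the tuple of the windowed einsum loop with a buffer caching the
// fully all-gathered activation and populates it via dynamic-update-slices
// in the loop body, so that consumers outside the loop can reuse the cached
// value instead of performing a separate all-gather.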
absl::Status ProcessWindowedEinsumLoopForActivationCaching(
GpuWindowedEinsumHandler::WindowedEinsumAgLoops& ag_loop,
HloInstruction* ag_with_shared_operand) {
HloInstruction* loop = ag_loop.loop;
HloComputation* while_body = loop->while_body();
  HloInstruction* input_gte = nullptr;
for (HloInstruction* gte : while_body->parameter_instruction(0)->users()) {
if (gte->tuple_index() == 0) {
input_gte = gte;
}
}
HloInstruction* root = while_body->root_instruction();
HloInstruction* input_tuple = while_body->parameter_instruction(0);
const Shape& input_shape = input_tuple->shape();
int64_t full_cache_buffer_index = GetAgActivationCacheIndex(loop);
std::vector<Shape> new_input_shapes(input_shape.tuple_shapes().begin(),
input_shape.tuple_shapes().end());
new_input_shapes.push_back(ag_with_shared_operand->shape());
Shape new_input_shape = ShapeUtil::MakeTupleShape(new_input_shapes);
*input_tuple->mutable_shape() = new_input_shape;
HloInstruction* full_buffer_output_gte =
while_body->AddInstruction(HloInstruction::CreateGetTupleElement(
ag_with_shared_operand->shape(), input_tuple,
full_cache_buffer_index));
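  // Propagate the extended tuple shape to the loop condition's parameter.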
HloComputation* cond_comp = loop->while_condition();
HloInstruction* cond_input_tuple = cond_comp->parameter_instruction(0);
*cond_input_tuple->mutable_shape() = new_input_shape;
HloInstruction* original_while_input = loop->mutable_operand(0);
HloComputation* parent_comp = loop->parent();
std::vector<HloInstruction*> new_operands(
original_while_input->operands().begin(),
original_while_input->operands().end());
new_operands.push_back(
parent_comp->AddInstruction(HloInstruction::CreateBroadcast(
ag_with_shared_operand->shape(),
parent_comp->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(new_input_shapes[0].element_type()))),
{})));
HloInstruction* new_while_input =
parent_comp->AddInstruction(HloInstruction::CreateTuple(new_operands));
TF_RETURN_IF_ERROR(
loop->ReplaceOperandWithDifferentShape(0, new_while_input));
TF_RETURN_IF_ERROR(parent_comp->ReplaceInstructionWithDifferentShape(
original_while_input, new_while_input));
*loop->mutable_shape() = new_input_shape;
HloInstruction* new_full_buffer_output = nullptr;
  HloInstruction* dus_boundary_constant = nullptr;
  HloInstruction* first_cp_output = nullptr;
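  // Find the first collective-permute user of the activation; its output is
  // the shifted operand of the second dot in the unrolled loop body.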
for (HloInstruction* gte_user : input_gte->users()) {
if (gte_user->opcode() == HloOpcode::kCollectivePermute) {
first_cp_output = gte_user;
break;
}
}
for (HloInstruction* inst : while_body->MakeInstructionPostOrder()) {
HloInstruction* slice_indices;
if (Match(inst,
m::DynamicUpdateSlice(
m::GetTupleElement(m::Parameter()), m::Op(),
m::Constant(&dus_boundary_constant),
m::Reshape(m::DynamicSlice(&slice_indices, m::Op(), m::Op())),
m::Op()))) {
slice_indices = while_body->AddInstruction(HloInstruction::CreateReshape(
dus_boundary_constant->shape(), slice_indices));
VLOG(5) << "Created slice op for first slice: "
<< slice_indices->ToString();
full_buffer_output_gte =
while_body->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
full_buffer_output_gte->shape(), full_buffer_output_gte,
input_gte,
{dus_boundary_constant, slice_indices, dus_boundary_constant}));
}
if (Match(inst,
m::DynamicUpdateSlice(
m::DynamicUpdateSlice(), m::Op(), m::Constant(),
m::Reshape(m::DynamicSlice(&slice_indices, m::Op(), m::Op())),
m::Op()))) {
slice_indices = while_body->AddInstruction(HloInstruction::CreateReshape(
dus_boundary_constant->shape(), slice_indices));
VLOG(5) << "Created slice op for second slice: "
<< slice_indices->ToString();
new_full_buffer_output =
while_body->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
full_buffer_output_gte->shape(), full_buffer_output_gte,
first_cp_output,
{dus_boundary_constant, slice_indices, dus_boundary_constant}));
}
HloInstruction* slice_index;
HloInstruction* ds_index_constant;
HloInstruction* remainder;
HloInstruction* ds_param;
if (Match(inst, m::Dot(m::Op(), m::DynamicSlice(&ds_param))) &&
Match(ds_param->operand(0), m::GetTupleElement(m::Parameter(), 1))) {
for (int64_t ds_op_i = 1; ds_op_i < ds_param->operands().size();
ds_op_i++) {
if (!Match(
ds_param->mutable_operand(ds_op_i),
m::Reshape(&slice_index, m::DynamicSlice(m::Constant(),
m::Op(&remainder)))) &&
!Match(ds_param->mutable_operand(ds_op_i),
m::Constant(&ds_index_constant))) {
return absl::OkStatus();
}
}
if (Match(remainder,
m::Remainder(m::Add(m::GetTupleElement(), m::Op()), m::Op()))) {
full_buffer_output_gte =
while_body->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
full_buffer_output_gte->shape(), full_buffer_output_gte,
input_gte,
{ds_index_constant, ds_index_constant, slice_index}));
}
if (Match(remainder,
m::Remainder(
m::Add(m::Add(m::GetTupleElement(), m::Op()), m::Op()),
m::Op()))) {
new_full_buffer_output =
while_body->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
full_buffer_output_gte->shape(), full_buffer_output_gte,
first_cp_output,
{ds_index_constant, ds_index_constant, slice_index}));
}
}
}
std::vector<HloInstruction*> original_operands(root->operands().begin(),
root->operands().end());
original_operands.push_back(new_full_buffer_output);
HloInstruction* new_output_tuple = while_body->AddInstruction(
HloInstruction::CreateTuple(original_operands));
TF_RETURN_IF_ERROR(
while_body->ReplaceInstructionWithDifferentShape(root, new_output_tuple));
return absl::OkStatus();
}
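// Returns whether inst has at least one replica group.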
bool HasReplicaGroups(const HloInstruction* inst) {
return inst->replica_groups().size() > 0;
}
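// Returns whether inst is a single-user transpose, reshape or copy, i.e. an
// intermediate op through which an all-to-all can be moved.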
bool ShouldAddToChain(const HloInstruction* inst) {
switch (inst->opcode()) {
case HloOpcode::kTranspose:
case HloOpcode::kReshape:
case HloOpcode::kCopy:
return inst->user_count() == 1;
default:
return false;
}
}
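// Result of matching a GEMM whose output is consumed by an all-to-all.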
struct MatchedGemmA2aResult {
HloInstruction* producer_gemm;
HloInstruction* lhs;
HloInstruction* rhs;
HloInstruction* a2a_replacement = nullptr;
bool matched = false;
};
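// Rewrites dots adjacent to collectives: decomposes all-to-all+GEMM patterns
// and redirects consumers of all-gather results to activations cached by
// windowed einsum loops.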
class WindowedEinsumVisitor : public DfsHloRewriteVisitor {
public:
explicit WindowedEinsumVisitor(
std::vector<GpuWindowedEinsumHandler::WindowedEinsumAgLoops>&
all_ag_loops)
: all_ag_loops_(all_ag_loops) {}
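  // Matches an all-to-all feeding the lhs of dot through a chain of
  // single-user transpose/reshape/copy ops. On a match, moves the all-to-all
  // to the end of the chain, directly in front of the dot, remapping its
  // split dimension across the intermediate reshapes and transposes.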
absl::StatusOr<bool> MatchA2aGemmWithIntermediateReshapes(
HloInstruction* dot, HloInstruction** lhs, HloInstruction** rhs) {
if (Match(dot, m::Dot(m::AllToAll(lhs).WithOneUse().WithPredicate(
HasReplicaGroups),
m::Op(rhs))) &&
!DynCast<HloAllToAllInstruction>((*lhs))->constrain_layout() &&
!(*lhs)->shape().IsTuple()) {
return true;
}
std::vector<HloInstruction*> allowed_intermediate_ops(
{dot->mutable_operand(0)});
HloAllToAllInstruction* matched_a2a = nullptr;
while (true) {
HloInstruction* curr = allowed_intermediate_ops.back();
if (ShouldAddToChain(curr)) {
allowed_intermediate_ops.insert(allowed_intermediate_ops.end(),
std::begin(curr->operands()),
std::end(curr->operands()));
} else if (curr->opcode() == HloOpcode::kAllToAll &&
curr->user_count() == 1) {
matched_a2a = DynCast<HloAllToAllInstruction>(curr);
allowed_intermediate_ops.pop_back();
break;
} else {
return false;
}
}
CHECK(matched_a2a != nullptr);
if (matched_a2a->constrain_layout() || matched_a2a->shape().IsTuple() ||
!HasReplicaGroups(matched_a2a) || !matched_a2a->split_dimension()) {
return false;
}
int64_t split_dimension = *matched_a2a->split_dimension();
for (int64_t i = allowed_intermediate_ops.size() - 1; i >= 0; i--) {
HloInstruction* current_op = allowed_intermediate_ops[i];
if (current_op->opcode() == HloOpcode::kReshape) {
std::vector<std::pair<int64_t, int64_t>> unmodified_dims =
ShapeUtil::DimensionsUnmodifiedByReshape(
current_op->operand(0)->shape(), current_op->shape());
auto it = absl::c_find_if(
unmodified_dims,
[&split_dimension](std::pair<int64_t, int64_t>& dim_pair) {
return dim_pair.first == split_dimension;
});
if (it == unmodified_dims.end()) {
VLOG(5) << "Split dimension of: " << matched_a2a->ToShortString()
<< " has been modified by reshapes. Skip process it for "
"decomposition.";
return false;
}
split_dimension = it->second;
} else if (current_op->opcode() == HloOpcode::kTranspose) {
const auto& transpose_dims = current_op->dimensions();
for (int64_t j = 0; j < transpose_dims.size(); j++) {
if ((int64_t)transpose_dims[j] == split_dimension) {
split_dimension = j;
break;
}
}
}
}
TF_RETURN_IF_ERROR(allowed_intermediate_ops.back()->ReplaceOperandWith(
0, matched_a2a->mutable_operand(0)));
HloInstruction* new_a2a =
matched_a2a->parent()->AddInstruction(HloInstruction::CreateAllToAll(
allowed_intermediate_ops.front()->shape(),
{allowed_intermediate_ops.front()}, matched_a2a->replica_groups(),
false, hlo_query::NextChannelId(*matched_a2a->GetModule()),
split_dimension));
TF_RETURN_IF_ERROR(dot->ReplaceOperandWith(0, new_a2a));
TF_RETURN_IF_ERROR(
matched_a2a->parent()->RemoveInstructionAndUnusedOperands(matched_a2a));
MarkAsChanged();
*lhs = new_a2a;
*rhs = dot->mutable_operand(1);
return true;
}
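  // Handles dot instructions, applying the all-to-all/GEMM decomposition and
  // chaining consumers of all-gathered activations to the windowed einsum
  // loops that already compute them.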
absl::Status HandleDot(HloInstruction* dot) override {
CHECK_EQ(dot->opcode(), HloOpcode::kDot);
HloComputation* comp = dot->parent();
for (GpuWindowedEinsumHandler::WindowedEinsumAgLoops ag_loop : | #include "xla/service/gpu/gpu_windowed_einsum_handler.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
namespace m = ::xla::match;
using GpuWindowedEinsumHandlerTest = HloTestBase;
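// Returns the instruction named `name` in comp, or nullptr if none exists.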
HloInstruction* FindInstructionByName(HloComputation* comp, std::string name) {
for (auto inst : comp->instructions()) {
if (inst->name() == name) {
return inst;
}
}
return nullptr;
}
TEST_F(GpuWindowedEinsumHandlerTest, AgLoopsHaveStreamIds) {
constexpr absl::string_view kHloString = R"(
HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[1,512,24576]{2,1,0}, bf16[24576,24576]{1,0})->bf16[2048,24576]{1,0}}, num_partitions=4
windowed_dot_general_body_ag.1 {
param = (bf16[512,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[2048,24576]{1,0}, bf16[2048,24576]{1,0}, u32[]) parameter(0)
get-tuple-element = bf16[512,24576]{1,0} get-tuple-element(param), index=0
collective-permute = bf16[512,24576]{1,0} collective-permute(get-tuple-element), channel_id=2, source_target_pairs={{0,3},{1,0},{2,1},{3,2}}, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
get-tuple-element.1 = bf16[24576,24576]{1,0} get-tuple-element(param), index=1
get-tuple-element.2 = bf16[2048,24576]{1,0} get-tuple-element(param), index=2
dot.2 = bf16[512,24576]{1,0} dot(get-tuple-element, get-tuple-element.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
constant.1 = s32[4]{0} constant({0, 512, 1024, 1536})
get-tuple-element.4 = u32[] get-tuple-element(param), index=4
partition-id = u32[] partition-id()
add = u32[] add(get-tuple-element.4, partition-id)
constant = u32[] constant(4)
remainder = u32[] remainder(add, constant)
dynamic-slice = s32[1]{0} dynamic-slice(constant.1, remainder), dynamic_slice_sizes={1}
reshape.4 = s32[] reshape(dynamic-slice)
constant.2 = s32[] constant(0)
dynamic-update-slice = bf16[2048,24576]{1,0} dynamic-update-slice(get-tuple-element.2, dot.2, reshape.4, constant.2), backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
dot.3 = bf16[512,24576]{1,0} dot(collective-permute, get-tuple-element.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
constant.3 = u32[] constant(1)
add.1 = u32[] add(get-tuple-element.4, constant.3)
add.2 = u32[] add(add.1, partition-id)
remainder.1 = u32[] remainder(add.2, constant)
dynamic-slice.1 = s32[1]{0} dynamic-slice(constant.1, remainder.1), dynamic_slice_sizes={1}
reshape.5 = s32[] reshape(dynamic-slice.1)
dynamic-update-slice.1 = bf16[2048,24576]{1,0} dynamic-update-slice(dynamic-update-slice, dot.3, reshape.5, constant.2)
get-tuple-element.3 = bf16[2048,24576]{1,0} get-tuple-element(param), index=3
add.3 = u32[] add(add.1, constant.3)
ROOT tuple = (bf16[512,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[2048,24576]{1,0}, bf16[2048,24576]{1,0}, u32[]) tuple(collective-permute, get-tuple-element.1, dynamic-update-slice.1, get-tuple-element.3, add.3)
}
windowed_dot_general_cond_ag {
param.1 = (bf16[512,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[2048,24576]{1,0}, bf16[2048,24576]{1,0}, u32[]) parameter(0)
get-tuple-element.5 = u32[] get-tuple-element(param.1), index=4
constant.8 = u32[] constant(4)
ROOT compare = pred[] compare(get-tuple-element.5, constant.8), direction=LT
}
ENTRY test_main {
param.4 = bf16[1,512,24576]{2,1,0} parameter(0), sharding={devices=[1,4,1]<=[4]}
reshape.8 = bf16[512,24576]{1,0} reshape(param.4)
param.5 = bf16[24576,24576]{1,0} parameter(1), sharding={devices=[1,4]<=[4]}
constant.18 = bf16[] constant(0)
broadcast = bf16[2048,24576]{1,0} broadcast(constant.18), dimensions={}
constant.20 = u32[] constant(0)
tuple.2 = (bf16[512,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[2048,24576]{1,0}, bf16[2048,24576]{1,0}, u32[]) tuple(reshape.8, param.5, broadcast, broadcast, constant.20)
while = (bf16[512,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[2048,24576]{1,0}, bf16[2048,24576]{1,0}, u32[]) while(tuple.2), condition=windowed_dot_general_cond_ag, body=windowed_dot_general_body_ag.1
ROOT get-tuple-element.13 = bf16[2048,24576]{1,0} get-tuple-element(while), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
GpuWindowedEinsumHandler gpu_handler;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* ag_loop =
module->entry_computation()->root_instruction()->mutable_operand(0);
HloComputation* ag_loop_body = ag_loop->while_body();
HloInstruction* inst = FindInstructionByName(ag_loop_body, "dot.2");
EXPECT_GT(inst->backend_config<GpuBackendConfig>()->operation_queue_id(), 0);
EXPECT_TRUE(
inst->backend_config<GpuBackendConfig>()->force_earliest_schedule());
HloInstruction* cp1 =
FindInstructionByName(ag_loop_body, "collective-permute");
EXPECT_TRUE(
cp1->backend_config<GpuBackendConfig>()->force_earliest_schedule());
}
TEST_F(GpuWindowedEinsumHandlerTest, RsLoopsHaveStreamIds) {
constexpr absl::string_view kHloString = R"(
HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[2048,24576]{1,0})->bf16[512,24576]{1,0}}, num_partitions=4
windowed_dot_general_body_rs_clone.1 {
param.2 = (bf16[2048,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[512,24576]{1,0}, u32[]) parameter(0)
get-tuple-element.6 = bf16[2048,24576]{1,0} get-tuple-element(param.2), index=0
get-tuple-element.7 = bf16[24576,24576]{1,0} get-tuple-element(param.2), index=1
get-tuple-element.9 = bf16[512,24576]{1,0} get-tuple-element(param.2), index=2
collective-permute.1 = bf16[512,24576]{1,0} collective-permute(get-tuple-element.9), channel_id=4, source_target_pairs={{0,2},{1,3},{2,0},{3,1}}, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
constant.10 = s32[4]{0} constant({0, 512, 1024, 1536})
get-tuple-element.11 = u32[] get-tuple-element(param.2), index=4
constant.12 = u32[] constant(2)
add.8 = u32[] add(get-tuple-element.11, constant.12)
constant.13 = u32[] constant(1)
add.9 = u32[] add(add.8, constant.13)
partition-id.3 = u32[] partition-id()
add.10 = u32[] add(add.9, partition-id.3)
constant.9 = u32[] constant(4)
remainder.3 = u32[] remainder(add.10, constant.9)
dynamic-slice.4 = s32[1]{0} dynamic-slice(constant.10, remainder.3), dynamic_slice_sizes={1}
reshape.7 = s32[] reshape(dynamic-slice.4)
constant.11 = s32[] constant(0)
dynamic-slice.5 = bf16[512,24576]{1,0} dynamic-slice(get-tuple-element.6, reshape.7, constant.11), dynamic_slice_sizes={512,24576}
dot.7 = bf16[512,24576]{1,0} dot(dynamic-slice.5, get-tuple-element.7), lhs_contracting_dims={1}, rhs_contracting_dims={0}, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
add.11 = bf16[512,24576]{1,0} add(collective-permute.1, dot.7), backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
get-tuple-element.10 = bf16[512,24576]{1,0} get-tuple-element(param.2), index=3
add.6 = u32[] add(get-tuple-element.11, partition-id.3)
remainder.2 = u32[] remainder(add.6, constant.9)
dynamic-slice.2 = s32[1]{0} dynamic-slice(constant.10, remainder.2), dynamic_slice_sizes={1}
reshape.6 = s32[] reshape(dynamic-slice.2)
dynamic-slice.3 = bf16[512,24576]{1,0} dynamic-slice(get-tuple-element.6, reshape.6, constant.11), dynamic_slice_sizes={512,24576}
dot.5 = bf16[512,24576]{1,0} dot(dynamic-slice.3, get-tuple-element.7), lhs_contracting_dims={1}, rhs_contracting_dims={0}, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
add.7 = bf16[512,24576]{1,0} add(get-tuple-element.10, dot.5), backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
collective-permute.2 = bf16[512,24576]{1,0} collective-permute(add.7), channel_id=5, source_target_pairs={{0,2},{1,3},{2,0},{3,1}}
ROOT tuple.1 = (bf16[2048,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[512,24576]{1,0}, u32[]) tuple(get-tuple-element.6, get-tuple-element.7, add.11, collective-permute.2, add.8)
}
windowed_dot_general_cond_rs {
param.3 = (bf16[2048,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[512,24576]{1,0}, u32[]) parameter(0)
get-tuple-element.12 = u32[] get-tuple-element(param.3), index=4
constant.17 = u32[] constant(4)
ROOT compare.1 = pred[] compare(get-tuple-element.12, constant.17), direction=LT
}
ENTRY main.9_spmd {
param.6 = bf16[24576,24576]{1,0} parameter(0), sharding={devices=[4,1]<=[4]}
param.7 = bf16[512,24576]{1,0} parameter(1)
param.8 = bf16[2048,24576]{1,0} parameter(2)
constant.20 = u32[] constant(0)
tuple.3 = (bf16[2048,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[512,24576]{1,0}, u32[]) tuple(param.8, param.6, param.7, param.7, constant.20)
while.1 = (bf16[2048,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[512,24576]{1,0}, u32[]) while(tuple.3), condition=windowed_dot_general_cond_rs, body=windowed_dot_general_body_rs_clone.1
ROOT get-tuple-element.14 = bf16[512,24576]{1,0} get-tuple-element(while.1), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
GpuWindowedEinsumHandler gpu_handler;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* rs_loop =
module->entry_computation()->root_instruction()->mutable_operand(0);
HloComputation* rs_loop_body = rs_loop->while_body();
HloInstruction* inst = FindInstructionByName(rs_loop_body, "dot.7");
EXPECT_TRUE(inst->backend_config<GpuBackendConfig>()->operation_queue_id() >
0);
HloInstruction* cp1 =
FindInstructionByName(rs_loop_body, "collective-permute.1");
EXPECT_TRUE(
cp1->backend_config<GpuBackendConfig>()->force_earliest_schedule());
}
TEST_F(GpuWindowedEinsumHandlerTest, AgLoopsMultipleConsumersAreChained) {
constexpr absl::string_view kHloString = R"(
HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[2,512,24576]{2,1,0}, bf16[24576,24576]{1,0}, bf16[24576,24576]{1,0})->bf16[2,2048,24576]{2,1,0}}, num_partitions=4
windowed_dot_general_body_ag {
param.1 = (bf16[2,512,24576]{2,1,0}, bf16[24576,24576]{1,0}, bf16[2,2048,24576]{2,1,0}, bf16[2,2048,24576]{2,1,0}, u32[]) parameter(0)
get-tuple-element.1 = bf16[2,512,24576]{2,1,0} get-tuple-element(param.1), index=0
collective-permute = bf16[2,512,24576]{2,1,0} collective-permute(get-tuple-element.1), channel_id=2, source_target_pairs={{0,3},{1,0},{2,1},{3,2}}
collective-permute.1 = bf16[2,512,24576]{2,1,0} collective-permute(collective-permute), channel_id=3, source_target_pairs={{0,3},{1,0},{2,1},{3,2}}
get-tuple-element.2 = bf16[24576,24576]{1,0} get-tuple-element(param.1), index=1
get-tuple-element.3 = bf16[2,2048,24576]{2,1,0} get-tuple-element(param.1), index=2
dot = bf16[2,512,24576]{2,1,0} dot(get-tuple-element.1, get-tuple-element.2), lhs_contracting_dims={2}, rhs_contracting_dims={0}
constant.2 = s32[] constant(0)
constant.3 = s32[4]{0} constant({0, 512, 1024, 1536})
get-tuple-element.5 = u32[] get-tuple-element(param.1), index=4
partition-id = u32[] partition-id()
add = u32[] add(get-tuple-element.5, partition-id)
constant.1 = u32[] constant(4)
remainder = u32[] remainder(add, constant.1)
dynamic-slice = s32[1]{0} dynamic-slice(constant.3, remainder), dynamic_slice_sizes={1}
reshape = s32[] reshape(dynamic-slice)
dynamic-update-slice = bf16[2,2048,24576]{2,1,0} dynamic-update-slice(get-tuple-element.3, dot, constant.2, reshape, constant.2)
dot.1 = bf16[2,512,24576]{2,1,0} dot(collective-permute, get-tuple-element.2), lhs_contracting_dims={2}, rhs_contracting_dims={0}
constant.5 = u32[] constant(1)
add.1 = u32[] add(get-tuple-element.5, constant.5)
add.2 = u32[] add(add.1, partition-id)
remainder.1 = u32[] remainder(add.2, constant.1)
dynamic-slice.1 = s32[1]{0} dynamic-slice(constant.3, remainder.1), dynamic_slice_sizes={1}
reshape.1 = s32[] reshape(dynamic-slice.1)
dynamic-update-slice.1 = bf16[2,2048,24576]{2,1,0} dynamic-update-slice(dynamic-update-slice, dot.1, constant.2, reshape.1, constant.2)
get-tuple-element.4 = bf16[2,2048,24576]{2,1,0} get-tuple-element(param.1), index=3
add.3 = u32[] add(add.1, constant.5)
ROOT tuple = (bf16[2,512,24576]{2,1,0}, bf16[24576,24576]{1,0}, bf16[2,2048,24576]{2,1,0}, bf16[2,2048,24576]{2,1,0}, u32[]) tuple(collective-permute.1, get-tuple-element.2, dynamic-update-slice.1, get-tuple-element.4, add.3)
}
windowed_dot_general_cond_ag {
param = (bf16[2,512,24576]{2,1,0}, bf16[24576,24576]{1,0}, bf16[2,2048,24576]{2,1,0}, bf16[2,2048,24576]{2,1,0}, u32[]) parameter(0)
get-tuple-element = u32[] get-tuple-element(param), index=4
constant = u32[] constant(4)
ROOT compare = pred[] compare(get-tuple-element, constant), direction=LT
}
ENTRY main.12_spmd {
param.4 = bf16[2,512,24576]{2,1,0} parameter(0), sharding={devices=[1,4,1]<=[4]}
param.5 = bf16[24576,24576]{1,0} parameter(1), sharding={devices=[1,4]<=[4]}
constant.22 = bf16[] constant(0)
broadcast = bf16[2,2048,24576]{2,1,0} broadcast(constant.22), dimensions={}
constant.24 = u32[] constant(0)
tuple.2 = (bf16[2,512,24576]{2,1,0}, bf16[24576,24576]{1,0}, bf16[2,2048,24576]{2,1,0}, bf16[2,2048,24576]{2,1,0}, u32[]) tuple(param.4, param.5, broadcast, broadcast, constant.24)
while = (bf16[2,512,24576]{2,1,0}, bf16[24576,24576]{1,0}, bf16[2,2048,24576]{2,1,0}, bf16[2,2048,24576]{2,1,0}, u32[]) while(tuple.2), condition=windowed_dot_general_cond_ag, body=windowed_dot_general_body_ag
get-tuple-element.13 = bf16[2,2048,24576]{2,1,0} get-tuple-element(while), index=2
copy.1 = bf16[2,2048,24576]{2,1,0} copy(get-tuple-element.13)
all-gather = bf16[2,2048,24576]{2,1,0} all-gather(param.4), channel_id=1, replica_groups={{0,1,2,3}}, dimensions={1}, use_global_device_ids=true
param.6 = bf16[24576,24576]{1,0} parameter(2), sharding={devices=[1,4]<=[4]}
ROOT dot.7 = bf16[2,2048,24576]{2,1,0} dot(all-gather, param.6), lhs_contracting_dims={2}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
GpuWindowedEinsumHandler gpu_handler;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* ag_loop =
FindInstructionByName(module->entry_computation(), "while");
HloInstruction* inst =
FindInstructionByName(module->entry_computation(), "dot.7");
EXPECT_EQ(inst->operand(0)->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(inst->operand(0)->tuple_index(), 5);
EXPECT_EQ(inst->operand(0)->operand(0), ag_loop);
HloInstruction* ag_while_root = ag_loop->while_body()->root_instruction();
EXPECT_THAT(ag_while_root,
GmockMatch(m::Tuple(
m::Op(), m::Op(), m::Op(), m::Op(), m::Op(),
m::DynamicUpdateSlice(
m::DynamicUpdateSlice(
m::GetTupleElement(m::Parameter())
.WithPredicate([](const HloInstruction* instr) {
return instr->tuple_index() == 5;
}),
m::Op(), m::Op(), m::Op(), m::Op()),
m::Op(), m::Op(), m::Op(), m::Op()))));
}
TEST_F(GpuWindowedEinsumHandlerTest, A2aGemmHaveStreamIds) {
constexpr absl::string_view kHloString = R"(
HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[1,8192,32768]{2,1,0}, bf16[1,4,2048,8192]{3,2,1,0})->bf16[1,4,2048,32768]{3,2,1,0}}, num_partitions=8
ENTRY main.9_spmd {
param0 = bf16[1,8192,32768]{2,1,0} parameter(0)
param1 = bf16[1,4,2048,8192]{3,2,1,0} parameter(1)
all-to-all = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(param1), channel_id=4, replica_groups={{0,1,2,3},{4,5,6,7}}, dimensions={1}
ROOT dot.12 = bf16[1,4,2048,32768]{3,2,1,0} dot(all-to-all, param0), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}
}
)";
const char* kExpected = R"(
CHECK: ENTRY
CHECK-DAG: %[[P1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} parameter(1)
CHECK-DAG: %[[SLICE0:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [6144:8192]}
CHECK: %[[A2A0:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE0]]),
CHECK: replica_groups={
CHECK: {0,1,2,3},{4,5,6,7}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[P0:.*]] = bf16[1,8192,32768]{2,1,0} parameter(0)
CHECK-DAG: %[[SLICE4:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [6144:8192], [0:32768]}
CHECK-DAG: %[[DOT0:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A0:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE4:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"8","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK-DAG: %[[SLICE1:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [4096:6144]}
CHECK: %[[A2A1:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE1]]),
CHECK: replica_groups={
CHECK: {0,1,2,3},{4,5,6,7}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE5:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [4096:6144], [0:32768]}
CHECK-DAG: %[[DOT1:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A1:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE5:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"7","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK-DAG: %[[SLICE2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [2048:4096]}
CHECK: %[[A2A2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE2]]),
CHECK: replica_groups={
CHECK: {0,1,2,3},{4,5,6,7}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE6:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [2048:4096], [0:32768]}
CHECK-DAG: %[[DOT2:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A2:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE6:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"6","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK-DAG: %[[SLICE3:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [0:2048]}
CHECK: %[[A2A2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE3]]),
CHECK: replica_groups={
CHECK: {0,1,2,3},{4,5,6,7}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE7:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:2048], [0:32768]}
CHECK-DAG: %[[DOT3:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A3:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE7:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"5","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK-DAG: %[[CONSTANT:.*]] = bf16[] constant(0)
CHECK-DAG: %[[BROADCAST:.*]] = bf16[1,4,2048,32768]{3,2,1,0} broadcast(bf16[] %[[CONSTANT:.*]]), dimensions={}
CHECK-DAG: %[[ADD0:.*]] = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT0:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[BROADCAST:.*]]), backend_config={"operation_queue_id":"0","wait_on_operation_queues":["5"],"force_earliest_schedule":false}
CHECK-DAG: %[[ADD1:.*]] = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT1:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[ADD0:.*]]), backend_config={"operation_queue_id":"0","wait_on_operation_queues":["6"],"force_earliest_schedule":false}
CHECK-DAG: %[[ADD2:.*]] = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT2:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[ADD1:.*]]), backend_config={"operation_queue_id":"0","wait_on_operation_queues":["7"],"force_earliest_schedule":false}
CHECK: ROOT {{.*}} = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT3:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[ADD2:.*]])
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
GpuWindowedEinsumHandler gpu_handler;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get()));
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_matched,
RunFileCheck(module->ToString(), kExpected));
EXPECT_TRUE(filecheck_matched);
}
TEST_F(GpuWindowedEinsumHandlerTest, GemmA2aHaveStreamIds) {
constexpr absl::string_view kHloString = R"(
HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[1,8192,32768]{2,1,0}, bf16[1,4,2048,32768]{3,2,1,0})->bf16[1,4,2048,8192]{3,2,1,0}}, num_partitions=4
ENTRY main.9_spmd {
param.9 = bf16[1,8192,32768]{2,1,0} parameter(0)
param.10 = bf16[1,4,2048,32768]{3,2,1,0} parameter(1)
dot.12 = bf16[1,4,2048,8192]{3,2,1,0} dot(param.10, param.9), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={2}
ROOT all-to-all = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(dot.12), channel_id=4, replica_groups={{0,1,2,3}}, dimensions={1}
}
)";
const char* kExpected = R"(
CHECK: ENTRY
CHECK-DAG: %[[P1:.*]] = bf16[1,4,2048,32768]{3,2,1,0} parameter(1)
CHECK-DAG: %[[SLICE0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [24576:32768]}
CHECK-DAG: %[[P0:.*]] = bf16[1,8192,32768]{2,1,0} parameter(0)
CHECK-DAG: %[[SLICE4:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:8192], [24576:32768]}
CHECK-DAG: %[[DOT0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE0:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE4:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={2}, backend_config={"operation_queue_id":"8","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK: %[[A2A0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT0:.*]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [16384:24576]}
CHECK-DAG: %[[SLICE5:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:8192], [16384:24576]}
CHECK-DAG: %[[DOT1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE1:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE5:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={2}, backend_config={"operation_queue_id":"7","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK: %[[A2A1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT1:.*]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [8192:16384]}
CHECK-DAG: %[[SLICE6:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:8192], [8192:16384]}
CHECK-DAG: %[[DOT2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE2:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE6:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={2}, backend_config={"operation_queue_id":"6","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK: %[[A2A2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT2:.*]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE3:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [0:8192]}
CHECK-DAG: %[[SLICE7:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:8192], [0:8192]}
CHECK-DAG: %[[DOT3:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE3:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE7:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={2}, backend_config={"operation_queue_id":"5","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK: %[[A2A2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT3:.*]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[CONSTANT:.*]] = bf16[] constant(0)
CHECK-DAG: %[[BROADCAST:.*]] = bf16[1,4,2048,8192]{3,2,1,0} broadcast(bf16[] %[[CONSTANT:.*]]), dimensions={}
CHECK-DAG: %[[ADD0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A0:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[BROADCAST:.*]])
CHECK-DAG: %[[ADD1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A1:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[ADD0:.*]])
CHECK-DAG: %[[ADD2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A2:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[ADD1:.*]])
CHECK: ROOT {{.*}} = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A3:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[ADD2:.*]])
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
GpuWindowedEinsumHandler gpu_handler;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get()));
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_matched,
RunFileCheck(module->ToString(), kExpected));
EXPECT_TRUE(filecheck_matched);
}
TEST_F(GpuWindowedEinsumHandlerTest, A2aTransposeLoopsHaveStreamIds) {
constexpr absl::string_view kHloString = R"(
HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[1,8192,32768]{2,1,0}, bf16[1,1,8192,4,1,2048]{5,4,3,2,1,0})->bf16[1,4,2048,32768]{3,2,1,0}}, num_partitions=4
ENTRY main.9_spmd {
param.9 = bf16[1,8192,32768]{2,1,0} parameter(0)
param.10 = bf16[1,1,8192,4,1,2048]{5,4,3,2,1,0} parameter(1)
all-to-all = bf16[1,1,8192,4,1,2048]{5,4,3,2,1,0} all-to-all(param.10), channel_id=4, replica_groups={{0,1,2,3}}, dimensions={3}
transpose.15 = bf16[1,4,1,8192,1,2048]{5,4,1,3,2,0} transpose(all-to-all), dimensions={0,3,1,2,4,5}
reshape.2170 = bf16[1,4,8192,1,2048]{4,3,2,1,0} reshape(transpose.15)
reshape.2173 = bf16[4,8192,1,2048]{3,2,1,0} reshape(reshape.2170)
transpose.16 = bf16[1,4,2048,8192]{2,0,3,1} transpose(reshape.2173), dimensions={2,0,3,1}
copy.53 = bf16[1,4,2048,8192]{3,2,1,0} copy(transpose.16)
ROOT dot.12 = bf16[1,4,2048,32768]{3,2,1,0} dot(copy.53, param.9), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}
}
)";
const char* kExpected = R"(
CHECK: ENTRY
CHECK-DAG: %[[P1:.*]] = bf16[1,1,8192,4,1,2048]{5,4,3,2,1,0} parameter(1)
CHECK-DAG: %[[TRANSPOSE0:.*]] = bf16[1,4,1,8192,1,2048]{5,4,1,3,2,0} transpose(bf16[1,1,8192,4,1,2048]{5,4,3,2,1,0} %[[P1:.*]]), dimensions={0,3,1,2,4,5}
CHECK-DAG: %[[RESHAPE0:.*]] = bf16[1,4,8192,1,2048]{4,3,2,1,0} reshape(bf16[1,4,1,8192,1,2048]{5,4,1,3,2,0} %[[TRANSPOSE0:.*]])
CHECK-DAG: %[[RESHAPE1:.*]] = bf16[4,8192,1,2048]{3,2,1,0} reshape(bf16[1,4,8192,1,2048]{4,3,2,1,0} %[[RESHAPE0:.*]])
CHECK-DAG: %[[TRANSPOSE1:.*]] = bf16[1,4,2048,8192]{2,0,3,1} transpose(bf16[4,8192,1,2048]{3,2,1,0} %[[RESHAPE1:.*]]), dimensions={2,0,3,1}
CHECK-DAG: %[[COPY:.*]] = bf16[1,4,2048,8192]{3,2,1,0} copy(bf16[1,4,2048,8192]{2,0,3,1} %[[TRANSPOSE1:.*]])
CHECK-DAG: %[[SLICE0:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[COPY:.*]]), slice={[0:1], [0:4], [0:2048], [6144:8192]}
CHECK: %[[A2A0:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE0]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[P0:.*]] = bf16[1,8192,32768]{2,1,0} parameter(0)
CHECK-DAG: %[[SLICE4:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [6144:8192], [0:32768]}
CHECK-DAG: %[[DOT0:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A0:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE4:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"9","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK-DAG: %[[SLICE1:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[COPY:.*]]), slice={[0:1], [0:4], [0:2048], [4096:6144]}
CHECK: %[[A2A1:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE1]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE5:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [4096:6144], [0:32768]}
CHECK-DAG: %[[DOT1:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A1:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE5:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"8","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK-DAG: %[[SLICE2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[COPY:.*]]), slice={[0:1], [0:4], [0:2048], [2048:4096]}
CHECK: %[[A2A2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE2]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE6:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [2048:4096], [0:32768]}
CHECK-DAG: %[[DOT2:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A2:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE6:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"7","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK-DAG: %[[SLICE3:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[COPY:.*]]), slice={[0:1], [0:4], [0:2048], [0:2048]}
CHECK: %[[A2A2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE3]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE7:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:2048], [0:32768]}
CHECK-DAG: %[[DOT3:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A3:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE7:.*]]), |
2,052 | cpp | tensorflow/tensorflow | cudnn_norm_rewriter | third_party/xla/xla/service/gpu/transforms/cudnn_norm_rewriter.cc | third_party/xla/xla/service/gpu/transforms/cudnn_norm_rewriter_test.cc | #ifndef XLA_SERVICE_GPU_CUDNN_NORM_REWRITER_H_
#define XLA_SERVICE_GPU_CUDNN_NORM_REWRITER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
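// Rewrites norm patterns into Custom Calls to the cuDNN library. Currently,
// only layer norm patterns are supported.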
class CudnnNormRewriter : public HloModulePass {
public:
explicit CudnnNormRewriter(se::CudaComputeCapability cuda_compute_capability);
absl::string_view name() const override { return "norm-rewriter"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
se::CudaComputeCapability cuda_compute_capability_;
};
}  // namespace gpu
}  // namespace xla
#endif  // XLA_SERVICE_GPU_CUDNN_NORM_REWRITER_H_
#include "xla/service/gpu/cudnn_norm_rewriter.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <optional>
#include <utility>
#include <vector>
#include "google/protobuf/wrappers.pb.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/protobuf/dnn.pb.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cudnn/cudnn.h"
#include "third_party/gpus/cudnn/cudnn_version.h"
#endif
namespace xla {
namespace gpu {
namespace {
namespace m = match;
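// Traverses the graph toward the producers, starting at instr, and returns
// the first instruction that is not a convert, bitcast or reshape.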
const HloInstruction* SkipUnaryOps(const HloInstruction* instr) {
while (instr->opcode() == HloOpcode::kConvert ||
instr->opcode() == HloOpcode::kBitcast ||
instr->opcode() == HloOpcode::kReshape) {
instr = instr->operand(0);
}
return instr;
}
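// Recursively traverses the graph toward the users, starting at instr, and
// collects in instrs the first instructions along each path that are not a
// convert, bitcast or reshape.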
void SkipUnaryOpsTopDownRecursive(HloInstruction* instr,
std::vector<HloInstruction*>& instrs) {
if (instr->opcode() == HloOpcode::kConvert ||
instr->opcode() == HloOpcode::kBitcast ||
instr->opcode() == HloOpcode::kReshape) {
for (HloInstruction* user : instr->users()) {
SkipUnaryOpsTopDownRecursive(user, instrs);
}
} else {
instrs.emplace_back(instr);
}
}
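// Metadata for the rewrite of a norm pattern: the transposes applied to the
// input and output of the Custom Call, and the normalization and
// non-normalization dimensions adjusted for degenerate dimensions.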
struct NormMetadata {
HloInstruction *x_transpose, *y_transpose;
std::vector<int64_t> norm_dims_adjusted, non_norm_dims_adjusted;
};
using NormMetadataMap = absl::flat_hash_map<HloInstruction*, NormMetadata>;
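// Captures an HloInstruction and verifies that later captures refer to the
// same instruction, invalidating the capture otherwise.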
class UniqueHloInstruction {
public:
UniqueHloInstruction()
: is_set_(false), instr_(nullptr), capture_or_verify_() {}
HloInstruction* Instr() const { return instr_; }
void SetInstr(HloInstruction* instr) {
is_set_ = true;
instr_ = instr;
}
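  // Stores instr when no instruction has been captured yet. Returns false
  // when a different instruction was captured previously.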
bool CaptureOrVerify(HloInstruction* instr) {
if (is_set_ && instr != instr_) {
instr_ = nullptr;
}
if (!is_set_) {
is_set_ = true;
instr_ = instr;
}
return instr_;
}
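  // Lazily creates and returns a pattern predicate that captures or verifies
  // the instruction it is applied to.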
std::function<bool(const HloInstruction*)> GetCaptureOrVerifyFn() {
if (!capture_or_verify_) {
capture_or_verify_ = [this](const HloInstruction* instr) -> bool {
return CaptureOrVerify(const_cast<HloInstruction*>(instr));
};
}
return capture_or_verify_;
}
private:
bool is_set_;
HloInstruction* instr_;
std::function<bool(const HloInstruction*)> capture_or_verify_;
};
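// Returns an architecture-specific constant used to bound the scratch space
// of the layer norm kernels. Requires Ampere or Hopper GPUs.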
absl::StatusOr<int64_t> CConstant(
se::CudaComputeCapability cuda_compute_capability) {
if (cuda_compute_capability.major == se::CudaComputeCapability::AMPERE) {
return 32 * 128;
} else if (cuda_compute_capability.major ==
se::CudaComputeCapability::HOPPER) {
return 32 * 144;
}
return xla::Internal("Norm kernels require Ampere or Hopper architecture.");
}
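// Returns whether the element type of instr is supported by the layer norm
// kernels.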
bool CompatibleElementType(const HloInstruction* instr) {
PrimitiveType element_type = instr->shape().element_type();
return element_type == BF16 || element_type == F16 || element_type == F32;
}
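// Remaps the indices in dimensions to the indices the corresponding
// dimensions of shape have after all degenerate (size-1) dimensions are
// removed; degenerate dimensions themselves are dropped.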
std::vector<int64_t> AdjustedDimensions(const Shape& shape,
absl::Span<const int64_t> dimensions) {
absl::flat_hash_map<int64_t, int64_t> dimension_map;
for (int64_t dimension = 0, non_degen_dimension = 0; dimension < shape.rank();
++dimension) {
if (shape.dimensions(dimension) > 1) {
dimension_map.insert({dimension, non_degen_dimension});
non_degen_dimension++;
}
}
std::vector<int64_t> adjusted_dimensions;
for (int64_t dimension : dimensions) {
auto non_degenerate_dimension = dimension_map.find(dimension);
if (non_degenerate_dimension != dimension_map.end()) {
adjusted_dimensions.emplace_back(non_degenerate_dimension->second);
}
}
return adjusted_dimensions;
}
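// Adjusted dimensions of a broadcast (relative to its output shape) or
// reduce (relative to its input shape) instruction.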
std::vector<int64_t> AdjustedDimensions(const HloInstruction* instr) {
Shape shape;
if (instr->opcode() == HloOpcode::kBroadcast) {
shape = instr->shape();
} else if (instr->opcode() == HloOpcode::kReduce) {
shape = instr->operand(0)->shape();
} else {
return {};
}
return AdjustedDimensions(shape, instr->dimensions());
}
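// Returns whether instr is a reduction over reduce_dims (when given) that
// sums its inputs starting from a scalar zero.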
bool AppliesAddReduce(const HloInstruction* instr,
absl::Span<const int64_t> reduce_dims = {}) {
if (instr->opcode() != HloOpcode::kReduce) {
return false;
}
if (!reduce_dims.empty() && AdjustedDimensions(instr) != reduce_dims) {
return false;
}
HloComputation* reduce_comp = instr->to_apply();
HloInstruction* reduce_comp_root = reduce_comp->root_instruction();
return instr->operand_count() == 2 &&
instr->operand(1)->opcode() == HloOpcode::kConstant &&
ShapeUtil::IsScalar(instr->operand(1)->shape()) &&
instr->operand(1)->literal().GetAsDouble({}) == 0. &&
reduce_comp_root->opcode() == HloOpcode::kAdd &&
reduce_comp_root->operand(0)->opcode() == HloOpcode::kParameter &&
reduce_comp_root->operand(1)->opcode() == HloOpcode::kParameter;
}
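// Returns whether instr multiplies an addition-reduction by the reciprocal
// of the number of reduced elements, i.e. computes an expected value.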
bool CalculatesExpectation(const HloInstruction* instr) {
instr = SkipUnaryOps(instr);
if (instr->opcode() != HloOpcode::kMultiply) {
return false;
}
bool bcast_operand = instr->operand(0)->opcode() != HloOpcode::kBroadcast;
const HloInstruction *broadcast = instr->operand(bcast_operand),
*reduce = SkipUnaryOps(instr->operand(!bcast_operand));
if (reduce->opcode() != HloOpcode::kReduce ||
broadcast->opcode() != HloOpcode::kBroadcast ||
broadcast->operand(0)->opcode() != HloOpcode::kConstant) {
return false;
}
float actual_r_nelems =
broadcast->operand(0)->literal().GetAsDouble({}).value();
int64_t nelems = 1;
for (int64_t norm_dim : reduce->dimensions()) {
nelems *= reduce->operand(0)->shape().dimensions()[norm_dim];
}
float r_nelems = 1. / static_cast<float>(nelems);
float numerical_epsilon = std::numeric_limits<bfloat16>::epsilon();
  return std::abs(actual_r_nelems - r_nelems) <
((actual_r_nelems + r_nelems) * numerical_epsilon);
}
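// Returns whether target can be reached from instr by traversing converts,
// bitcasts, reshapes and the given transpose toward the users, stepping back
// through such unary ops when needed.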
bool FindTargetRecursive(
const HloInstruction* instr, const HloInstruction* target,
absl::flat_hash_set<const HloInstruction*>& visited_instrs,
const HloInstruction* transpose) {
visited_instrs.emplace(instr);
const absl::flat_hash_set<HloOpcode> supported_ops = {
HloOpcode::kConvert, HloOpcode::kBitcast, HloOpcode::kReshape};
if (instr == target) {
return true;
}
for (HloInstruction* user : instr->users()) {
if ((supported_ops.contains(user->opcode()) || user == transpose) &&
!visited_instrs.contains(user)) {
return FindTargetRecursive(user, target, visited_instrs, transpose);
}
}
if (supported_ops.contains(instr->opcode())) {
return FindTargetRecursive(instr->operand(0), target, visited_instrs,
transpose);
}
return false;
}
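// Returns whether target can be reached from instr across converts, bitcasts
// and reshapes, using the metadata of custom_call to also allow traversal of
// the transpose of its input.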
bool FindTarget(const HloInstruction* custom_call, const HloInstruction* instr,
const HloInstruction* target,
const NormMetadataMap& norm_metadata) {
absl::flat_hash_set<const HloInstruction*> visited_instrs;
auto custom_call_metadata = norm_metadata.find(custom_call);
if (custom_call_metadata == norm_metadata.end()) {
return false;
}
return FindTargetRecursive(instr, target, visited_instrs,
custom_call_metadata->second.x_transpose);
}
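// Maps the dimensions in `dimensions`, given relative to original_shape, to
// the corresponding dimensions of reshaped_shape. Returns an empty vector
// when no unambiguous mapping exists.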
std::vector<int64_t> MapDimensions(const Shape& original_shape,
const Shape& reshaped_shape,
const absl::Span<const int64_t> dimensions) {
auto dimension_product =
[](const Shape& shape,
absl::Span<const int64_t> product_dimensions) -> int64_t {
int64_t product = 1;
for (int64_t product_dimension : product_dimensions) {
product *= shape.dimensions(product_dimension);
}
return product;
};
absl::flat_hash_map<int64_t, std::vector<int64_t>> dimensions_map;
std::vector<int64_t> original_dimensions, reshaped_dimensions;
for (int64_t original_dimension = 0, reshaped_dimension = 0;
original_dimension < original_shape.rank(); ++original_dimension) {
original_dimensions.emplace_back(original_dimension);
while ((reshaped_dimensions.empty() ||
dimension_product(reshaped_shape, reshaped_dimensions) <
dimension_product(original_shape, original_dimensions)) &&
reshaped_dimension < reshaped_shape.rank()) {
reshaped_dimensions.emplace_back(reshaped_dimension++);
}
if (original_dimensions.size() > 1 && reshaped_dimensions.size() > 1) {
return {};
}
if (dimension_product(original_shape, original_dimensions) ==
dimension_product(reshaped_shape, reshaped_dimensions)) {
std::vector<int64_t> original_dimensions_in_dimensions;
std::set_intersection(
original_dimensions.begin(), original_dimensions.end(),
dimensions.begin(), dimensions.end(),
std::back_inserter(original_dimensions_in_dimensions));
if (!original_dimensions_in_dimensions.empty() &&
original_dimensions_in_dimensions.size() !=
original_dimensions.size()) {
return {};
}
for (int64_t dimension : original_dimensions) {
dimensions_map.insert({dimension, reshaped_dimensions});
}
original_dimensions.clear();
reshaped_dimensions.clear();
}
}
std::vector<int64_t> mapped_dimensions;
for (int64_t dimension : dimensions) {
auto mapped_dimension = dimensions_map.find(dimension);
if (mapped_dimension == dimensions_map.end()) {
return {};
}
mapped_dimensions.insert(mapped_dimensions.end(),
mapped_dimension->second.begin(),
mapped_dimension->second.end());
}
mapped_dimensions.erase(
std::unique(mapped_dimensions.begin(), mapped_dimensions.end()),
mapped_dimensions.end());
return mapped_dimensions;
}
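// Recursively traverses the graph across converts, bitcasts and reshapes,
// starting from instr, and returns the first addition-reduction over
// reduce_dims, mapped onto the shapes encountered along the way.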
HloInstruction* FindAddReduceRecursive(
HloInstruction* instr, const Shape& orig_instr_shape,
const absl::Span<const int64_t> reduce_dims,
absl::flat_hash_set<HloInstruction*>& visited_instrs) {
visited_instrs.emplace(instr);
const absl::flat_hash_set<HloOpcode> supported_ops = {
HloOpcode::kConvert, HloOpcode::kBitcast, HloOpcode::kReshape};
for (HloInstruction* user : instr->users()) {
if (user->opcode() == HloOpcode::kReduce) {
std::vector<int64_t> mapped_reduce_dims =
MapDimensions(orig_instr_shape, instr->shape(), reduce_dims);
if (!mapped_reduce_dims.empty() &&
AppliesAddReduce(user, mapped_reduce_dims)) {
return user;
}
}
if (supported_ops.contains(user->opcode()) &&
!visited_instrs.contains(user)) {
return FindAddReduceRecursive(user, orig_instr_shape, reduce_dims,
visited_instrs);
}
}
if (supported_ops.contains(instr->opcode())) {
return FindAddReduceRecursive(instr->mutable_operand(0), orig_instr_shape,
reduce_dims, visited_instrs);
}
return nullptr;
}
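// Returns the first addition-reduction of instr over reduce_dims reachable
// across converts, bitcasts and reshapes, or nullptr.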
HloInstruction* FindAddReduce(HloInstruction* instr,
const absl::Span<const int64_t> reduce_dims) {
absl::flat_hash_set<HloInstruction*> visited_instrs;
return FindAddReduceRecursive(instr, instr->shape(), reduce_dims,
visited_instrs);
}
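// Type conversion between compatible element types (BF16, F16, F32).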
template <typename Pattern>
auto SupportedConvert(Pattern pattern) {
auto supported_convert = [](const HloInstruction* instr) -> bool {
return CompatibleElementType(instr) &&
CompatibleElementType(instr->operand(0));
};
return m::Convert(pattern).WithPredicate(supported_convert);
}
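// Bitcast or reshape that adds or removes only degenerate dimensions.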
template <typename Pattern>
auto SupportedBitcastOrReshape(Pattern pattern) {
auto supported_bitcast_or_reshape = [](const HloInstruction* instr) -> bool {
return ShapeUtil::Equal(
ShapeUtil::DropDegenerateDimensions(instr->shape()),
ShapeUtil::DropDegenerateDimensions(instr->operand(0)->shape()));
};
return m::AnyOf<HloInstruction>(
m::Bitcast(pattern).WithPredicate(supported_bitcast_or_reshape),
m::Reshape(pattern).WithPredicate(supported_bitcast_or_reshape));
}
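// Matches pattern, optionally wrapped in a supported convert and/or a
// supported bitcast or reshape.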
template <typename Pattern>
auto OptionalSupportedTransform(Pattern pattern) {
auto shared_subpattern = m::SharedSubpattern(pattern);
return m::AnyOf<HloInstruction>(
SupportedConvert(SupportedBitcastOrReshape(shared_subpattern)),
SupportedBitcastOrReshape(SupportedConvert(shared_subpattern)),
SupportedConvert(shared_subpattern),
SupportedBitcastOrReshape(shared_subpattern), shared_subpattern);
}
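// Bitcast or reshape of pattern with optional supported transforms.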
template <typename Pattern>
auto BitcastOrReshape(Pattern pattern) {
return OptionalSupportedTransform(
m::AnyOf<HloInstruction>(m::Bitcast(pattern), m::Reshape(pattern)));
}
template <typename Pattern>
auto Transpose(Pattern pattern) {
return OptionalSupportedTransform(m::Transpose(pattern));
}
template <typename Pattern>
auto Rsqrt(HloInstruction** rsqrt, Pattern pattern) {
return OptionalSupportedTransform(m::Rsqrt(rsqrt, pattern));
}
template <typename Pattern0, typename Pattern1>
auto AddAnyOrder(Pattern0 pattern0, Pattern1 pattern1) {
return OptionalSupportedTransform(m::AddAnyOrder(pattern0, pattern1));
}
template <typename Pattern0, typename Pattern1>
auto Subtract(Pattern0 pattern0, Pattern1 pattern1) {
return OptionalSupportedTransform(m::Subtract(pattern0, pattern1));
}
template <typename Pattern0, typename Pattern1>
auto Subtract(HloInstruction** subtract, Pattern0 pattern0, Pattern1 pattern1) {
return OptionalSupportedTransform(m::Subtract(subtract, pattern0, pattern1));
}
template <typename Pattern0, typename Pattern1>
auto MultiplyAnyOrder(Pattern0 pattern0, Pattern1 pattern1) {
return OptionalSupportedTransform(m::MultiplyAnyOrder(pattern0, pattern1));
}
template <typename Pattern0, typename Pattern1>
auto MultiplyAnyOrder(HloInstruction** multiply, Pattern0 pattern0,
Pattern1 pattern1) {
return OptionalSupportedTransform(
m::MultiplyAnyOrder(multiply, pattern0, pattern1));
}
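// Multiplication of pattern by itself, requiring both factors to be the same
// instruction.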
template <typename Pattern>
auto Square(Pattern pattern) {
return MultiplyAnyOrder(pattern, pattern)
.WithPredicate([](const HloInstruction* instr) {
return instr->unique_operands().size() == 1;
});
}
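// The cube of pattern, matched as square(pattern) * pattern with a check
// that all three factors are the same instruction.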
template <typename Pattern>
auto Cube(Pattern pattern) {
auto unique_cube = [](const HloInstruction* instr) -> bool {
bool square_operand = instr->operand(0)->opcode() != HloOpcode::kMultiply;
return instr->operand(!square_operand)->opcode() != HloOpcode::kMultiply &&
instr->operand(square_operand)->operand(0) ==
instr->operand(!square_operand);
};
return MultiplyAnyOrder(Square(pattern), pattern).WithPredicate(unique_cube);
}
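// Addition-reduction of pattern.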
template <typename Pattern>
auto AddReduce(Pattern pattern) {
return OptionalSupportedTransform(
m::Reduce(pattern, m::Op())
.WithPredicate([](const HloInstruction* instr) {
return AppliesAddReduce(instr);
}));
}
template <typename Pattern>
auto AddReduce(HloInstruction** reduction, Pattern pattern) {
return OptionalSupportedTransform(
m::Reduce(reduction, pattern, m::Op())
.WithPredicate([](const HloInstruction* instr) {
return AppliesAddReduce(instr);
}));
}
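// Addition-reduction of the negation of pattern, or negation of the
// addition-reduction of pattern.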
template <typename Pattern>
auto NegateAddReduce(HloInstruction** reduction, Pattern pattern) {
return m::AnyOf<HloInstruction>(AddReduce(reduction, m::Negate(pattern)),
m::Negate(AddReduce(reduction, pattern)));
}
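// Expected value (mean) of pattern: its addition-reduction scaled by the
// reciprocal of the number of reduced elements, with optional broadcast.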
template <typename Pattern>
auto Expectation(Pattern pattern) {
auto shared_subpattern =
MultiplyAnyOrder(m::Broadcast(m::ConstantScalar()), AddReduce(pattern))
.WithPredicate([](const HloInstruction* instr) {
return CalculatesExpectation(instr);
});
return m::AnyOf<HloInstruction>(m::Broadcast(shared_subpattern),
shared_subpattern);
}
template <typename Pattern>
auto Expectation(UniqueHloInstruction* expectation, Pattern pattern) {
auto shared_subpattern = OptionalSupportedTransform(
m::MultiplyAnyOrder(m::Broadcast(m::ConstantScalar()), AddReduce(pattern))
.WithPredicate([](const HloInstruction* instr) {
return CalculatesExpectation(instr);
})
.WithPredicate(expectation->GetCaptureOrVerifyFn()));
return m::AnyOf<HloInstruction>(m::Broadcast(shared_subpattern),
shared_subpattern);
}
template <typename Pattern>
auto Expectation(UniqueHloInstruction* expectation, HloInstruction** reduce,
Pattern pattern) {
auto shared_subpattern = OptionalSupportedTransform(
m::MultiplyAnyOrder(m::Broadcast(m::ConstantScalar()),
AddReduce(reduce, pattern))
.WithPredicate([](const HloInstruction* instr) {
return CalculatesExpectation(instr);
})
.WithPredicate(expectation->GetCaptureOrVerifyFn()));
return m::AnyOf<HloInstruction>(m::Broadcast(shared_subpattern),
shared_subpattern);
}
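// Matches the variance of x in either of its two algebraic forms,
//   Var(x) = E[x^2] - E[x]^2   or   Var(x) = E[(x - E[x])^2],
// corresponding to the two alternatives below.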
auto Variance(UniqueHloInstruction* variance, UniqueHloInstruction* expectation,
UniqueHloInstruction* x) {
return m::AnyOf<HloInstruction>(
Subtract(
Expectation(Square(OptionalSupportedTransform(
m::Op().WithPredicate(x->GetCaptureOrVerifyFn())))),
Square(Expectation(expectation,
OptionalSupportedTransform(m::Op().WithPredicate(
x->GetCaptureOrVerifyFn())))))
.WithPredicate(variance->GetCaptureOrVerifyFn()),
Expectation(
Square(Subtract(
OptionalSupportedTransform(
m::Op().WithPredicate(x->GetCaptureOrVerifyFn())),
Expectation(expectation,
OptionalSupportedTransform(m::Op().WithPredicate(
x->GetCaptureOrVerifyFn()))))))
.WithPredicate(variance->GetCaptureOrVerifyFn()));
}
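// Matches the layer norm scaling factor rsqrt(Var(x) + epsilon), optionally
// broadcast to the shape of the normalized input.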
auto NormFactor(HloInstruction** norm_factor, UniqueHloInstruction* x,
UniqueHloInstruction* variance,
UniqueHloInstruction* expectation,
UniqueHloInstruction* epsilon) {
auto shared_subpattern = m::SharedSubpattern(Rsqrt(
norm_factor, AddAnyOrder(Variance(variance, expectation, x),
m::Broadcast(m::ConstantScalar().WithPredicate(
epsilon->GetCaptureOrVerifyFn())))));
return m::AnyOf<HloInstruction>(m::Broadcast(shared_subpattern),
shared_subpattern);
}
template <typename P0, typename P1, typename P2>
auto MultiplyMultiplyAnyOrder(P0 p0, P1 p1, P2 p2) {
return m::AnyOf<HloInstruction>(
MultiplyAnyOrder(p0, MultiplyAnyOrder(p1, p2)),
MultiplyAnyOrder(p1, MultiplyAnyOrder(p0, p2)),
MultiplyAnyOrder(p2, MultiplyAnyOrder(p0, p1)));
}
template <typename P0, typename P1, typename P2>
auto AddAddAnyOrder(P0 p0, P1 p1, P2 p2) {
return m::AnyOf<HloInstruction>(AddAnyOrder(p0, AddAnyOrder(p1, p2)),
AddAnyOrder(p1, AddAnyOrder(p0, p2)),
AddAnyOrder(p2, AddAnyOrder(p0, p1)));
}
template <typename P0, typename P1, typename P2>
auto MultiplyAddAnyOrder(P0 p0, P1 p1, P2 p2) {
return m::AnyOf<HloInstruction>(
MultiplyAnyOrder(p0, AddAnyOrder(p1, p2)),
AddAnyOrder(MultiplyAnyOrder(p0, p1), MultiplyAnyOrder(p0, p2)));
}
template <typename P0, typename P1, typename P2>
auto SubtractAddAnyOrder(P0 p0, P1 p1, P2 p2) {
return m::AnyOf<HloInstruction>(AddAnyOrder(Subtract(p0, p1), p2),
AddAnyOrder(Subtract(p2, p1), p0),
Subtract(AddAnyOrder(p0, p2), p1));
}
template <typename P0, typename P1, typename P2, typename P3, typename P4>
auto SubtractMultiplyAddAnyOrder(P0 p0, P1 p1, P2 p2, P3 p3, P4 p4) {
return m::AnyOf<HloInstruction>(
SubtractAddAnyOrder(MultiplyMultiplyAnyOrder(p0, p2, p3),
MultiplyMultiplyAnyOrder(p1, p2, p3), p4),
AddAnyOrder(MultiplyMultiplyAnyOrder(Subtract(p0, p1), p2, p3), p4));
}
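// The Fused* helpers below match get-tuple-element users of a cuDNN norm
// custom call created earlier by this pass. The indices assume the tuple
// layout of the training-mode call: index 1 holds the expectation and
// index 2 the norm factor.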
auto FusedExpectation(UniqueHloInstruction* custom_call) {
auto shared_subpattern = m::SharedSubpattern(m::GetTupleElement(
m::CustomCall({kCudnnNormCallTarget})
.WithPredicate(custom_call->GetCaptureOrVerifyFn()),
1));
return m::AnyOf<HloInstruction>(shared_subpattern,
BitcastOrReshape(shared_subpattern));
}
auto FusedExpectation(UniqueHloInstruction* fused_expectation,
UniqueHloInstruction* custom_call) {
auto shared_subpattern = m::SharedSubpattern(
m::GetTupleElement(
m::CustomCall({kCudnnNormCallTarget})
.WithPredicate(custom_call->GetCaptureOrVerifyFn()),
1)
.WithPredicate(fused_expectation->GetCaptureOrVerifyFn()));
return m::AnyOf<HloInstruction>(shared_subpattern,
BitcastOrReshape(shared_subpattern));
}
auto FusedNormFactor(UniqueHloInstruction* custom_call) {
auto shared_subpattern = m::SharedSubpattern(m::GetTupleElement(
m::CustomCall({kCudnnNormCallTarget})
.WithPredicate(custom_call->GetCaptureOrVerifyFn()),
2));
return m::AnyOf<HloInstruction>(shared_subpattern,
BitcastOrReshape(shared_subpattern));
}
auto FusedNormFactor(UniqueHloInstruction* fused_norm_factor,
UniqueHloInstruction* custom_call) {
auto shared_subpattern = m::SharedSubpattern(
m::GetTupleElement(
m::CustomCall({kCudnnNormCallTarget})
.WithPredicate(custom_call->GetCaptureOrVerifyFn()),
2)
.WithPredicate(fused_norm_factor->GetCaptureOrVerifyFn()));
return m::AnyOf<HloInstruction>(shared_subpattern,
BitcastOrReshape(shared_subpattern));
}
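// Matches the derivative of the norm factor with respect to the variance:
//   d/dVar rsqrt(Var + eps) = -1/2 * (Var + eps)^(-3/2) = -1/2 * NormFactor^3,
// which is why the fused norm factor is cubed and scaled by -0.5.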
auto DNormFactor(UniqueHloInstruction* custom_call) {
return MultiplyAnyOrder(m::Broadcast(m::ConstantScalar(-0.5)),
Cube(FusedNormFactor(custom_call)));
}
auto XCenter(UniqueHloInstruction* x, UniqueHloInstruction* custom_call,
const NormMetadataMap& norm_metadata) {
auto capture_or_verify_x =
[x, | #include <string>
#include <gtest/gtest.h>
#include "xla/error_spec.h"
#include "xla/stream_executor/device_description.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cudnn/cudnn.h"
#include "third_party/gpus/cudnn/cudnn_version.h"
#endif
#include "xla/service/gpu/tests/gpu_codegen_test.h"
namespace xla {
namespace gpu {
namespace {
class CudnnNormRewriterTest : public GpuCodegenTest {
public:
se::CudaComputeCapability GetCudaComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = GpuCodegenTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_cudnn_layer_norm(true);
return debug_options;
}
protected:
void TestNorm(std::string hlo_text, std::string optimized_hlo) {
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
MatchOptimizedHlo(hlo_text, optimized_hlo);
}
};
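// Test names encode the input rank and the normalized dimension(s), e.g.
// LayerNorm4D12 normalizes dimensions {1,2} of a 4D input; Degenerate<i>
// variants additionally make dimension i trivial (size 1).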
TEST_F(CudnnNormRewriterTest, LayerNorm2D1) {
#if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8905)
GTEST_SKIP() << "Layer norm kernels require CUDA 12 and cuDNN 8.9.5.";
#endif
if (!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::AMPERE) &&
!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP()
<< "Layer norm kernels require Ampere or Hopper architectures.";
}
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4] parameter(0)
input_square = f32[2,4] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2] reduce(input_square, c0), dimensions={1}, to_apply=apply
r_nelems = f32[] constant(0.25)
r_nelems_bcast = f32[2] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2] multiply(input_square_sum, r_nelems_bcast)
input_sum = f32[2] reduce(input, c0),dimensions={1}, to_apply=apply
input_mean = f32[2] multiply(input_sum, r_nelems_bcast)
input_mean_square = f32[2] multiply(input_mean, input_mean)
variance = f32[2] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2] add(variance, epsilon_bcast)
norm_factor = f32[2] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4] broadcast(norm_factor), dimensions={0}
input_mean_bcast = f32[2,4] broadcast(input_mean), dimensions={0}
input_center = f32[2,4] subtract(input, input_mean_bcast)
norm = f32[2,4] multiply(norm_factor_bcast, input_center)
scale = f32[4] parameter(1)
scale_bcast = f32[2,4] broadcast(scale), dimensions={1}
norm_scale = f32[2,4] multiply(norm, scale_bcast)
bias = f32[4] parameter(2)
bias_broadcast = f32[2,4] broadcast(bias), dimensions={1}
ROOT out = f32[2,4] add(norm_scale, bias_broadcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4], {{.*}}: f32[4], {{.*}}: f32[4]) -> f32[2,4] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4]{1,0} parameter(0)
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} bitcast([[P0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[2,4,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: ROOT [[GTE_BITCAST:%[^ ]+]] = f32[2,4]{1,0} bitcast([[GTE]])
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNorm4D3) {
#if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8905)
GTEST_SKIP() << "Layer norm kernels require CUDA 12 and cuDNN 8.9.5.";
#endif
if (!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::AMPERE) &&
!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP()
<< "Layer norm kernels require Ampere or Hopper architectures.";
}
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4,6,8] parameter(0)
input_square = f32[2,4,6,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,4,6] reduce(input_square, c0), dimensions={3}, to_apply=apply
r_nelems = f32[] constant(0.125)
r_nelems_bcast = f32[2,4,6] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,4,6] multiply(input_square_sum, r_nelems_bcast)
input_sum = f32[2,4,6] reduce(input, c0), dimensions={3}, to_apply=apply
input_mean = f32[2,4,6] multiply(input_sum, r_nelems_bcast)
input_mean_square = f32[2,4,6] multiply(input_mean, input_mean)
variance = f32[2,4,6] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,4,6] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,4,6] add(variance, epsilon_bcast)
norm_factor = f32[2,4,6] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,1,2}
input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,1,2}
input_center = f32[2,4,6,8] subtract(input, input_mean_bcast)
norm = f32[2,4,6,8] multiply(norm_factor_bcast, input_center)
scale = f32[8] parameter(1)
scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={3}
norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast)
bias = f32[8] parameter(2)
bias_bcast = f32[2,4,6,8] broadcast(bias), dimensions={3}
ROOT out = f32[2,4,6,8] add(norm_scale, bias_bcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[8], {{.*}}: f32[8]) -> f32[2,4,6,8] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[48,8,1,1]{3,2,1,0} bitcast([[P0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[8]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[8]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[48,8,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[48,8,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: ROOT [[GTE_BITCAST:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} bitcast([[GTE]])
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNorm4D3Degenerate0) {
#if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8905)
GTEST_SKIP() << "Layer norm kernels require CUDA 12 and cuDNN 8.9.5.";
#endif
if (!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::AMPERE) &&
!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP()
<< "Layer norm kernels require Ampere or Hopper architectures.";
}
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[1,4,6,8] parameter(0)
input_square = f32[1,4,6,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[1,4,6] reduce(input_square, c0), dimensions={3}, to_apply=apply
r_nelems = f32[] constant(0.125)
r_nelems_bcast = f32[1,4,6] broadcast(r_nelems), dimensions={}
input_square_mean = f32[1,4,6] multiply(input_square_sum, r_nelems_bcast)
input_sum = f32[1,4,6] reduce(input, c0), dimensions={3}, to_apply=apply
input_mean = f32[1,4,6] multiply(input_sum, r_nelems_bcast)
input_mean_square = f32[1,4,6] multiply(input_mean, input_mean)
variance = f32[1,4,6] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[1,4,6] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[1,4,6] add(variance, epsilon_bcast)
norm_factor = f32[1,4,6] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[1,4,6,8] broadcast(norm_factor), dimensions={0,1,2}
input_mean_bcast = f32[1,4,6,8] broadcast(input_mean), dimensions={0,1,2}
input_center = f32[1,4,6,8] subtract(input, input_mean_bcast)
norm = f32[1,4,6,8] multiply(norm_factor_bcast, input_center)
scale = f32[8] parameter(1)
scale_bcast = f32[1,4,6,8] broadcast(scale), dimensions={3}
norm_scale = f32[1,4,6,8] multiply(norm, scale_bcast)
bias = f32[8] parameter(2)
bias_bcast = f32[1,4,6,8] broadcast(bias), dimensions={3}
ROOT out = f32[1,4,6,8] add(norm_scale, bias_bcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[1,4,6,8], {{.*}}: f32[8], {{.*}}: f32[8]) -> f32[1,4,6,8] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[1,4,6,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[24,8,1,1]{3,2,1,0} bitcast([[P0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[8]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[8]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[24,8,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[24,8,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: ROOT [[GTE_BITCAST:%[^ ]+]] = f32[1,4,6,8]{3,2,1,0} bitcast([[GTE]])
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNorm4D2) {
#if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8905)
GTEST_SKIP() << "Layer norm kernels require CUDA 12 and cuDNN 8.9.5.";
#endif
if (!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::AMPERE) &&
!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP()
<< "Layer norm kernels require Ampere or Hopper architectures.";
}
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4,6,8] parameter(0)
input_square = f32[2,4,6,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,4,8] reduce(input_square, c0), dimensions={2}, to_apply=apply
r_nelems = f32[] constant(0.166667)
r_nelems_bcast = f32[2,4,8] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,4,8] multiply(input_square_sum, r_nelems_bcast)
reduce = f32[2,4,8] reduce(input, c0), dimensions={2}, to_apply=apply
input_mean = f32[2,4,8] multiply(reduce, r_nelems_bcast)
input_mean_square = f32[2,4,8] multiply(input_mean, input_mean)
variance = f32[2,4,8] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,4,8] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,4,8] add(variance, epsilon_bcast)
norm_factor = f32[2,4,8] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,1,3}
input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,1,3}
input_center = f32[2,4,6,8] subtract(input, input_mean_bcast)
norm = f32[2,4,6,8] multiply(norm_factor_bcast, input_center)
scale = f32[6] parameter(1)
scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={2}
norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast)
bias = f32[6] parameter(2)
bias_broadcast = f32[2,4,6,8] broadcast(bias), dimensions={2}
ROOT out = f32[2,4,6,8] add(norm_scale, bias_broadcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[6], {{.*}}: f32[6]) -> f32[2,4,6,8] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[TRANSPOSE:%[^ ]+]] = f32[2,4,8,6]{3,2,1,0} transpose([[P0]]), dimensions={0,1,3,2}
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[64,6,1,1]{3,2,1,0} bitcast([[TRANSPOSE]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[6]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[6]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[64,6,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[64,6,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: ROOT [[FUSION:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} fusion([[GTE]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]]
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNorm4D2Degenerate1) {
#if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8905)
GTEST_SKIP() << "Layer norm kernels require CUDA 12 and cuDNN 8.9.5.";
#endif
if (!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::AMPERE) &&
!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP()
<< "Layer norm kernels require Ampere or Hopper architectures.";
}
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,1,6,8] parameter(0)
input_square = f32[2,1,6,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,1,8] reduce(input_square, c0), dimensions={2}, to_apply=apply
r_nelems = f32[] constant(0.166667)
r_nelems_bcast = f32[2,1,8] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,1,8] multiply(input_square_sum, r_nelems_bcast)
reduce = f32[2,1,8] reduce(input, c0), dimensions={2}, to_apply=apply
input_mean = f32[2,1,8] multiply(reduce, r_nelems_bcast)
input_mean_square = f32[2,1,8] multiply(input_mean, input_mean)
variance = f32[2,1,8] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,1,8] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,1,8] add(variance, epsilon_bcast)
norm_factor = f32[2,1,8] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,1,6,8] broadcast(norm_factor), dimensions={0,1,3}
input_mean_bcast = f32[2,1,6,8] broadcast(input_mean), dimensions={0,1,3}
input_center = f32[2,1,6,8] subtract(input, input_mean_bcast)
norm = f32[2,1,6,8] multiply(norm_factor_bcast, input_center)
scale = f32[6] parameter(1)
scale_bcast = f32[2,1,6,8] broadcast(scale), dimensions={2}
norm_scale = f32[2,1,6,8] multiply(norm, scale_bcast)
bias = f32[6] parameter(2)
bias_broadcast = f32[2,1,6,8] broadcast(bias), dimensions={2}
ROOT out = f32[2,1,6,8] add(norm_scale, bias_broadcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,1,6,8], {{.*}}: f32[6], {{.*}}: f32[6]) -> f32[2,1,6,8] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,1,6,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[TRANSPOSE:%[^ ]+]] = f32[1,2,8,6]{3,2,1,0} transpose([[P0]]), dimensions={1,0,3,2}
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[16,6,1,1]{3,2,1,0} bitcast([[TRANSPOSE]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[6]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[6]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[16,6,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[16,6,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: ROOT [[FUSION:%[^ ]+]] = f32[2,1,6,8]{3,2,1,0} fusion([[GTE]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]]
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNorm4D12) {
#if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8905)
GTEST_SKIP() << "Layer norm kernels require CUDA 12 and cuDNN 8.9.5.";
#endif
if (!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::AMPERE) &&
!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP()
<< "Layer norm kernels require Ampere or Hopper architectures.";
}
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4,6,8] parameter(0)
input_square = f32[2,4,6,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,8] reduce(input_square, c0), dimensions={1,2}, to_apply=apply
r_nelems = f32[] constant(0.041667)
r_nelems_bcast = f32[2,8] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,8] multiply(input_square_sum, r_nelems_bcast)
reduce = f32[2,8] reduce(input, c0), dimensions={1,2}, to_apply=apply
input_mean = f32[2,8] multiply(reduce, r_nelems_bcast)
input_mean_square = f32[2,8] multiply(input_mean, input_mean)
variance = f32[2,8] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,8] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,8] add(variance, epsilon_bcast)
norm_factor = f32[2,8] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,3}
input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,3}
input_center = f32[2,4,6,8] subtract(input, input_mean_bcast)
norm = f32[2,4,6,8] multiply(norm_factor_bcast, input_center)
scale = f32[4,6] parameter(1)
scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={1,2}
norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast)
bias = f32[4,6] parameter(2)
bias_broadcast = f32[2,4,6,8] broadcast(bias), dimensions={1,2}
ROOT out = f32[2,4,6,8] add(norm_scale, bias_broadcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[4,6], {{.*}}: f32[4,6]) -> f32[2,4,6,8] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[TRANSPOSE:%[^ ]+]] = f32[2,8,4,6]{3,2,1,0} transpose([[P0]]), dimensions={0,3,1,2}
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[16,4,6,1]{3,2,1,0} bitcast([[TRANSPOSE]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4,6]{1,0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,6,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4,6]{1,0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,6,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[16,4,6,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[16,4,6,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: ROOT [[FUSION:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} fusion([[GTE]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]]
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNorm4D12Degenerate2) {
#if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8905)
GTEST_SKIP() << "Layer norm kernels require CUDA 12 and cuDNN 8.9.5.";
#endif
if (!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::AMPERE) &&
!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP()
<< "Layer norm kernels require Ampere or Hopper architectures.";
}
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4,1,8] parameter(0)
input_square = f32[2,4,1,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,8] reduce(input_square, c0), dimensions={1,2}, to_apply=apply
r_nelems = f32[] constant(0.25)
r_nelems_bcast = f32[2,8] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,8] multiply(input_square_sum, r_nelems_bcast)
reduce = f32[2,8] reduce(input, c0), dimensions={1,2}, to_apply=apply
input_mean = f32[2,8] multiply(reduce, r_nelems_bcast)
input_mean_square = f32[2,8] multiply(input_mean, input_mean)
variance = f32[2,8] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,8] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,8] add(variance, epsilon_bcast)
norm_factor = f32[2,8] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4,1,8] broadcast(norm_factor), dimensions={0,3}
input_mean_bcast = f32[2,4,1,8] broadcast(input_mean), dimensions={0,3}
input_center = f32[2,4,1,8] subtract(input, input_mean_bcast)
norm = f32[2,4,1,8] multiply(norm_factor_bcast, input_center)
scale = f32[4,1] parameter(1)
scale_bcast = f32[2,4,1,8] broadcast(scale), dimensions={1,2}
norm_scale = f32[2,4,1,8] multiply(norm, scale_bcast)
bias = f32[4,1] parameter(2)
bias_broadcast = f32[2,4,1,8] broadcast(bias), dimensions={1,2}
ROOT out = f32[2,4,1,8] add(norm_scale, bias_broadcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,1,8], {{.*}}: f32[4,1], {{.*}}: f32[4,1]) -> f32[2,4,1,8] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,1,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[TRANSPOSE:%[^ ]+]] = f32[1,2,8,4]{3,2,1,0} transpose([[P0]]), dimensions={2,0,3,1}
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[16,4,1,1]{3,2,1,0} bitcast([[TRANSPOSE]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4,1]{1,0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4,1]{1,0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[16,4,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[16,4,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: ROOT [[FUSION:%[^ ]+]] = f32[2,4,1,8]{3,2,1,0} fusion([[GTE]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]]
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNorm4D3IncorrectScaleBroadcast) {
#if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8905)
GTEST_SKIP() << "Layer norm kernels require CUDA 12 and cuDNN 8.9.5.";
#endif
if (!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::AMPERE) &&
!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP()
<< "Layer norm kernels require Ampere or Hopper architectures.";
}
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,2,2,2] parameter(0)
input_square = f32[2,2,2,2] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,2,2] reduce(input_square, c0), dimensions={3}, to_apply=apply
r_nelems = f32[] constant(0.5)
r_nelems_bcast = f32[2,2,2] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,2,2] multiply(input_square_sum, r_nelems_bcast)
input_sum = f32[2,2,2] reduce(input, c0), dimensions={3}, to_apply=apply
input_mean = f32[2,2,2] multiply(input_sum, r_nelems_bcast)
input_mean_square = f32[2,2,2] multiply(input_mean, input_mean)
variance = f32[2,2,2] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,2,2] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,2,2] add(variance, epsilon_bcast)
norm_factor = f32[2,2,2] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,2,2,2] broadcast(norm_factor), dimensions={0,1,2}
input_mean_bcast = f32[2,2,2,2] broadcast(input_mean), dimensions={0,1,2}
input_center = f32[2,2,2,2] subtract(input, input_mean_bcast)
norm = f32[2,2,2,2] multiply(norm_factor_bcast, input_center)
scale = f32[2] parameter(1)
scale_bcast = f32[2,2,2,2] broadcast(scale), dimensions={2}
norm_scale = f32[2,2,2,2] multiply(norm, scale_bcast)
bias = f32[2] parameter(2)
bias_bcast = f32[2,2,2,2] broadcast(bias), dimensions={3}
ROOT out = f32[2,2,2,2] add(norm_scale, bias_bcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,2,2,2], {{.*}}: f32[2], {{.*}}: f32[2]) -> f32[2,2,2,2] {
; CHECK-NOT: custom_call_target="__cudnn$norm"
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNormTrain2D1) {
#if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8905)
GTEST_SKIP() << "Layer norm kernels require CUDA 12 and cuDNN 8.9.5.";
#endif
if (!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::AMPERE) &&
!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP()
<< "Layer norm kernels require Ampere or Hopper architectures.";
}
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4] parameter(0)
input_square = f32[2,4] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2] reduce(input_square, c0), dimensions={1}, to_apply=apply
r_nelems = f32[] constant(0.25)
r_nelems_bcast = f32[2] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2] multiply(input_square_sum,r_nelems_bcast)
reduce = f32[2] reduce(input, c0), dimensions={1}, to_apply=apply
input_mean = f32[2] multiply(reduce,r_nelems_bcast)
input_mean_square = f32[2] multiply(input_mean,input_mean)
variance = f32[2] subtract(input_square_mean,input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2] add(variance, epsilon_bcast)
norm_factor = f32[2] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4] broadcast(norm_factor), dimensions={0}
input_mean_bcast = f32[2,4] broadcast(input_mean), dimensions={0}
input_center = f32[2,4] subtract(input,input_mean_bcast)
norm = f32[2,4] multiply(norm_factor_bcast,input_center)
scale = f32[4] parameter(1)
scale_bcast = f32[2,4] broadcast(scale), dimensions={1}
norm_scale = f32[2,4] multiply(norm,scale_bcast)
bias = f32[4] parameter(2)
bias_broadcast = f32[2,4] broadcast(bias), dimensions={1}
norm_scale_bias = f32[2,4] add(norm_scale, bias_broadcast)
norm_factor_cube = f32[2] divide(norm_factor, variance_plus_epsilon)
ROOT out = (f32[2,4], f32[2], f32[2], f32[2]) tuple(norm_scale_bias, input_mean, norm_factor, norm_factor_cube)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4], {{.*}}: f32[4], {{.*}}: f32[4]) -> (f32[2,4], f32[2], f32[2], f32[2]) {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4]{1,0} parameter(0)
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} bitcast([[P0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[2,4,1,1]{3,2,1,0}, f32[2,1,1,1]{3,2,1,0}, f32[2,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE0:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: [[GTE0_BITCAST:%[^ ]+]] = f32[2,4]{1,0} bitcast([[GTE0]])
; CHECK-NEXT: [[GTE1:%[^ ]+]] = f32[2,1,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=1
; CHECK-NEXT: [[GTE1_BITCAST:%[^ ]+]] = f32[2]{0} bitcast([[GTE1]])
; CHECK-NEXT: [[GTE2:%[^ ]+]] = f32[2,1,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=2
; CHECK-NEXT: [[GTE2_BITCAST:%[^ ]+]] = f32[2]{0} bitcast([[GTE2]])
; CHECK-NEXT: [[FUSION:%[^ ]+]] = f32[2]{0} fusion([[GTE2]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]]
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, f32[2]{0}, f32[2]{0}, f32[2]{0}) tuple([[GTE0_BITCAST]], [[GTE1_BITCAST]], [[GTE2_BITCAST]], [[FUSION]])
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNormTrain4D3) {
#if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8905)
GTEST_SKIP() << "Layer norm kernels require CUDA 12 and cuDNN 8.9.5.";
#endif
if (!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::AMPERE) &&
!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP()
<< "Layer norm kernels require Ampere or Hopper architectures.";
}
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4,6,8] parameter(0)
input_square = f32[2,4,6,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,4,6] reduce(input_square, |
2,053 | cpp | tensorflow/tensorflow | pipelined_p2p_rewriter | third_party/xla/xla/service/gpu/transforms/pipelined_p2p_rewriter.cc | third_party/xla/xla/service/gpu/transforms/pipelined_p2p_rewriter_test.cc | #ifndef XLA_SERVICE_GPU_PIPELINED_P2P_REWRITER_H_
#define XLA_SERVICE_GPU_PIPELINED_P2P_REWRITER_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
class PipelinedP2PRewriter : public HloModulePass {
public:
absl::string_view name() const override { return "pipelined-p2p-rewriter"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
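// Minimal usage sketch (assumes a scheduled HloModule `module`, mirroring the
// accompanying unit test):
//   PipelinedP2PRewriter rewriter;
//   absl::StatusOr<bool> changed = rewriter.Run(module.get());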
#include "xla/service/gpu/pipelined_p2p_rewriter.h"
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using CollectiveInComputation =
absl::flat_hash_map<const HloComputation*, bool>;
using InstructionVector = HloInstruction::InstructionVector;
struct PipelinedP2PInfo {
int64_t opnd_start;
int64_t opnd_end;
};
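// Returns true if the instruction is a collective or point-to-point
// communication op. Custom calls are treated conservatively, since they may
// wrap arbitrary communication.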
bool IsCollectiveOp(const HloInstruction* op) {
HloOpcode opcode = op->opcode();
if (opcode == HloOpcode::kCustomCall) {
return true;
}
return hlo_query::IsCollectiveCommunicationOp(opcode) ||
opcode == HloOpcode::kSend || opcode == HloOpcode::kRecv;
}
bool MayInvokeCollectiveOp(
const HloInstruction* hlo,
const CollectiveInComputation& collective_in_computation) {
if (IsCollectiveOp(hlo)) {
return true;
}
for (HloComputation* callee : hlo->called_computations()) {
auto collective_in_comp = collective_in_computation.find(callee);
CHECK(collective_in_comp != collective_in_computation.end());
if (collective_in_comp->second) {
return true;
}
}
return false;
}
HloInstruction* FindUniqueGTEUserWithIndex(const HloInstruction* op,
int64_t idx) {
CHECK(op->shape().IsTuple());
HloInstruction* gte = nullptr;
for (auto user : op->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
continue;
}
if (user->tuple_index() == idx) {
if (gte == nullptr) {
gte = user;
} else {
return nullptr;
}
}
}
return gte;
}
bool HasGTEUserWithIndex(const HloInstruction* op, int64_t idx) {
CHECK(op->shape().IsTuple());
for (auto user : op->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
continue;
}
if (user->tuple_index() == idx) {
return true;
}
}
return false;
}
HloInstruction* MaySkipTrivialTuple(HloInstruction* op) {
if (op->opcode() != HloOpcode::kTuple) {
return op;
}
HloInstruction* hidden_op = nullptr;
for (auto opnd : op->mutable_operands()) {
if (opnd->opcode() != HloOpcode::kGetTupleElement) {
return op;
}
if (hidden_op == nullptr) {
hidden_op = opnd->mutable_operand(0);
} else if (opnd->mutable_operand(0) != hidden_op) {
return op;
}
}
return hidden_op;
}
const HloInstruction* MaySkipTrivialTuple(const HloInstruction* op) {
return MaySkipTrivialTuple(const_cast<HloInstruction*>(op));
}
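// Scans the while-init tuple for a single consecutive block of pipelined
// SendDone/RecvDone operands. A running counter is incremented per RecvDone
// and decremented per SendDone; the block is accepted only if the counter
// nets to zero and no SendDone/RecvDone appears outside the block.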
std::optional<PipelinedP2PInfo>
FindConsecutiveAndBalanceBlockOfSendDoneRecvDone(
const HloInstruction* while_init) {
PipelinedP2PInfo pipelined_p2p_info{0, 0};
auto has_started = [&]() {
return pipelined_p2p_info.opnd_start != pipelined_p2p_info.opnd_end;
};
int difference = 0;
for (int64_t i = 0; i < while_init->operand_count(); ++i) {
const HloInstruction* op = while_init->operand(i);
if ((op->opcode() == HloOpcode::kRecvDone ||
op->opcode() == HloOpcode::kSendDone) &&
op->frontend_attributes().map().count(kSendRecvPipelineAttr) > 0) {
if (op->opcode() == HloOpcode::kRecvDone) {
difference++;
} else {
difference--;
}
if (!has_started()) {
pipelined_p2p_info.opnd_start = i;
}
pipelined_p2p_info.opnd_end = i + 1;
} else {
if (has_started()) {
VLOG(10) << "End a consecutive block";
break;
}
}
}
if (difference != 0) {
VLOG(10) << "Mismatch number of SendDone and RecvDone: " << difference;
return std::nullopt;
}
if (has_started()) {
for (int64_t i = pipelined_p2p_info.opnd_end;
i < while_init->operand_count(); ++i) {
const HloInstruction* op = while_init->operand(i);
if (op->opcode() == HloOpcode::kRecvDone ||
op->opcode() == HloOpcode::kSendDone) {
VLOG(10) << "SendDone/RecvDone outside the consecutive block";
        return std::nullopt;
}
}
}
if (!has_started()) {
VLOG(10) << "No SendDone/RecvDone in while-init ";
return std::nullopt;
}
return pipelined_p2p_info;
}
std::optional<PipelinedP2PInfo> FindPipelinedP2P(
const HloInstruction* while_op) {
VLOG(10) << "while_op: " << while_op->ToString();
const HloInstruction* while_init = while_op->while_init();
if (while_init->opcode() != HloOpcode::kTuple ||
while_init->user_count() != 1) {
return std::nullopt;
}
const HloComputation* while_body = while_op->while_body();
const HloComputation* while_condition = while_op->while_condition();
if (while_body->num_parameters() != 1 ||
while_condition->num_parameters() != 1) {
return std::nullopt;
}
std::optional<PipelinedP2PInfo> pipelined_p2p_info =
FindConsecutiveAndBalanceBlockOfSendDoneRecvDone(while_init);
if (!pipelined_p2p_info.has_value()) {
return std::nullopt;
}
VLOG(10) << "opnd_start " << pipelined_p2p_info->opnd_start << " opnd_end "
<< pipelined_p2p_info->opnd_end;
for (int64_t i = pipelined_p2p_info->opnd_start;
i < pipelined_p2p_info->opnd_end; ++i) {
const HloInstruction* op = while_init->operand(i);
if (op->opcode() == HloOpcode::kRecvDone) {
if (!FindUniqueGTEUserWithIndex(while_op, i)) {
VLOG(10) << "While result get-tuple-element user with index " << i
<< " not unique";
return std::nullopt;
}
if (!FindUniqueGTEUserWithIndex(while_body->parameter_instruction(0),
i)) {
VLOG(10) << "While-body parameter get-tuple-element user with index "
<< i << " not unique";
return std::nullopt;
}
} else {
CHECK(op->opcode() == HloOpcode::kSendDone);
if (HasGTEUserWithIndex(while_op, i) ||
HasGTEUserWithIndex(while_body->parameter_instruction(0), i)) {
VLOG(10) << "SendDone with index " << i << " has unexpected users";
return std::nullopt;
}
}
}
const HloInstruction* root = while_body->root_instruction();
for (int64_t i = pipelined_p2p_info->opnd_start;
i < pipelined_p2p_info->opnd_end; ++i) {
const HloInstruction* op_init = while_init->operand(i);
const HloInstruction* op_root = root->operand(i);
op_root = MaySkipTrivialTuple(op_root);
if (op_init->opcode() != op_root->opcode()) {
VLOG(10) << "Mismatching opcode, op_init: " << op_init->ToString()
<< " op_root: " << op_root->ToString();
return std::nullopt;
}
}
return pipelined_p2p_info.value();
}
absl::Status RemoveOpFromParent(HloInstruction* op) {
TF_RETURN_IF_ERROR(op->DropAllControlDeps());
TF_RETURN_IF_ERROR(op->parent()->RemoveInstruction(op));
return absl::OkStatus();
}
absl::Status ReplaceOpInSequence(HloInstruction* old_op, HloInstruction* new_op,
HloInstructionSequence& instruction_sequence) {
VLOG(10) << "old_op: " << old_op->ToString();
VLOG(10) << "new_op: " << new_op->ToString();
instruction_sequence.replace_instruction(old_op, new_op);
return RemoveOpFromParent(old_op);
}
absl::Status ReplaceUsesAndUpdateSequence(
HloInstruction* old_op, HloInstruction* new_op,
HloInstructionSequence& instruction_sequence, bool diff_shape = false) {
VLOG(10) << "old_op: " << old_op->ToString();
VLOG(10) << "new_op: " << new_op->ToString();
if (diff_shape) {
TF_RETURN_IF_ERROR(old_op->ReplaceAllUsesWithDifferentShape(new_op));
} else {
TF_RETURN_IF_ERROR(old_op->ReplaceAllUsesWith(new_op));
}
return ReplaceOpInSequence(old_op, new_op, instruction_sequence);
}
absl::Status ReplaceUsesAndUpdateSequence(
const InstructionVector& old_ops, const InstructionVector& new_ops,
HloInstructionSequence& instruction_sequence) {
CHECK(old_ops.size() == new_ops.size());
for (int64_t i = 0; i < old_ops.size(); ++i) {
TF_RETURN_IF_ERROR(ReplaceUsesAndUpdateSequence(old_ops[i], new_ops[i],
instruction_sequence));
}
return absl::OkStatus();
}
absl::Status RemoveDoneOpsAndUpdateSequence(
const InstructionVector& ops,
HloInstructionSequence& instruction_sequence) {
auto remove_op = [&](HloInstruction* op) {
VLOG(10) << "op: " << op->ToString();
TF_RETURN_IF_ERROR(RemoveOpFromParent(op));
instruction_sequence.remove_instruction(op);
return absl::OkStatus();
};
for (auto op : ops) {
if (op->opcode() == HloOpcode::kTuple) {
InstructionVector to_remove;
HloInstruction* tuple_op = op;
op = MaySkipTrivialTuple(tuple_op);
to_remove.push_back(tuple_op);
for (auto opnd : tuple_op->mutable_operands()) {
to_remove.push_back(opnd);
}
for (auto opnd : to_remove) {
TF_RETURN_IF_ERROR(remove_op(opnd));
}
}
TF_RETURN_IF_ERROR(remove_op(op));
}
return absl::OkStatus();
}
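// Inserts `ops` immediately before the first instruction at or after `idx`
// that may invoke a collective op, so that the recreated send-dones stay
// ordered ahead of subsequent communication. Returns false if no such
// instruction exists and nothing was inserted.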
bool InsertBeforeFirstCollectiveOp(
const InstructionVector& ops,
const CollectiveInComputation& collective_in_computation,
HloInstructionSequence& instruction_sequence, int64_t& idx,
int64_t& idx_tot) {
bool inserted = false;
while (idx < idx_tot) {
HloInstruction* hlo = instruction_sequence.instructions()[idx];
if (MayInvokeCollectiveOp(hlo, collective_in_computation)) {
for (auto op : ops) {
instruction_sequence.insert_instruction(op, idx);
idx++;
idx_tot++;
}
inserted = true;
break;
}
idx++;
}
return inserted;
}
void CopyInstructionInfo(const HloInstruction* old_op, HloInstruction* new_op) {
new_op->set_metadata(old_op->metadata());
new_op->add_frontend_attributes(old_op->frontend_attributes());
new_op->CopyBackendConfigFrom(old_op);
}
HloInstruction* CreateRecvDoneFrom(const HloInstruction* old_recv_done,
HloInstruction* recv,
HloComputation* computation) {
HloInstruction* recv_done =
computation->AddInstruction(HloInstruction::CreateRecvDone(
recv, old_recv_done->channel_id().value()));
CopyInstructionInfo(old_recv_done, recv_done);
return recv_done;
}
HloInstruction* CreateSendDoneFrom(const HloInstruction* old_send_done,
HloInstruction* send,
HloComputation* computation) {
HloInstruction* send_done =
computation->AddInstruction(HloInstruction::CreateSendDone(
send, old_send_done->channel_id().value()));
CopyInstructionInfo(old_send_done, send_done);
return send_done;
}
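// Rewrites the while-body so that the loop carries the started Send/Recv ops
// rather than their SendDone/RecvDone results: the done ops feeding the root
// are stripped (their Send/Recv operands flow through the loop instead) and
// fresh RecvDone/SendDone ops are materialized inside the body. The intended
// effect is to let the point-to-point transfers of one iteration overlap with
// the compute of the next.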
absl::Status RewritePipelinedP2PWhileBody(
const CollectiveInComputation& collective_in_computation,
const std::vector<Shape>& new_parameter_shapes, HloInstruction* while_op,
int64_t opnd_start, int64_t opnd_end) {
HloComputation* computation = while_op->while_body();
HloInstruction* while_init = while_op->while_init();
HloInstruction* root = computation->root_instruction();
HloInstructionSequence& instruction_sequence =
computation->parent()->schedule().GetOrCreateSequence(computation);
HloInstruction* param = computation->parameter_instruction(0);
*param->mutable_shape() = ShapeUtil::MakeTupleShape(new_parameter_shapes);
InstructionVector recv_dones;
InstructionVector new_recv_dones;
InstructionVector new_send_dones;
for (int64_t i = opnd_start; i < opnd_end; ++i) {
const HloInstruction* op = root->operand(i);
op = MaySkipTrivialTuple(op);
if (op->opcode() == HloOpcode::kRecvDone) {
HloInstruction* gte = FindUniqueGTEUserWithIndex(param, i);
CHECK(gte != nullptr);
recv_dones.push_back(gte);
HloInstruction* recv = computation->AddInstruction(
HloInstruction::CreateGetTupleElement(param, i));
HloInstruction* recv_done = CreateRecvDoneFrom(op, recv, computation);
new_recv_dones.push_back(recv_done);
continue;
}
CHECK(op->opcode() == HloOpcode::kSendDone);
HloInstruction* send = computation->AddInstruction(
HloInstruction::CreateGetTupleElement(param, i));
HloInstruction* send_done = CreateSendDoneFrom(op, send, computation);
new_send_dones.push_back(send_done);
}
TF_RETURN_IF_ERROR(ReplaceUsesAndUpdateSequence(recv_dones, new_recv_dones,
instruction_sequence));
InstructionVector done_ops;
InstructionVector new_opnds;
for (int64_t i = 0; i < while_init->operand_count(); ++i) {
HloInstruction* op = root->mutable_operand(i);
if (i >= opnd_start && i < opnd_end) {
new_opnds.push_back(MaySkipTrivialTuple(op)->mutable_operand(0));
done_ops.push_back(op);
} else {
new_opnds.push_back(op);
}
}
HloInstruction* new_root =
computation->AddInstruction(HloInstruction::CreateTuple(new_opnds));
computation->set_root_instruction(new_root,
true);
TF_RETURN_IF_ERROR(computation->RemoveInstruction(root));
instruction_sequence.replace_instruction(root, new_root);
TF_RETURN_IF_ERROR(
RemoveDoneOpsAndUpdateSequence(done_ops, instruction_sequence));
int64_t idx = 0;
int64_t idx_end = instruction_sequence.size();
bool inserted =
InsertBeforeFirstCollectiveOp(new_send_dones, collective_in_computation,
instruction_sequence, idx, idx_end);
CHECK(inserted);
CHECK(idx_end == instruction_sequence.size());
return absl::OkStatus();
}
void RewritePipelinedP2PWhileCond(
const std::vector<Shape>& new_parameter_shapes, HloInstruction* while_op) {
HloComputation* computation = while_op->while_condition();
HloInstruction* param = computation->parameter_instruction(0);
*param->mutable_shape() = ShapeUtil::MakeTupleShape(new_parameter_shapes);
VLOG(10) << computation->ToString();
}
absl::Status TransformLoop(
const PipelinedP2PInfo& pipelined_info,
const CollectiveInComputation& collective_in_computation, int64_t& idx,
int64_t& idx_end, HloInstructionSequence& instruction_sequence,
HloInstruction* while_op) {
HloComputation* computation = while_op->parent();
int64_t opnd_start = pipelined_info.opnd_start;
int64_t opnd_end = pipelined_info.opnd_end;
VLOG(10) << "Transform pipelined while-op " << while_op->ToString();
HloInstruction* while_init = while_op->while_init();
InstructionVector new_while_init_opnds;
std::vector<Shape> new_parameter_shapes;
for (int64_t i = 0; i < while_init->operand_count(); ++i) {
HloInstruction* op = while_init->mutable_operand(i);
if (i >= opnd_start && i < opnd_end) {
new_while_init_opnds.push_back(op->mutable_operand(0));
} else {
new_while_init_opnds.push_back(op);
}
new_parameter_shapes.push_back(new_while_init_opnds.back()->shape());
}
RewritePipelinedP2PWhileCond(new_parameter_shapes, while_op);
TF_RETURN_IF_ERROR(RewritePipelinedP2PWhileBody(
collective_in_computation, new_parameter_shapes, while_op, opnd_start,
opnd_end));
HloInstruction* new_while_init = computation->AddInstruction(
HloInstruction::CreateTuple(new_while_init_opnds), "while-init");
VLOG(10) << "new_while_init: " << new_while_init->ToString();
HloInstruction* new_while_op = computation->AddInstruction(
HloInstruction::CreateWhile(
while_op->while_body()->root_instruction()->shape(),
while_op->while_condition(), while_op->while_body(), new_while_init),
"while-result");
CopyInstructionInfo(while_op, new_while_op);
VLOG(10) << "new_while_op: " << new_while_op->ToString();
InstructionVector recv_dones;
InstructionVector new_recv_dones;
InstructionVector new_send_dones;
InstructionVector done_ops;
for (int64_t i = opnd_start; i < opnd_end; ++i) {
HloInstruction* op = while_init->mutable_operand(i);
done_ops.push_back(op);
if (op->opcode() == HloOpcode::kRecvDone) {
HloInstruction* gte = FindUniqueGTEUserWithIndex(while_op, i);
CHECK(gte != nullptr);
recv_dones.push_back(gte);
HloInstruction* recv = computation->AddInstruction(
HloInstruction::CreateGetTupleElement(new_while_op, i));
HloInstruction* recv_done = computation->AddInstruction(
HloInstruction::CreateRecvDone(recv, op->channel_id().value()));
new_recv_dones.push_back(recv_done);
CopyInstructionInfo(op, recv_done);
continue;
}
CHECK(op->opcode() == HloOpcode::kSendDone);
HloInstruction* send = computation->AddInstruction(
HloInstruction::CreateGetTupleElement(new_while_op, i));
HloInstruction* send_done = computation->AddInstruction(
HloInstruction::CreateSendDone(send, op->channel_id().value()));
new_send_dones.push_back(send_done);
CopyInstructionInfo(op, send_done);
}
TF_RETURN_IF_ERROR(ReplaceUsesAndUpdateSequence(
while_op, new_while_op, instruction_sequence, true));
TF_RETURN_IF_ERROR(
ReplaceOpInSequence(while_init, new_while_init, instruction_sequence));
TF_RETURN_IF_ERROR(ReplaceUsesAndUpdateSequence(recv_dones, new_recv_dones,
instruction_sequence));
TF_RETURN_IF_ERROR(
RemoveDoneOpsAndUpdateSequence(done_ops, instruction_sequence));
int64_t opnd_tot = opnd_end - opnd_start;
CHECK(idx_end == instruction_sequence.size() + opnd_tot);
CHECK(instruction_sequence.instructions()[idx - opnd_tot] == new_while_op);
idx_end -= opnd_tot;
idx = idx - opnd_tot + 1;
bool inserted =
InsertBeforeFirstCollectiveOp(new_send_dones, collective_in_computation,
instruction_sequence, idx, idx_end);
CHECK(idx_end == instruction_sequence.size());
if (!inserted) {
CHECK(idx_end == idx);
idx--;
for (auto send_done : new_send_dones) {
instruction_sequence.insert_instruction(send_done, idx++);
}
}
return absl::OkStatus();
}
absl::StatusOr<bool> ProcessComputation(
HloModule* module, HloComputation* computation,
CollectiveInComputation& collective_in_computation) {
VLOG(10) << "Process compuation " << computation->name();
bool changed = false;
HloInstructionSequence& instruction_sequence =
module->schedule().GetOrCreateSequence(computation);
int64_t idx = 0;
int64_t idx_end = instruction_sequence.size();
while (idx < idx_end) {
HloInstruction* hlo = instruction_sequence.instructions()[idx];
if (MayInvokeCollectiveOp(hlo, collective_in_computation)) {
collective_in_computation[computation] = true;
}
if (hlo->opcode() != HloOpcode::kWhile) {
idx++;
continue;
}
std::optional<PipelinedP2PInfo> pipelined_info = FindPipelinedP2P(hlo);
if (!pipelined_info.has_value()) {
idx++;
continue;
}
TF_RETURN_IF_ERROR(TransformLoop(pipelined_info.value(),
collective_in_computation, idx, idx_end,
instruction_sequence, hlo));
changed = true;
}
return changed;
}
}
absl::StatusOr<bool> PipelinedP2PRewriter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
if (!module->has_schedule()) return changed;
CollectiveInComputation collective_in_computation;
for (auto* computation :
module->MakeComputationPostOrder(execution_threads)) {
if (computation->IsFusionComputation()) {
collective_in_computation[computation] = false;
continue;
}
TF_ASSIGN_OR_RETURN(
bool cur_changed,
ProcessComputation(module, computation, collective_in_computation));
changed |= cur_changed;
}
if (changed) {
TF_RETURN_IF_ERROR(module->schedule().Update());
}
return changed;
}
}
} | #include "xla/service/gpu/pipelined_p2p_rewriter.h"
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
class PipelinedP2pRewriterTest : public HloTestBase {
protected:
void DoFileCheck(const HloModule* module, absl::string_view expected) {
HloPrintOptions options;
options.set_print_operand_shape(false);
options.set_print_result_shape(false);
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_matched,
RunFileCheck(module->ToString(options), expected));
EXPECT_TRUE(filecheck_matched);
}
};
TEST_F(PipelinedP2pRewriterTest, SendRecvUnpipelinedNotTransformed) {
const char* kModuleStr = R"(
HloModule test
cond {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(11)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(param), index=0
send-data = u32[2] get-tuple-element(param), index=1
after-all.0.n = token[] after-all()
recv.0 = (u32[2], u32[], token[]) recv(after-all.0.n), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
send.0 = (u32[2], u32[], token[]) send(send-data, after-all.0.n),
channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.0 = token[] send-done(send.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
recv-data = u32[2] get-tuple-element(recv-done.0), index=0
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
r = u32[2] broadcast(c1), dimensions={}
s = u32[2] add(r, recv-data)
ROOT result = (u32[], u32[2]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
c1 = u32[] constant(1)
r = u32[] replica-id()
a = u32[] add(c1, r)
init = u32[2] broadcast(a), dimensions={}
while_init = (u32[], u32[2]) tuple(c0, init)
while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond,
backend_config={"known_trip_count":{"n":"11"}}
ROOT recv-data = u32[2] get-tuple-element(while_result), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
PipelinedP2PRewriter rewriter;
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(PipelinedP2pRewriterTest, SendRecvPipelined1) {
const char* kModuleStr = R"(
HloModule test, is_scheduled=true
while-cond {
param = (u32[], (f32[1,1024,1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(25)
ROOT cond-result = pred[] compare(count, ub), direction=LT
}
while-body {
param = (u32[], (f32[1,1024,1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
recv-done.q = (f32[1,1024,1024], token[]) get-tuple-element(param), index=1
recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done.q), index=0
c1 = u32[] constant(1)
new-count = u32[] add(count, c1)
replica = u32[] replica-id()
c10 = u32[] constant(10)
sum = u32[] add(replica, c10)
sum2 = u32[] add(sum, count)
conv = f32[] convert(sum2)
p = f32[1, 1024, 1024] broadcast(conv), dimensions={}
b = f32[1, 1024, 1024] add(p, recv-data)
c = f32[1, 1024, 1024] multiply(b, b)
d = f32[1, 1024, 1024] tan(c)
s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
send-data = f32[1, 1024, 1024] add(c, s)
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
send = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
recv-done.p = (f32[1,1024,1024], token[]) recv-done(recv), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.p = token[] send-done(send), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
gte.0 = f32[1,1024,1024] get-tuple-element(recv-done.p), index=0
gte.1 = token[] get-tuple-element(recv-done.p), index=1
recv-done-tuple = (f32[1,1024,1024], token[]) tuple(gte.0, gte.1)
ROOT body-result = (u32[], (f32[1,1024,1024], token[]), token[])
tuple(new-count, recv-done-tuple, send-done.p)
}
ENTRY main {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
after-all.1 = token[] after-all()
recv.1 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.1), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
send.1 = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all.1), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
recv-done.1.p = (f32[1,1024,1024], token[]) recv-done(recv.1), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.1.p = token[] send-done(send.1), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
while-init.p = (u32[], (f32[1,1024,1024], token[]), token[])
tuple(c0, recv-done.1.p, send-done.1.p)
while-result.p = (u32[], (f32[1,1024,1024], token[]), token[])
while(while-init.p),
body=while-body, condition=while-cond,
backend_config={"known_trip_count":{"n":"25"}}
recv-done.1.q = (f32[1,1024,1024], token[]) get-tuple-element(while-result.p), index=1
ROOT entry-result = f32[1, 1024, 1024] get-tuple-element(recv-done.1.q), index=0
}
)";
const char* kExpected = R"(
CHECK: %while-body (param.1: (u32[], (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]))) -> (u32[], (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[])) {
CHECK: %param.1 = parameter(0)
CHECK: %get-tuple-element = get-tuple-element(%param.1), index=1
CHECK: %get-tuple-element.1 = get-tuple-element(%param.1), index=2
CHECK: %count.1 = get-tuple-element(%param.1), index=0
CHECK: %recv-done = recv-done(%get-tuple-element), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: %recv-data = get-tuple-element(%recv-done), index=0
CHECK: %c1 = constant(1)
CHECK: %new-count = add(%count.1, %c1)
CHECK: %replica = replica-id()
CHECK: %c10 = constant(10)
CHECK: %sum = add(%replica, %c10)
CHECK: %sum2 = add(%sum, %count.1)
CHECK: %conv = convert(%sum2)
CHECK: %p = broadcast(%conv), dimensions={}
CHECK: %b = add(%p, %recv-data)
CHECK: %c = multiply(%b, %b)
CHECK: %d = tan(%c)
CHECK: %s = dot(%c, %d), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
CHECK: %send-data = add(%c, %s)
CHECK: %after-all = after-all()
CHECK: %send-done = send-done(%get-tuple-element.1), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK{LITERAL}: %recv = recv(%after-all), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}"}
CHECK{LITERAL}: %send = send(%send-data, %after-all), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}"}
CHECK: ROOT %tuple = tuple(%new-count, %recv, %send)
CHECK: }
CHECK: %while-cond (param: (u32[], (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]))) -> pred[] {
CHECK: %param = parameter(0)
CHECK: %count = get-tuple-element(%param), index=0
CHECK: %ub = constant(25)
CHECK: ROOT %cond-result = compare(%count, %ub), direction=LT
CHECK: }
CHECK: ENTRY %main () -> f32[1,1024,1024] {
CHECK: %c0 = constant(0)
CHECK: %f0 = constant(0)
CHECK: %init = broadcast(%f0), dimensions={}
CHECK: %after-all.1 = after-all()
CHECK{LITERAL}: %recv.1 = recv(%after-all.1), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}"}
CHECK{LITERAL}: %send.1 = send(%init, %after-all.1), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}"}
CHECK: %while-init = tuple(%c0, %recv.1, %send.1)
CHECK: %while-result = while(%while-init), condition=%while-cond, body=%while-body,
CHECK-SAME{LITERAL}: backend_config={"known_trip_count":{"n":"25"}}
CHECK: %get-tuple-element.2 = get-tuple-element(%while-result), index=1
CHECK: %get-tuple-element.3 = get-tuple-element(%while-result), index=2
CHECK: %recv-done.1 = recv-done(%get-tuple-element.2), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: %send-done.1 = send-done(%get-tuple-element.3), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: ROOT %entry-result = get-tuple-element(%recv-done.1), index=0
CHECK: })";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
PipelinedP2PRewriter rewriter;
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
EXPECT_TRUE(changed);
DoFileCheck(module.get(), kExpected);
}
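// Two consecutive pipelined while loops; this test only checks that the
// rewrite fires, without FileCheck verification.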
TEST_F(PipelinedP2pRewriterTest, SendRecvTwoPipelinedWhileLoops) {
const char* kModuleStr = R"(
HloModule test, is_scheduled=true
while-cond {
param = (u32[], (f32[1,1024,1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(25)
ROOT cond-result = pred[] compare(count, ub), direction=LT
}
while-body {
param = (u32[], (f32[1,1024,1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
recv-done.q = (f32[1,1024,1024], token[]) get-tuple-element(param), index=1
send-data = f32[1, 1024, 1024] get-tuple-element(recv-done.q), index=0
c1 = u32[] constant(1)
new-count = u32[] add(count, c1)
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
send = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
recv-done.p = (f32[1,1024,1024], token[]) recv-done(recv), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.p = token[] send-done(send), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
gte.0 = f32[1,1024,1024] get-tuple-element(recv-done.p), index=0
gte.1 = token[] get-tuple-element(recv-done.p), index=1
recv-done-tuple = (f32[1,1024,1024], token[]) tuple(gte.0, gte.1)
ROOT body-result = (u32[], (f32[1,1024,1024], token[]), token[])
tuple(new-count, recv-done-tuple, send-done.p)
}
while-cond-2 {
param = (u32[], (f32[1,1024,1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(25)
ROOT cond-result = pred[] compare(count, ub), direction=LT
}
while-body-2 {
param = (u32[], (f32[1,1024,1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
recv-done.q = (f32[1,1024,1024], token[]) get-tuple-element(param), index=1
send-data = f32[1, 1024, 1024] get-tuple-element(recv-done.q), index=0
c1 = u32[] constant(1)
new-count = u32[] add(count, c1)
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
send = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
recv-done.p = (f32[1,1024,1024], token[]) recv-done(recv), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.p = token[] send-done(send), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
gte.0 = f32[1,1024,1024] get-tuple-element(recv-done.p), index=0
gte.1 = token[] get-tuple-element(recv-done.p), index=1
recv-done-tuple = (f32[1,1024,1024], token[]) tuple(gte.0, gte.1)
ROOT body-result = (u32[], (f32[1,1024,1024], token[]), token[])
tuple(new-count, recv-done-tuple, send-done.p)
}
ENTRY main {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
after-all.1 = token[] after-all()
recv.1 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.1), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
send.1 = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all.1), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
recv-done.1.p = (f32[1,1024,1024], token[]) recv-done(recv.1), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.1.p = token[] send-done(send.1), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
while-init.p = (u32[], (f32[1,1024,1024], token[]), token[])
tuple(c0, recv-done.1.p, send-done.1.p)
while-result.p = (u32[], (f32[1,1024,1024], token[]), token[])
while(while-init.p),
body=while-body, condition=while-cond,
backend_config={"known_trip_count":{"n":"25"}}
recv-done.1.q = (f32[1,1024,1024], token[]) get-tuple-element(while-result.p), index=1
after-all-2.1 = token[] after-all()
recv-2.1 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all-2.1), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
send-2.1 = (f32[1, 1024, 1024], u32[], token[]) send(recv-done.1.q, after-all-2.1), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
recv-done-2.1.p = (f32[1,1024,1024], token[]) recv-done(recv-2.1), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done-2.1.p = token[] send-done(send-2.1), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
while-init-2.p = (u32[], (f32[1,1024,1024], token[]), token[])
tuple(c0, recv-done-2.1.p, send-done-2.1.p)
while-result-2.p = (u32[], (f32[1,1024,1024], token[]), token[])
while(while-init-2.p),
body=while-body-2, condition=while-cond-2,
backend_config={"known_trip_count":{"n":"25"}}
recv-done-2.1.q = (f32[1,1024,1024], token[]) get-tuple-element(while-result-2.p), index=1
ROOT entry-result = f32[1, 1024, 1024] get-tuple-element(recv-done-2.1.q), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
PipelinedP2PRewriter rewriter;
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
EXPECT_TRUE(changed);
}
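// Pipelines two send/recv chains (pipeline attributes "0" and "1") through a
// single while loop; the rewritten loop state carries both recv/send pairs.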
TEST_F(PipelinedP2pRewriterTest, SendRecvPipelined2) {
const char* kModuleStr = R"(
HloModule test, is_scheduled=true
while-cond {
param = (u32[], (f32[1,1024,1024], token[]), token[],
(f32[1,1024,1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(25)
ROOT cond-result = pred[] compare(count, ub), direction=LT
}
while-body {
param = (u32[], (f32[1,1024,1024], token[]), token[],
(f32[1,1024,1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
recv-done.0.q = (f32[1,1024,1024], token[]) get-tuple-element(param), index=1
recv-data.0 = f32[1, 1024, 1024] get-tuple-element(recv-done.0.q), index=0
recv-done.1.q = (f32[1,1024,1024], token[]) get-tuple-element(param), index=3
recv-data.1 = f32[1, 1024, 1024] get-tuple-element(recv-done.1.q), index=0
replica = u32[] replica-id()
constant0 = u32[] constant(0)
compare0 = pred[] compare(replica, constant0), direction=EQ
compare = pred[1, 1024, 1024] broadcast(compare0), dimensions={}
recv-data = f32[1, 1024, 1024] select(compare, recv-data.0, recv-data.1)
c1 = u32[] constant(1)
new-count = u32[] add(count, c1)
c10 = u32[] constant(10)
sum = u32[] add(replica, c10)
sum2 = u32[] add(sum, count)
conv = f32[] convert(sum2)
p = f32[1, 1024, 1024] broadcast(conv), dimensions={}
b = f32[1, 1024, 1024] add(p, recv-data)
c = f32[1, 1024, 1024] multiply(b, b)
d = f32[1, 1024, 1024] tan(c)
s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
send-data = f32[1, 1024, 1024] add(c, s)
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
send = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
recv-done.p = (f32[1,1024,1024], token[]) recv-done(recv), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.p = token[] send-done(send), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
after-all.1 = token[] after-all()
recv.1 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.1), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}",
_xla_send_recv_pipeline="1"
}
send.1 = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all.1),
channel_id=2, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}",
_xla_send_recv_pipeline="1"
}
recv-done.1.p = (f32[1,1024,1024], token[]) recv-done(recv.1), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
send-done.1.p = token[] send-done(send.1), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
ROOT body-result = (u32[], (f32[1,1024,1024], token[]), token[],
(f32[1,1024,1024], token[]), token[])
tuple(new-count, recv-done.p, send-done.p, recv-done.1.p, send-done.1.p)
}
ENTRY main {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
after-all.2 = token[] after-all()
recv.2 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.2), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
send.2 = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all.2), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
recv-done.2.p = (f32[1,1024,1024], token[]) recv-done(recv.2), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.2.p = token[] send-done(send.2), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
after-all.3 = token[] after-all()
recv.3 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.3), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}",
_xla_send_recv_pipeline="1"
}
send.3 = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all.3), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}",
_xla_send_recv_pipeline="1"
}
recv-done.3.p = (f32[1,1024,1024], token[]) recv-done(recv.3), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
send-done.3.p = token[] send-done(send.3), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
while-init.p = (u32[], (f32[1,1024,1024], token[]), token[],
(f32[1,1024,1024], token[]), token[]) tuple(c0, recv-done.2.p, send-done.2.p, recv-done.3.p, send-done.3.p)
while-result.p = (u32[], (f32[1,1024,1024], token[]), token[],
(f32[1,1024,1024], token[]), token[]) while(while-init.p),
body=while-body, condition=while-cond,
backend_config={"known_trip_count":{"n":"25"}}
recv-done.2.q = (f32[1,1024,1024], token[]) get-tuple-element(while-result.p), index=1
recv-data.2 = f32[1, 1024, 1024] get-tuple-element(recv-done.2.q), index=0
recv-done.3.q = (f32[1,1024,1024], token[]) get-tuple-element(while-result.p), index=3
recv-data.3 = f32[1, 1024, 1024] get-tuple-element(recv-done.3.q), index=0
replica = u32[] replica-id()
constant0 = u32[] constant(0)
compare0 = pred[] compare(replica, constant0), direction=EQ
compare = pred[1, 1024, 1024] broadcast(compare0), dimensions={}
ROOT entry-result = f32[1, 1024, 1024] select(compare, recv-data.2, recv-data.3)
}
)";
const char* kExpected = R"(
CHECK: %while-body (param.1: (u32[], (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]))) -> (u32[], (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[])) {
CHECK: %param.1 = parameter(0)
CHECK: %get-tuple-element = get-tuple-element(%param.1), index=1
CHECK: %get-tuple-element.1 = get-tuple-element(%param.1), index=2
CHECK: %get-tuple-element.2 = get-tuple-element(%param.1), index=3
CHECK: %get-tuple-element.3 = get-tuple-element(%param.1), index=4
CHECK: %count.1 = get-tuple-element(%param.1), index=0
CHECK: %recv-done = recv-done(%get-tuple-element), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: %recv-data.0 = get-tuple-element(%recv-done), index=0
CHECK: %recv-done.1 = recv-done(%get-tuple-element.2), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1"}
CHECK: %recv-data.1 = get-tuple-element(%recv-done.1), index=0
CHECK: %replica = replica-id()
CHECK: %constant0 = constant(0)
CHECK: %compare0 = compare(%replica, %constant0), direction=EQ
CHECK: %compare = broadcast(%compare0), dimensions={}
CHECK: %recv-data.2 = select(%compare, %recv-data.0, %recv-data.1)
CHECK: %c1 = constant(1)
CHECK: %new-count = add(%count.1, %c1)
CHECK: %c10 = constant(10)
CHECK: %sum = add(%replica, %c10)
CHECK: %sum2 = add(%sum, %count.1)
CHECK: %conv = convert(%sum2)
CHECK: %p = broadcast(%conv), dimensions={}
CHECK: %b = add(%p, %recv-data.2)
CHECK: %c = multiply(%b, %b)
CHECK: %d = tan(%c)
CHECK: %s = dot(%c, %d), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
CHECK: %send-data = add(%c, %s)
CHECK: %after-all = after-all()
CHECK: %send-done = send-done(%get-tuple-element.1), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: %send-done.1 = send-done(%get-tuple-element.3), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1"}
CHECK{LITERAL}: %recv = recv(%after-all), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs="{{3,0}}"}
CHECK{LITERAL}: %send = send(%send-data, %after-all), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs="{{3,0}}"}
CHECK: %after-all.1 = after-all()
CHECK{LITERAL}: %recv.1 = recv(%after-all.1), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1",_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}"}
CHECK{LITERAL}: %send.1 = send(%send-data, %after-all.1), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1",_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}"}
CHECK: ROOT %tuple = tuple(%new-count, %recv, %send, %recv.1, %send.1)
CHECK: }
CHECK: %while-cond (param: (u32[], (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]))) -> pred[] {
CHECK: %param = parameter(0)
CHECK: %count = get-tuple-element(%param), index=0
CHECK: %ub = constant(25)
CHECK: ROOT %cond-result = compare(%count, %ub), direction=LT
CHECK: }
CHECK: ENTRY %main () -> f32[1,1024,1024] {
CHECK: %c0 = constant(0)
CHECK: %f0 = constant(0)
CHECK: %init = broadcast(%f0), dimensions={}
CHECK: %after-all.2 = after-all()
CHECK{LITERAL}: %recv.2 = recv(%after-all.2), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs="{{3,0}}"}
CHECK{LITERAL}: %send.2 = send(%init, %after-all.2), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs="{{3,0}}"}
CHECK: %after-all.3 = after-all()
CHECK{LITERAL}: %recv.3 = recv(%after-all.3), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1",_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}"}
CHECK{LITERAL}: %send.3 = send(%init, %after-all.3), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1",_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}"}
CHECK: %while-init = tuple(%c0, %recv.2, %send.2, %recv.3, %send.3)
CHECK{LITERAL}: %while-result = while(%while-init), condition=%while-cond, body=%while-body, backend_config={"known_trip_count":{"n":"25"}}
CHECK: %get-tuple-element.4 = get-tuple-element(%while-result), index=1
CHECK: %get-tuple-element.5 = get-tuple-element(%while-result), index=2
CHECK: %get-tuple-element.6 = get-tuple-element(%while-result), index=3
CHECK: %get-tuple-element.7 = get-tuple-element(%while-result), index=4
CHECK: %recv-done.2 = recv-done(%get-tuple-element.4), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: %recv-data.3 = get-tuple-element(%recv-done.2), index=0
CHECK: %recv-done.3 = recv-done(%get-tuple-element.6), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1"}
CHECK: %recv-data.4 = get-tuple-element(%recv-done.3), index=0
CHECK: %replica.1 = replica-id()
CHECK: %constant0.1 = constant(0)
CHECK: %compare0.1 = compare(%replica.1, %constant0.1), direction=EQ
CHECK: %compare.1 = broadcast(%compare0.1), dimensions={}
CHECK: %send-done.2 = send-done(%get-tuple-element.5), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: %send-done.3 = send-done(%get-tuple-element.7), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1"}
CHECK: ROOT %entry-result = select(%compare.1, %recv-data.3, %recv-data.4)
CHECK: })";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
PipelinedP2PRewriter rewriter;
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
EXPECT_TRUE(changed);
DoFileCheck(module.get(), kExpected);
}
}
}
} |
2,054 | cpp | tensorflow/tensorflow | runtime_intrinsics | third_party/xla/xla/service/gpu/runtime_intrinsics.cc | third_party/xla/xla/service/gpu/runtime_intrinsics_test.cc | #ifndef XLA_SERVICE_GPU_RUNTIME_INTRINSICS_H_
#define XLA_SERVICE_GPU_RUNTIME_INTRINSICS_H_
#include "absl/strings/string_view.h"
namespace xla {
inline constexpr absl::string_view kXlaGpuAssertCustomCallTag =
"__xla_gpu_assert";
// Needed by the registration and test below; without it the .cc's reference
// to kNopReturnTokenCustomCallTarget does not compile. The value matches the
// custom_call_target used in the unit test.
inline constexpr absl::string_view kNopReturnTokenCustomCallTarget =
"NopReturnToken";
}
#endif
#include "xla/service/gpu/runtime_intrinsics.h"
#include <cstdint>
#include <string>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/ascii.h"
#include "absl/strings/string_view.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/custom_call_status.h"
#include "xla/service/custom_call_target_registry.h"
#include "xla/service/platform_util.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
std::string GetGpuPlatformName() {
return absl::AsciiStrToUpper(
PlatformUtil::CanonicalPlatformName("gpu").value());
}
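// Copies the PRED result of the asserted condition from device to host,
// blocks until the copy completes, and turns a false value into an Internal
// error carrying error_msg.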
absl::Status AssertOnGpu(void* stream_handle, void* buffer,
absl::string_view error_msg) {
TF_ASSIGN_OR_RETURN(
se::Platform * platform,
se::PlatformManager::PlatformWithName(GetGpuPlatformName()));
se::StreamExecutorConfig config;
config.gpu_stream = stream_handle;
TF_ASSIGN_OR_RETURN(se::StreamExecutor * executor,
platform->GetExecutor(config));
se::Stream* stream = executor->FindAllocatedStream(stream_handle);
if (!stream) {
return Internal("Stream not found for: %p", stream_handle);
}
int8_t expected = false;
int64_t byte_size = sizeof(int8_t);
CHECK_EQ(byte_size, ShapeUtil::ByteSizeOfPrimitiveType(PrimitiveType::PRED));
TF_RETURN_IF_ERROR(stream->Memcpy(
&expected, se::DeviceMemoryBase{buffer, static_cast<uint64_t>(byte_size)},
byte_size));
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
if (!static_cast<bool>(expected)) {
return Internal("%s", error_msg);
}
return absl::OkStatus();
}
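// Custom-call entry point registered under kXlaGpuAssertCustomCallTag; the
// opaque payload is the assertion message reported on failure.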
void AssertionCustomCall(void* stream_handle, void** buffers,
const char* opaque, int opaque_len,
XlaCustomCallStatus* status) {
absl::Status s =
AssertOnGpu(stream_handle, buffers[0],
absl::string_view{opaque, static_cast<uint64_t>(opaque_len)});
if (!s.ok()) {
auto msg = s.message();
XlaCustomCallStatusSetFailure(status, msg.data(), msg.size());
}
}
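// Intentionally a no-op: lets a custom call produce a token without doing
// any device work.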
void NopReturnTokenCustomCall(void* stream_handle, void** buffers,
const char* opaque, int opaque_len,
XlaCustomCallStatus* status) {
VLOG(1) << "NopReturnTokenCustomCall called.";
}
}
XLA_REGISTER_CUSTOM_CALL_TARGET_WITH_SYM(
std::string(kXlaGpuAssertCustomCallTag), AssertionCustomCall,
GetGpuPlatformName());
XLA_REGISTER_CUSTOM_CALL_TARGET_WITH_SYM(
std::string(kNopReturnTokenCustomCallTarget), NopReturnTokenCustomCall,
GetGpuPlatformName());
} | #include <memory>
#include <utility>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using RuntimeIntrinsicsTest = HloTestBase;
TEST_F(RuntimeIntrinsicsTest, NopReturnTokenWorks) {
constexpr absl::string_view kHloText = R"(
HloModule m
ENTRY e {
constant = u32[2]{0} constant({0, 1})
ROOT nop_return_token = token[] custom-call(constant), custom_call_target="NopReturnToken", custom_call_has_side_effect=true
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
GetOptimizedModule(kHloText));
EXPECT_EQ(module->entry_computation()->instruction_count(), 2);
EXPECT_TRUE(Run(std::move(module), false));
}
}
}
} |
2,055 | cpp | tensorflow/tensorflow | reduction_layout_normalizer | third_party/xla/xla/service/gpu/transforms/reduction_layout_normalizer.cc | third_party/xla/xla/service/gpu/transforms/reduction_layout_normalizer_test.cc | #ifndef XLA_SERVICE_GPU_REDUCTION_LAYOUT_NORMALIZER_H_
#define XLA_SERVICE_GPU_REDUCTION_LAYOUT_NORMALIZER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
class ReductionLayoutNormalizer : public HloModulePass {
public:
absl::string_view name() const override {
return "reduction-layout-normalizer";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/reduction_layout_normalizer.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
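// Rewrites reduce ops so each operand's logical dimension order matches its
// physical layout: variadic inputs with mismatched layouts are unified via a
// copy, operands are bitcast to layout-normalized shapes, and the result is
// bitcast back to the original reduce shape.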
class EnforceMinorToMajorReduceOpVisitor : public DfsHloRewriteVisitor {
absl::Status HandleReduce(HloInstruction *hlo) override {
auto reduce = Cast<HloReduceInstruction>(hlo);
VLOG(5) << "Input: " << reduce->ToString();
int operand_idx = -1;
absl::InlinedVector<HloInstruction *, 2> canonical_reduce_inputs;
absl::InlinedVector<Shape, 2> new_reduce_shapes;
DimensionVector out_reduce_dimensions;
const Shape &first_instruction_shape = reduce->inputs()[0]->shape();
for (HloInstruction *operand : reduce->inputs()) {
operand_idx++;
if (operand_idx != 0 &&
operand->shape().layout() != first_instruction_shape.layout()) {
HloInstruction *copy =
reduce->parent()->AddInstruction(HloInstruction::CreateUnary(
operand->shape(), HloOpcode::kCopy, operand));
LayoutUtil::ClearLayout(copy->mutable_shape());
TF_RETURN_IF_ERROR(LayoutUtil::CopyLayoutBetweenShapes(
first_instruction_shape, copy->mutable_shape()));
copy->set_metadata(operand->metadata());
operand = copy;
VLOG(3) << "Copying to establish consistent inputs layout: "
<< copy->ToString();
}
const Shape &operand_shape = operand->shape();
const Layout &operand_layout = operand_shape.layout();
const Shape &reduce_shape =
reduce->shape().IsTuple() ? reduce->shape().tuple_shapes(operand_idx)
: reduce->shape();
DimensionVector new_reduce_dimensions;
DimensionVector new_operand_shape_data;
DimensionVector new_reduce_shape_data;
DimensionVector new_reduce_shape_layout(reduce_shape.rank());
std::vector<int64_t> reduce_shape_logical_to_physical =
LayoutUtil::MakeLogicalToPhysical(reduce_shape.layout());
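// Maps an operand logical dimension to the corresponding logical dimension
// of the reduce output by discounting the reduced dimensions below it.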
auto to_reduce_logical_dim = [&](int64_t op_logical_dim) {
return op_logical_dim -
absl::c_count_if(reduce->dimensions(), [&](int64_t dim) {
CHECK(dim != op_logical_dim);
return dim < op_logical_dim;
});
};
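// Walk the operand's physical dimensions from major to minor, so the new
// operand shape's logical order coincides with its physical order.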
for (int i = 0; i < operand_shape.rank(); i++) {
int64_t major_to_minor_dim_idx = operand_shape.rank() - i - 1;
int64_t logical_dim =
operand_layout.minor_to_major(major_to_minor_dim_idx);
int64_t dim_size = operand_shape.dimensions(logical_dim);
VLOG(5) << "Processing logical dimension " << logical_dim << " of size "
<< dim_size;
new_operand_shape_data.push_back(dim_size);
if (absl::c_linear_search(reduce->dimensions(), logical_dim)) {
new_reduce_dimensions.push_back(i);
} else {
new_reduce_shape_data.push_back(dim_size);
int64_t logical_reduce_dim = to_reduce_logical_dim(logical_dim);
int64_t physical_reduce_dim =
reduce_shape_logical_to_physical[logical_reduce_dim];
VLOG(5) << "logical_reduce_dim = " << logical_reduce_dim << ", "
<< "physical_reduce_dim = " << physical_reduce_dim;
new_reduce_shape_layout[reduce_shape.rank() - physical_reduce_dim -
1] = new_reduce_shape_data.size() - 1;
}
}
Shape new_operand_shape = ShapeUtil::MakeShape(
operand_shape.element_type(), new_operand_shape_data);
Shape new_reduce_shape = ShapeUtil::MakeShapeWithDenseLayout(
reduce_shape.element_type(), new_reduce_shape_data,
new_reduce_shape_layout);
if (new_operand_shape == operand_shape && reduce->inputs().size() == 1) {
return absl::OkStatus();
}
HloInstruction *canonical_reduce_input =
new_operand_shape != operand_shape
? reduce->parent()->AddInstruction(
HloInstruction::CreateBitcast(new_operand_shape, operand))
: operand;
canonical_reduce_input->set_metadata(operand->metadata());
VLOG(5) << "Reduction input: " << canonical_reduce_input->ToString();
new_reduce_shapes.push_back(new_reduce_shape);
canonical_reduce_inputs.push_back(canonical_reduce_input);
if (out_reduce_dimensions.empty()) {
out_reduce_dimensions = new_reduce_dimensions;
} else {
TF_RET_CHECK(out_reduce_dimensions == new_reduce_dimensions);
}
}
Shape new_reduce_shape = ShapeUtil::MakeMaybeTupleShape(new_reduce_shapes);
std::unique_ptr<HloInstruction> new_reduce = HloInstruction::CreateReduce(
new_reduce_shape, canonical_reduce_inputs, reduce->init_values(),
out_reduce_dimensions, reduce->to_apply());
VLOG(5) << "Generated new reduction: " << new_reduce->ToString();
const Shape &orig_reduce_shape = reduce->shape();
if (new_reduce_shape != orig_reduce_shape) {
HloInstruction *wrapped_reduce =
reduce->parent()->AddInstruction(std::move(new_reduce));
if (!new_reduce_shape.IsTuple()) {
new_reduce =
HloInstruction::CreateBitcast(reduce->shape(), wrapped_reduce);
} else {
absl::InlinedVector<HloInstruction *, 2> out;
for (int oidx = 0; oidx < reduce->input_count(); oidx++) {
HloInstruction *gte = reduce->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(wrapped_reduce, oidx));
out.push_back(
reduce->parent()->AddInstruction(HloInstruction::CreateBitcast(
orig_reduce_shape.tuple_shapes(oidx), gte)));
}
new_reduce = HloInstruction::CreateTuple(out);
}
}
VLOG(5) << "Generated output: " << new_reduce->ToString();
return ReplaceWithNewInstruction(reduce, std::move(new_reduce));
}
};
absl::StatusOr<bool> ReductionLayoutNormalizer::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
TF_ASSIGN_OR_RETURN(bool changed,
EnforceMinorToMajorReduceOpVisitor().RunOnModule(
module, execution_threads));
return changed;
}
}
} | #include "xla/service/gpu/reduction_layout_normalizer.h"
#include <optional>
#include "absl/strings/string_view.h"
#include "xla/error_spec.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class ReductionLayoutNormalizerTest : public HloTestBase {
public:
void CheckReductionLayoutNormalizer(
absl::string_view hlo, std::optional<absl::string_view> expected) {
RunAndFilecheckHloRewrite(hlo, gpu::ReductionLayoutNormalizer{}, expected);
}
};
TEST_F(ReductionLayoutNormalizerTest, LayoutCanonicalizerTest) {
const char* hlo = R"(
HloModule ReduceWithLayoutChange
add {
x0 = f32[] parameter(0)
y0 = f32[] parameter(1)
ROOT add0 = f32[] add(x0, y0)
}
ENTRY main {
arg0 = f32[4,5,5,16,12,12,3,3]{2,3,5,4,0,7,6,1} parameter(0)
constant0 = f32[] constant(0)
ROOT reduce0 = f32[4,5,16,12,12]{4,3,2,1,0} reduce(arg0, constant0),
dimensions={1,6,7}, to_apply=add
}
)";
CheckReductionLayoutNormalizer(hlo,
R"(
)");
}
TEST_F(ReductionLayoutNormalizerTest, LayoutCanonicalizerTestVariadic) {
const char* hlo = R"(
HloModule ReduceWithLayoutChangeVariadic
argmax {
running_max = f32[] parameter(0)
running_max_idx = u32[] parameter(1)
current_value = f32[] parameter(2)
current_value_idx = u32[] parameter(3)
current = (f32[], u32[]) tuple(running_max, running_max_idx)
potential = (f32[], u32[]) tuple(current_value, current_value_idx)
cmp_code = pred[] compare(current_value, running_max), direction=GT
new_max = f32[] select(cmp_code, current_value, running_max)
new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
arg0 = f32[4,5,5,16,12,12,3,3]{2,3,5,4,0,7,6,1} parameter(0)
idxs = u32[4,5,5,16,12,12,3,3]{2,3,5,4,0,7,6,1} parameter(1)
constant0 = f32[] constant(0)
constant1 = u32[] constant(0)
ROOT reduce0 = (
f32[4,5,16,12,12]{4,3,2,1,0},
u32[4,5,16,12,12]{4,3,2,1,0}
) reduce(arg0, idxs, constant0,constant1), dimensions={1,6,7}, to_apply=argmax
}
)";
CheckReductionLayoutNormalizer(hlo,
R"(
)");
}
TEST_F(ReductionLayoutNormalizerTest,
LayoutCanonicalizerTestVariadicDifferentLayouts) {
const char* hlo = R"(
HloModule ReduceWithLayoutChangeVariadicDifferent
argmax {
running_max = f32[] parameter(0)
running_max_idx = u32[] parameter(1)
current_value = f32[] parameter(2)
current_value_idx = u32[] parameter(3)
current = (f32[], u32[]) tuple(running_max, running_max_idx)
potential = (f32[], u32[]) tuple(current_value, current_value_idx)
cmp_code = pred[] compare(current_value, running_max), direction=GT
new_max = f32[] select(cmp_code, current_value, running_max)
new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
arg0 = f32[2,3,4,7]{2,1,0,3} parameter(0)
idxs = u32[2,3,4,7]{3,2,1,0} parameter(1)
constant0 = f32[] constant(0)
constant1 = u32[] constant(0)
ROOT reduce0 = (
f32[2,3,4]{2,1,0},
u32[2,3,4]{2,1,0}
) reduce(arg0, idxs, constant0,constant1), dimensions={3}, to_apply=argmax
}
)";
CheckReductionLayoutNormalizer(hlo,
R"(
)");
EXPECT_TRUE(RunAndCompare(hlo, ErrorSpec{1e-5, 1e-5}));
}
}
} |
2,056 | cpp | tensorflow/tensorflow | gpu_sanitize_constant_names | null | null | #ifndef XLA_SERVICE_GPU_GPU_SANITIZE_CONSTANT_NAMES_H_
#define XLA_SERVICE_GPU_GPU_SANITIZE_CONSTANT_NAMES_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
class GpuSanitizeConstantNames : public HloModulePass {
public:
absl::string_view name() const override { return "sanitize-constant-names"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/gpu_sanitize_constant_names.h"
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/llvm_ir/buffer_assignment_util.h"
#include "xla/service/name_uniquer.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
absl::StatusOr<bool> GpuSanitizeConstantNames::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
NameUniquer instr_name_uniquer("_");
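// First pass: reserve the names of all non-constant instructions so the
// sanitized constant names chosen below cannot collide with them.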
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* instr : computation->instructions()) {
if (instr->opcode() == HloOpcode::kConstant) {
continue;
}
instr_name_uniquer.GetUniqueName(instr->name());
}
}
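// Second pass: rename each constant to its sanitized buffer name, uniquified
// against the names reserved above and the module-level uniquer.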
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* instr : computation->instructions()) {
if (instr->opcode() != HloOpcode::kConstant) {
continue;
}
std::string sanitized_name = llvm_ir::SanitizeConstantName(*instr);
instr->SetAndSanitizeName(sanitized_name);
instr->UniquifyName(&instr_name_uniquer);
module->instruction_name_uniquer().GetUniqueName(instr->name());
changed = true;
}
}
return changed;
}
}
} | #include "xla/service/gpu/gpu_sanitize_constant_names.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/literal_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
using SanitizeConstantNamesTest = HloTestBase;
TEST_F(SanitizeConstantNamesTest, InstructionNameWithHyphenSanitized) {
const char *const kHloString = R"(
HloModule HyphenInInstructionName
ENTRY kernelEntry {
ROOT equal-to = s32[2]{0} constant({42, 73})
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_TRUE(GpuSanitizeConstantNames().Run(module.get()).value());
HloInstruction *root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->name(), "equal_to");
}
TEST_F(SanitizeConstantNamesTest, InstructionNameWithDotSanitized) {
const char *const kHloString = R"(
HloModule HyphenInInstructionName
ENTRY kernelEntry {
ROOT equal.to = s32[2]{0} constant({42, 73})
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_TRUE(GpuSanitizeConstantNames().Run(module.get()).value());
HloInstruction *root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->name(), "equal_to");
}
TEST_F(SanitizeConstantNamesTest, NewInstructionNameRegisteredWithModule) {
const char *const kHloString = R"(
HloModule HyphenInInstructionName
ENTRY kernelEntry {
ROOT equal.to = s32[2]{0} constant({42, 73})
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_TRUE(GpuSanitizeConstantNames().Run(module.get()).value());
HloInstruction *root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->name(), "equal_to");
auto constant_instr =
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1));
constant_instr->SetAndSanitizeName("equal_to");
module->entry_computation()->AddInstruction(std::move(constant_instr));
EXPECT_THAT(FindInstruction(module.get(), "equal_to.1"),
GmockMatch(m::Constant()));
}
TEST_F(SanitizeConstantNamesTest, BufferSanitizedNameCollisionResolved) {
const char *const kHloString = R"(
HloModule BufferSanitizedName
ENTRY kernelEntry {
equal.to = s32[2]{0} constant({42, 73})
equal-to = s32[2]{0} constant({67, 3})
ROOT equal_to = s32[2]{0} add(equal.to, equal-to)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_TRUE(GpuSanitizeConstantNames().Run(module.get()).value());
EXPECT_THAT(FindInstruction(module.get(), "equal_to_1"),
GmockMatch(m::Constant()));
EXPECT_THAT(FindInstruction(module.get(), "equal_to_2"),
GmockMatch(m::Constant()));
}
}
}
} |
2,057 | cpp | tensorflow/tensorflow | dot_dimension_sorter | third_party/xla/xla/service/gpu/transforms/dot_dimension_sorter.cc | third_party/xla/xla/service/gpu/transforms/dot_dimension_sorter_test.cc | #ifndef XLA_SERVICE_GPU_DOT_DIMENSION_SORTER_H_
#define XLA_SERVICE_GPU_DOT_DIMENSION_SORTER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
class DotDimensionSorter : public HloModulePass {
public:
absl::string_view name() const override { return "dot_dimension_sorter"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/dot_dimension_sorter.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/permutation_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
namespace {
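// Applies one shared permutation to both contracting dimension lists so the
// consecutive side becomes sorted while lhs/rhs contracting pairs stay
// aligned.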
absl::Status SortDotDimensions(HloDotInstruction* dot) {
const DotDimensionNumbers& dims = dot->dot_dimension_numbers();
DotDimensionNumbers new_dims(dims);
new_dims.clear_lhs_contracting_dimensions();
new_dims.clear_rhs_contracting_dimensions();
const bool sort_by_lhs =
DistinctNumbersAreConsecutiveIfSorted(dims.lhs_contracting_dimensions());
const absl::Span<const int64_t>& sort_key =
sort_by_lhs ? dims.lhs_contracting_dimensions()
: dims.rhs_contracting_dimensions();
std::vector<int64_t> permutation;
for (const int64_t a : sort_key) {
permutation.push_back(a - *absl::c_min_element(sort_key));
}
const std::vector<int64_t> sorted_lhs =
Permute(dims.lhs_contracting_dimensions(), permutation);
*new_dims.mutable_lhs_contracting_dimensions() = {sorted_lhs.begin(),
sorted_lhs.end()};
const std::vector<int64_t> sorted_rhs =
Permute(dims.rhs_contracting_dimensions(), permutation);
*new_dims.mutable_rhs_contracting_dimensions() = {sorted_rhs.begin(),
sorted_rhs.end()};
std::unique_ptr<HloInstruction> new_dot = HloInstruction::CreateDot(
dot->shape(), dot->mutable_operand(0), dot->mutable_operand(1), new_dims,
dot->precision_config(), {dot->sparsity().begin(), dot->sparsity().end()},
absl::MakeSpan(dot->operands()).subspan(HloDotInstruction::kOperands));
dot->SetupDerivedInstruction(new_dot.get());
VLOG(3) << "Sorted dot() dimensions:\n"
<< "\t before: " << dot->ToString() << "\n"
<< "\t after: " << new_dot->ToString();
return dot->parent()->ReplaceWithNewInstruction(dot, std::move(new_dot));
}
}
absl::StatusOr<bool> DotDimensionSorter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
std::vector<HloInstruction*> dots_to_process;
for (const HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instr : computation->instructions()) {
if (instr->opcode() != HloOpcode::kDot) {
continue;
}
if ((instr->operand(0)->shape().has_layout() &&
!LayoutUtil::IsMonotonicWithDim0Major(
instr->operand(0)->shape().layout())) ||
(instr->operand(1)->shape().has_layout() &&
!LayoutUtil::IsMonotonicWithDim0Major(
instr->operand(1)->shape().layout()))) {
continue;
}
const DotDimensionNumbers& dims = instr->dot_dimension_numbers();
if (dims.lhs_contracting_dimensions_size() == 0) {
continue;
}
const bool cons_lhs = DistinctNumbersAreConsecutiveIfSorted(
dims.lhs_contracting_dimensions());
const bool cons_rhs = DistinctNumbersAreConsecutiveIfSorted(
dims.rhs_contracting_dimensions());
const bool sorted_lhs =
absl::c_is_sorted(dims.lhs_contracting_dimensions());
const bool sorted_rhs =
absl::c_is_sorted(dims.rhs_contracting_dimensions());
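// Qualify the dot when one side's contracting dims are consecutive but
// unsorted and the other side's are not consecutive, or when both sides are
// consecutive and unsorted.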
if ((cons_lhs && !sorted_lhs && !cons_rhs) ||
(cons_rhs && !sorted_rhs && !cons_lhs) ||
(cons_lhs && !sorted_lhs && cons_rhs && !sorted_rhs)) {
dots_to_process.push_back(instr);
}
}
}
if (dots_to_process.empty()) {
return false;
}
for (HloInstruction* dot : dots_to_process) {
TF_RETURN_IF_ERROR(SortDotDimensions(Cast<HloDotInstruction>(dot)));
}
return true;
}
}
} | #include "xla/service/gpu/dot_dimension_sorter.h"
#include <memory>
#include <gtest/gtest.h>
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/tests/gpu_codegen_test.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
class WithoutDotDimensionSorterTest : public GpuCodegenTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = GpuCodegenTest::GetDebugOptionsForTest();
debug_options.add_xla_disable_hlo_passes("dot_dimension_sorter");
return debug_options;
}
};
TEST_F(WithoutDotDimensionSorterTest, UnsortedDimsCreateTransposes) {
const char* hlo_text = R"(
HloModule m
ENTRY e {
p0 = f16[1,14,9,32] parameter(0)
p1 = f16[12,9,32] parameter(1)
ROOT _ = f16[1,14,12] dot(p0, p1),
lhs_contracting_dims={3,2}, rhs_contracting_dims={2,1}
}
)";
MatchOptimizedHlo(hlo_text, R"(
; CHECK: transpose
)");
}
TEST_F(WithoutDotDimensionSorterTest, SortedDimsDoNotCreateTransposes) {
const char* hlo_text = R"(
HloModule m
ENTRY e {
p0 = f16[1,14,9,32] parameter(0)
p1 = f16[12,9,32] parameter(1)
ROOT _ = f16[1,14,12] dot(p0, p1),
lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2}
}
)";
MatchOptimizedHlo(hlo_text, R"(
; CHECK-NOT: transpose
)");
}
TEST_F(WithoutDotDimensionSorterTest, DimOrderCanBeChanged) {
const char* hlo_text_ref = R"(
HloModule m
ENTRY e {
p0 = f16[1,14,9,32] parameter(0)
p1 = f16[12,9,32] parameter(1)
ROOT _ = f16[1,14,12] dot(p0, p1),
lhs_contracting_dims={3,2}, rhs_contracting_dims={2,1}
}
)";
const char* hlo_text_modified = R"(
HloModule m
ENTRY e {
p0 = f16[1,14,9,32] parameter(0)
p1 = f16[12,9,32] parameter(1)
ROOT _ = f16[1,14,12] dot(p0, p1),
lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2}
}
)";
EXPECT_TRUE(RunAndCompareTwoModules(hlo_text_ref, hlo_text_modified,
ErrorSpec{1e-5, 1e-3},
true));
}
using DotDimensionSorterTest = GpuCodegenTest;
TEST_F(DotDimensionSorterTest, SortContractingDims) {
const char* module_string = R"(
HloModule m
ENTRY e {
p0 = f16[1,144,96,32] parameter(0)
p1 = f16[122,96,32] parameter(1)
ROOT _ = f16[1,144,122] dot(p0, p1),
lhs_contracting_dims={3,2}, rhs_contracting_dims={2,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
const auto& dims =
module->entry_computation()->root_instruction()->dot_dimension_numbers();
EXPECT_EQ(dims.lhs_contracting_dimensions(0), 3);
EXPECT_EQ(dims.lhs_contracting_dimensions(1), 2);
EXPECT_EQ(dims.rhs_contracting_dimensions(0), 2);
EXPECT_EQ(dims.rhs_contracting_dimensions(1), 1);
TF_ASSERT_OK_AND_ASSIGN(bool modified,
DotDimensionSorter().Run(module.get()));
EXPECT_TRUE(modified);
const auto& dims2 =
module->entry_computation()->root_instruction()->dot_dimension_numbers();
EXPECT_EQ(dims2.lhs_contracting_dimensions(0), 2);
EXPECT_EQ(dims2.lhs_contracting_dimensions(1), 3);
EXPECT_EQ(dims2.rhs_contracting_dimensions(0), 1);
EXPECT_EQ(dims2.rhs_contracting_dimensions(1), 2);
}
TEST_F(DotDimensionSorterTest, NothingToReorder) {
const char* module_string = R"(
HloModule m
ENTRY e {
p0 = f16[1,144,96,32] parameter(0)
p1 = f16[122,96,32] parameter(1)
ROOT _ = f16[1,144,122] dot(p0, p1),
lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool modified,
DotDimensionSorter().Run(module.get()));
EXPECT_FALSE(modified);
}
TEST_F(DotDimensionSorterTest, SparseDotSortContractingDims) {
const char* module_string = R"(
HloModule m
ENTRY e {
p0 = f16[1,144,96,16] parameter(0)
p1 = f16[122,96,32] parameter(1)
meta = u16[1,144,96,2] parameter(2)
ROOT _ = f16[1,144,122] dot(p0, p1, meta), sparsity=L.3@2:4,
lhs_contracting_dims={3,2}, rhs_contracting_dims={2,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool modified,
DotDimensionSorter().Run(module.get()));
EXPECT_TRUE(modified);
HloDotInstruction* dot = DynCast<HloDotInstruction>(
module->entry_computation()->root_instruction());
EXPECT_TRUE(dot != nullptr && dot->sparse_operands() == 1);
}
}
}
} |
2,058 | cpp | tensorflow/tensorflow | nvptx_compiler | third_party/xla/xla/service/gpu/nvptx_compiler.cc | third_party/xla/xla/service/gpu/nvptx_compiler_test.cc | #ifndef XLA_SERVICE_GPU_NVPTX_COMPILER_H_
#define XLA_SERVICE_GPU_NVPTX_COMPILER_H_
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/node_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "llvm/IR/Module.h"
#include "xla/autotune_results.pb.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/gpu_compiler.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/dnn.h"
#include "xla/xla.pb.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace gpu {
void WarnIfBadDriverJITVersion();
class NVPTXCompiler : public GpuCompiler {
public:
NVPTXCompiler();
int32_t GetToolkitVersion() const override;
absl::Status OptimizeHloConvolutionCanonicalization(
HloModule* hlo_module, se::GpuComputeCapability gpu_version,
se::dnn::VersionInfo dnn_version,
se::DeviceMemoryAllocator* device_allocator) override;
absl::Status OptimizeHloPostLayoutAssignment(
HloModule* hlo_module, se::StreamExecutor* stream_exec,
const CompileOptions& options, const TargetConfig& gpu_target_config,
tsl::thread::ThreadPool* thread_pool) override;
bool RequiresCollectiveScheduleLinearizer(
const HloModule* module, se::StreamExecutor* stream_exec) override;
absl::Status AddConvAndGemmAutotuningPasses(
HloPassPipeline* pipeline, HloModule* hlo_module,
AutotuneConfig& autotune_config,
tsl::thread::ThreadPool* thread_pool) override;
absl::Status AddGemmFusionAutotuningPasses(
HloPassPipeline* pipeline, HloModule* hlo_module,
AutotuneConfig& autotune_config, tsl::thread::ThreadPool* thread_pool,
const MultiProcessKeyValueStore& key_value_store) override;
absl::Status AddCustomKernelReplacementPasses(
HloPassPipeline* pipeline, const DebugOptions& debug_options) override;
absl::Status RunCudnnFusionCompilerPass(
HloModule* module, se::StreamExecutor* stream_exec,
Thunk::BinaryMap* dnn_compiled_graphs) override;
HloDataflowAnalysis::CanShareBuffer GetCanShareBuffer() const override;
absl::StatusOr<BackendCompileResult> CompileTargetBinary(
const HloModuleConfig& module_config, llvm::Module* llvm_module,
se::GpuComputeCapability gpu_version, bool relocatable,
const HloModule* debug_module, const CompileOptions& options) override;
enum class LinkingMethod {
kNone,
kNvLink,
kDriver,
};
absl::StatusOr<bool> CanUseLinkModules(
const HloModuleConfig& module_config) override;
private:
absl::StatusOr<std::vector<uint8_t>> LinkModules(
se::StreamExecutor* stream_exec,
std::vector<std::vector<uint8_t>> modules,
const DebugOptions& debug_options) override;
absl::Mutex mutex_;
absl::flat_hash_map<std::string, LinkingMethod> linking_methods_
ABSL_GUARDED_BY(mutex_);
absl::StatusOr<LinkingMethod> ChooseLinkingMethod(
const DebugOptions& debug_options);
absl::StatusOr<std::vector<uint8_t>> CompileGpuAsmOrGetCachedResult(
const std::string& ptx, se::CudaComputeCapability cc,
const HloModuleConfig& hlo_module_config, absl::string_view module_name,
bool relocatable, const CompileOptions& options);
struct CompilationCacheFlags {
template <typename H>
friend H AbslHashValue(H h, const CompilationCacheFlags& flags) {
return H::combine(std::move(h),
flags.filter_kernels_spilling_registers_on_autotuning);
}
friend bool operator==(const CompilationCacheFlags& a,
const CompilationCacheFlags& b) {
return a.filter_kernels_spilling_registers_on_autotuning ==
b.filter_kernels_spilling_registers_on_autotuning;
}
bool filter_kernels_spilling_registers_on_autotuning;
};
struct CompilationCacheKey {
CompilationCacheKey(std::string ptx, int cc_major, int cc_minor,
bool relocatable, CompilationCacheFlags flags)
: ptx(std::move(ptx)),
cc_major(cc_major),
cc_minor(cc_minor),
relocatable(relocatable),
flags(std::move(flags)) {}
template <typename H>
friend H AbslHashValue(H h, const CompilationCacheKey& key) {
return H::combine(std::move(h), key.ptx, key.cc_major, key.cc_minor,
key.relocatable, key.flags);
}
friend bool operator==(const CompilationCacheKey& a,
const CompilationCacheKey& b) {
return a.cc_major == b.cc_major && a.cc_minor == b.cc_minor &&
a.ptx == b.ptx && a.relocatable == b.relocatable &&
a.flags == b.flags;
}
std::string ptx;
int cc_major;
int cc_minor;
bool relocatable;
CompilationCacheFlags flags;
};
struct CompilationCacheValue {
bool compilation_done = false;
absl::StatusOr<std::vector<uint8_t>> maybe_cubin;
absl::Mutex mutex;
absl::CondVar compilation_done_cv;
};
absl::node_hash_map<CompilationCacheKey, CompilationCacheValue>
compilation_cache_ ABSL_GUARDED_BY(mutex_);
NVPTXCompiler(const NVPTXCompiler&) = delete;
NVPTXCompiler& operator=(const NVPTXCompiler&) = delete;
};
}
}
#endif
#include "xla/service/gpu/nvptx_compiler.h"
#include <array>
#include <cstdint>
#include <fstream>
#include <iterator>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/call_once.h"
#include "absl/cleanup/cleanup.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "third_party/gpus/cuda/include/cuda.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/pjrt/distributed/key_value_store_interface.h"
#include "xla/service/call_inliner.h"
#include "xla/service/convert_mover.h"
#include "xla/service/dot_dimension_merger.h"
#include "xla/service/dump.h"
#include "xla/service/float_normalization.h"
#include "xla/service/float_support.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/buffer_sharing.h"
#include "xla/service/gpu/conv_algorithm_picker.h"
#include "xla/service/gpu/cublas_pad_for_gemms.h"
#include "xla/service/gpu/cublas_padding_requirements.h"
#include "xla/service/gpu/cudnn_fused_conv_rewriter.h"
#include "xla/service/gpu/cudnn_fused_mha_rewriter.h"
#include "xla/service/gpu/cudnn_fused_mha_transpose_fusion.h"
#include "xla/service/gpu/cudnn_fusion_compiler.h"
#include "xla/service/gpu/cudnn_norm_rewriter.h"
#include "xla/service/gpu/cudnn_pad_for_convolutions.h"
#include "xla/service/gpu/cudnn_simplify_padding.h"
#include "xla/service/gpu/cudnn_vectorize_convolutions.h"
#include "xla/service/gpu/cudnn_workspace_rewriter.h"
#include "xla/service/gpu/cusolver_rewriter.h"
#include "xla/service/gpu/dot_sparsity_rewriter.h"
#include "xla/service/gpu/gemm_algorithm_picker.h"
#include "xla/service/gpu/gemm_fusion_autotuner.h"
#include "xla/service/gpu/gpu_algebraic_simplifier.h"
#include "xla/service/gpu/gpu_asm_opts_util.h"
#include "xla/service/gpu/gpu_compiler.h"
#include "xla/service/gpu/gpu_conv_padding_legalization.h"
#include "xla/service/gpu/gpu_conv_rewriter.h"
#include "xla/service/gpu/gpu_sort_rewriter.h"
#include "xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.h"
#include "xla/service/gpu/metrics.h"
#include "xla/service/gpu/move_copy_to_users.h"
#include "xla/service/gpu/target_constants.h"
#include "xla/service/gpu/triangular_solve_rewriter.h"
#include "xla/service/hlo_constant_folding.h"
#include "xla/service/hlo_cse.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_pass_fix.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/layout_normalization.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/service/reshape_decomposer.h"
#include "xla/service/reshape_mover.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/stream_executor/cuda/cuda_asm_compiler.h"
#include "xla/stream_executor/cuda/cuda_diagnostics.h"
#include "xla/stream_executor/cuda/cuda_platform_id.h"
#include "xla/stream_executor/cuda/ptx_compiler.h"
#include "xla/stream_executor/cuda/ptx_compiler_support.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/gpu/asm_compiler.h"
#include "xla/stream_executor/gpu/gpu_asm_opts.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/gpu/gpu_executor.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/util/env_var.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla {
namespace gpu {
namespace {
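// Tells FloatNormalization whether convolutions may stay in BF16: supported
// only with cuDNN >= 8.2 on Ampere (or newer) GPUs. Mixed-precision
// convolutions are never supported; all non-convolution ops pass through.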
class ConvBfloat16Support : public FloatSupport {
public:
explicit ConvBfloat16Support(
se::dnn::VersionInfo cudnn_version,
se::CudaComputeCapability cuda_compute_capability)
: FloatSupport(BF16),
is_conv_bf16_supported_((cudnn_version.major_version() > 8 ||
(cudnn_version.major_version() == 8 &&
cudnn_version.minor_version() >= 2)) &&
cuda_compute_capability.IsAtLeast(
se::CudaComputeCapability::AMPERE)) {}
bool SupportsLowPrecisionOperand(const HloInstruction& hlo,
int64_t operand_index) const override {
return (hlo.opcode() != HloOpcode::kConvolution) || is_conv_bf16_supported_;
}
bool SupportsLowPrecisionOutput(const HloInstruction& hlo) const override {
return (hlo.opcode() != HloOpcode::kConvolution) || is_conv_bf16_supported_;
}
bool SupportsMixedPrecisions(const HloInstruction& hlo) const override {
return (hlo.opcode() != HloOpcode::kConvolution);
}
private:
bool is_conv_bf16_supported_;
};
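// Same idea for dots: BF16 matmuls are kept only on Ampere or newer GPUs,
// while mixed precision is always allowed.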
class MatmulBfloat16Support : public FloatSupport {
public:
explicit MatmulBfloat16Support(
se::CudaComputeCapability cuda_compute_capability)
: FloatSupport(BF16),
is_matmul_bf16_supported_(cuda_compute_capability.IsAtLeast(
se::CudaComputeCapability::AMPERE)) {}
bool SupportsLowPrecisionOperand(const HloInstruction& hlo,
int64_t operand_index) const override {
return (hlo.opcode() != HloOpcode::kDot) || is_matmul_bf16_supported_;
}
bool SupportsLowPrecisionOutput(const HloInstruction& hlo) const override {
return (hlo.opcode() != HloOpcode::kDot) || is_matmul_bf16_supported_;
}
bool SupportsMixedPrecisions(const HloInstruction& hlo) const override {
return true;
}
private:
bool is_matmul_bf16_supported_;
};
}
int32_t NVPTXCompiler::GetToolkitVersion() const { return CUDA_VERSION; }
absl::Status NVPTXCompiler::OptimizeHloConvolutionCanonicalization(
HloModule* hlo_module, se::GpuComputeCapability gpu_version,
se::dnn::VersionInfo dnn_version,
se::DeviceMemoryAllocator* device_allocator) {
auto cuda_compute_capability =
std::get<se::CudaComputeCapability>(gpu_version);
HloPassPipeline pipeline("conv_canonicalization");
pipeline.AddInvariantCheckerDebug<HloVerifier>(
false,
false);
ConvBfloat16Support conv_bf16_support(dnn_version, cuda_compute_capability);
pipeline.AddPass<FloatNormalization>(&conv_bf16_support);
MatmulBfloat16Support matmul_bf16_support(cuda_compute_capability);
pipeline.AddPass<FloatNormalization>(&matmul_bf16_support);
pipeline.AddPass<GpusolverRewriter>();
pipeline.AddPass<GpuConvRewriter>(cuda_compute_capability);
pipeline.AddPass<CudnnFusedConvRewriter>(cuda_compute_capability, dnn_version,
GetToolkitVersion());
pipeline.AddPass<GpuConvPaddingLegalization>();
pipeline.AddPass<CudnnPadForConvolutions>(cuda_compute_capability);
pipeline.AddPass<CudnnVectorizeConvolutions>(cuda_compute_capability,
dnn_version);
pipeline.AddPass<CallInliner>();
pipeline.AddPass<TupleSimplifier>();
AlgebraicSimplifierOptions algsimp_options =
GetAlgebraicSimplifierOptions(hlo_module->config());
algsimp_options.set_enable_conv_operand_swap(false);
algsimp_options.set_enable_unconditional_reduce_of_concat_replacement(false);
pipeline.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(algsimp_options,
gpu_version);
pipeline.AddPass<CudnnSimplifyPadding>();
[&, &pipeline = pipeline.AddPass<HloPassFix<HloPassPipeline>>(
"reshape_mover_after_conv_canonicalization")] {
ReshapeMoverOptions reshape_mover_options;
reshape_mover_options.reshape_of_1d_broadcast_is_cheap = true;
pipeline.AddPass<ReshapeMover>(reshape_mover_options);
pipeline.AddPass<GpuAlgebraicSimplifier>(algsimp_options, gpu_version);
}();
[&, &pipeline = pipeline.AddPass<HloPassFix<HloPassPipeline>>(
"simplify_after_conv_canonicalization")] {
pipeline.AddPass<ConvertMover>();
pipeline.AddPass<GpuAlgebraicSimplifier>(algsimp_options, gpu_version);
}();
pipeline.AddPass<HloConstantFolding>();
TF_RETURN_IF_ERROR(pipeline.Run(hlo_module).status());
return absl::OkStatus();
}
absl::Status NVPTXCompiler::OptimizeHloPostLayoutAssignment(
HloModule* hlo_module, se::StreamExecutor* stream_exec,
const CompileOptions& options, const TargetConfig& gpu_target_config,
tsl::thread::ThreadPool* thread_pool) {
auto cuda_compute_capability = std::get<se::CudaComputeCapability>(
gpu_target_config.device_description.gpu_compute_capability());
if (hlo_module->config().debug_options().xla_gpu_enable_cudnn_fmha()) {
HloPassPipeline mha_fusion_pipeline(
"nvptx cudnn multi-headed attention fusion");
AlgebraicSimplifierOptions alg_sim_options =
GetAlgebraicSimplifierOptions(hlo_module->config());
alg_sim_options.set_supports_non_canonical_dots(false);
alg_sim_options.set_is_layout_sensitive(true);
alg_sim_options.set_enable_conv_operand_swap(false);
alg_sim_options.set_minmax_propagate_nan(
!hlo_module->config().debug_options().xla_gpu_enable_fast_min_max());
alg_sim_options.set_enable_unconditional_reduce_of_concat_replacement(
false);
mha_fusion_pipeline.AddPass<HloCSE>(true);
se::GpuComputeCapability gpu_version =
gpu_target_config.device_description.gpu_compute_capability();
mha_fusion_pipeline.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(
alg_sim_options, gpu_version);
mha_fusion_pipeline.AddPass<HloCSE>(true);
if (stream_exec) {
mha_fusion_pipeline.AddPass<CudnnFusedMHARewriter>(
cuda_compute_capability, stream_exec);
} else {
mha_fusion_pipeline.AddPass<CudnnFusedMHARewriter>(
cuda_compute_capability, gpu_target_config.dnn_version_info);
}
mha_fusion_pipeline.AddPass<GpuAlgebraicSimplifier>(alg_sim_options,
gpu_version);
mha_fusion_pipeline.AddPass<CudnnFusedMHATransposeFusion>();
mha_fusion_pipeline.AddPass<HloDCE>();
mha_fusion_pipeline.AddPass<HloCSE>(true);
TF_RETURN_IF_ERROR(mha_fusion_pipeline.Run(hlo_module).status());
}
HloPassPipeline pre_pipeline("nvptx post-layout_assignment part 1");
if (hlo_module->config().debug_options().xla_gpu_enable_cudnn_layer_norm()) {
pre_pipeline.AddPass<CudnnNormRewriter>(cuda_compute_capability);
}
pre_pipeline.AddPass<DotDimensionMerger>();
pre_pipeline.AddPass<DotSparsityRewriter>();
for (const CublasPaddingRequirement& requirement :
CublasPaddingRequirements) {
if (cuda_compute_capability.IsAtLeast(requirement.min_compute_capability)) {
pre_pipeline.AddPass<CublasPadForGemms>(cuda_compute_capability,
requirement.data_type,
requirement.multiple_of);
}
}
pre_pipeline.AddPass<HloConstantFolding>();
TF_RETURN_IF_ERROR(pre_pipeline.Run(hlo_module).status());
TF_RETURN_IF_ERROR(GpuCompiler::OptimizeHloPostLayoutAssignment(
hlo_module, stream_exec, options, gpu_target_config, thread_pool));
HloPassPipeline post_pipeline("nvptx post-layout_assignment part 2");
post_pipeline.AddPass<TriangularSolveRewriter>();
if (stream_exec) {
post_pipeline.AddPass<CuDnnWorkspaceRewriter>(*stream_exec);
}
TF_RETURN_IF_ERROR(post_pipeline.Run(hlo_module).status());
return absl::OkStatus();
}
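// Returns true iff online convolution autotuning can run (a stream executor
// is present and GpuConvAlgorithmPicker is enabled) and the module contains
// at least one candidate convolution.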
bool NVPTXCompiler::RequiresCollectiveScheduleLinearizer(
const HloModule* module, se::StreamExecutor* stream_exec) {
if (stream_exec == nullptr || !GpuConvAlgorithmPicker::IsEnabled(module)) {
return false;
}
for (const HloComputation* comp : module->MakeNonfusionComputations()) {
for (const HloInstruction* inst : comp->instructions()) {
if (GpuConvAlgorithmPicker::IsCandidate(inst)) {
return true;
}
}
}
return false;
}
absl::Status NVPTXCompiler::AddConvAndGemmAutotuningPasses(
HloPassPipeline* pipeline, HloModule* hlo_module,
AutotuneConfig& autotune_config, tsl::thread::ThreadPool* thread_pool) {
if (GpuConvAlgorithmPicker::IsEnabled(hlo_module)) {
pipeline->AddPass<GpuConvAlgorithmPicker>(autotune_config);
}
pipeline->AddPass<GemmAlgorithmPicker>(autotune_config);
return absl::OkStatus();
}
absl::Status NVPTXCompiler::AddGemmFusionAutotuningPasses(
HloPassPipeline* pipeline, HloModule* hlo_module,
AutotuneConfig& autotune_config, tsl::thread::ThreadPool* thread_pool,
const MultiProcessKeyValueStore& key_value_store) {
pipeline->AddPass<GemmFusionAutotuner>(autotune_config, GetToolkitVersion(),
thread_pool, key_value_store);
return absl::OkStatus();
}
absl::Status NVPTXCompiler::AddCustomKernelReplacementPasses(
HloPassPipeline* pipeline, const DebugOptions& debug_options) {
if (debug_options.xla_gpu_enable_cub_radix_sort()) {
pipeline->AddPass<GpuSortRewriter>();
}
return absl::OkStatus();
}
absl::Status NVPTXCompiler::RunCudnnFusionCompilerPass(
HloModule* module, se::StreamExecutor* stream_exec,
Thunk::BinaryMap* dnn_compiled_graphs) {
tsl::profiler::ScopedAnnotation annotation([&] {
return absl::StrFormat("XlaCompileCudnnFusion:#module=%s,program_id=%d#",
module->name(), module->unique_id());
});
CuDnnFusionCompiler cudnn_compiler(*stream_exec, *dnn_compiled_graphs);
return cudnn_compiler.Run(module).status();
}
namespace {
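// If --xla_gpu_ptx_file names a file whose basename matches this module's
// dump prefix, loads its contents into *ptx and returns true.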
bool MaybeLoadPtxFromFile(const HloModuleConfig module_config,
const HloModule* module, std::string* ptx) {
std::string prefix = xla::FilenameFor(*module, "", *ptx);
std::string matched_filename;
for (const std::string& full_filename :
module_config.debug_options().xla_gpu_ptx_file()) {
auto filename = tsl::io::Basename(full_filename);
if (absl::StartsWith(filename, prefix)) {
matched_filename = full_filename;
VLOG(1) << "RunBackend() - Will load PTX from file: " << full_filename;
break;
}
}
if (!module_config.debug_options().xla_gpu_ptx_file().empty() &&
matched_filename.empty()) {
VLOG(1) << "RunBackend() - For module with prefix '" << prefix
<< "', we did not found a PTX file to load.";
}
if (!matched_filename.empty()) {
std::ifstream ifs(matched_filename, std::ifstream::in);
*ptx = std::string(std::istreambuf_iterator<char>(ifs),
std::istreambuf_iterator<char>());
    CHECK(!ptx->empty()) << "Empty or non-existent PTX file: "
<< matched_filename;
return true;
}
return false;
}
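// Analogous override hook for --xla_gpu_llvm_ir_file: parses and returns the
// matching LLVM IR module, or nullptr when no file matches.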
std::unique_ptr<llvm::Module> MaybeLoadLLVMFromFile(const HloModule* module,
llvm::Module* llvm_module) {
if (module == nullptr) {
return nullptr;
}
std::string prefix = xla::FilenameFor(*module, "", "");
auto xla_gpu_llvm_ir_file =
module->config().debug_options().xla_gpu_llvm_ir_file();
auto matched_filename = absl::c_find_if(
xla_gpu_llvm_ir_file, [prefix](const std::string& full_filename) {
return absl::StartsWith(tsl::io::Basename(full_filename), prefix);
});
if (!xla_gpu_llvm_ir_file.empty() &&
matched_filename == std::end(xla_gpu_llvm_ir_file)) {
VLOG(1) << "RunBackend() - For module with prefix '" << prefix
<< "', we did not found a LLVM file to load.";
}
if (matched_filename != std::end(xla_gpu_llvm_ir_file)) {
VLOG(1) << "RunBackend() - Will load LLVM from file: " << *matched_filename;
llvm::LLVMContext& context = llvm_module->getContext();
llvm::SMDiagnostic err;
std::unique_ptr<llvm::Module> loaded_module =
llvm::parseIRFile(*matched_filename, err, context);
if (!loaded_module) {
err.print("ERR", llvm::errs());
LOG(FATAL) << "Failed to load an LLVM file. It is probably invalid LLVM.";
}
llvm_ir::DumpIrIfEnabled(*module, *loaded_module, false);
return loaded_module;
}
return nullptr;
}
}
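// Warns (at most once per process) when the kernel driver's PTX JIT is older
// than 396.20, a version range known to miscompile XLA-generated PTX.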
void WarnIfBadDriverJITVersion() {
static absl::once_flag run_once;
absl::call_once(run_once, [] {
auto version_or_status = se::cuda::Diagnostician::FindKernelDriverVersion();
if (!version_or_status.ok()) {
LOG(WARNING) << "Couldn't read CUDA driver version.";
return;
}
se::cuda::DriverVersion version = version_or_status.value();
if (version < std::make_tuple(396, 20, 0)) {
LOG(WARNING)
<< "*** WARNING *** Invoking the PTX->SASS JIT from driver version "
<< se::cuda::DriverVersionToString(version)
<< ", which is older than 396.20.0. These versions are known to "
"miscompile XLA code, leading to incorrect results or "
"invalid-address errors.\nXLA only uses the driver JIT if it "
"cannot find ptxas; you don't need to update your driver if "
"you can point XLA to ptxas 9.2.88 or newer.";
}
});
}
NVPTXCompiler::NVPTXCompiler()
: GpuCompiler(stream_executor::cuda::kCudaPlatformId, nvptx::TargetTriple(),
nvptx::DataLayout()) {}
HloDataflowAnalysis::CanShareBuffer NVPTXCompiler::GetCanShareBuffer() const {
return &CanShareBufferHint;
}
absl::StatusOr<GpuCompiler::BackendCompileResult>
NVPTXCompiler::CompileTargetBinary(const HloModuleConfig& module_config,
llvm::Module* llvm_module,
se::GpuComputeCapability gpu_version,
bool relocatable,
const HloModule* debug_module,
const CompileOptions& options) {
std::unique_ptr<llvm::Module> loaded_module =
MaybeLoadLLVMFromFile(debug_module, llvm_module);
llvm::Module* selected_module = nullptr;
if (loaded_module) {
selected_module = loaded_module.get();
} else {
selected_module = llvm_module;
}
std::string ptx;
if (!(debug_module &&
MaybeLoadPtxFromFile(module_config, debug_module, &ptx))) {
XLA_SCOPED_LOGGING_TIMER_IF(
absl::StrCat(
"NVPTXCompiler::CompileTargetBinary - CompileToPtx for ",
            (debug_module != nullptr ? debug_module->name() : "(unknown)")),
!options.is_autotuning_compilation);
uint64_t start_usecs = tsl::Env::Default()->NowMicros();
TF_ASSIGN_OR_RETURN(ptx,
nvptx::CompileToPtx(selected_module, gpu_version,
module_config.debug_options()));
uint64_t end_usecs = tsl::Env::Default()->NowMicros();
RecordLlvmPassesAndLlvmToPtxDuration(end_usecs - start_usecs);
}
absl::StatusOr<std::vector<uint8_t>> maybe_cubin =
CompileGpuAsmOrGetCachedResult(
ptx, std::get<se::CudaComputeCapability>(gpu_version), module_config,
(debug_module != nullptr ? debug_module->name() : "(unknown)"),
relocatable, options);
if (!maybe_cubin.ok()) {
return maybe_cubin.status();
}
return BackendCompileResult{std::move(ptx), std::move(maybe_cubin.value())};
}
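// Assembles `ptx` to cubin with ptxas options derived from the debug flags,
// adding -c for relocatable output; empty PTX compiles to an empty binary.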
static absl::StatusOr<std::vector<uint8_t>> AssembleOptionsAndCompile(
const std::string& ptx, se::CudaComputeCapability cc,
const HloModuleConfig& hlo_module_config,
GpuCompiler::CompileOptions options, bool relocatable) {
if (ptx.empty()) {
return std::vector<uint8_t>();
}
se::GpuAsmOpts ptxas_config =
PtxOptsFromDebugOptions(hlo_module_config.debug_options());
if (relocatable) {
ptxas_config.extra_flags.push_back("-c");
}
uint64_t start_usecs = tsl::Env::Default()->NowMicros();
bool cancel_if_reg_spill =
hlo_module_config.debug_options()
.xla_gpu_filter_kernels_spilling_registers_on_autotuning() &&
          options.is_autotuning_compilation; | #include "xla/service/gpu/nvptx_compiler.h"
#include <cstdint>
#include <memory>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/backend.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/buffer_value.h"
#include "xla/service/gpu/gpu_constants.h"
#include "xla/service/gpu/gpu_hlo_schedule.h"
#include "xla/service/gpu/gpu_latency_hiding_scheduler.h"
#include "xla/service/hlo_ordering.h"
#include "xla/service/logical_buffer.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
int64_t CountCopies(const HloComputation& computation) {
int64_t count = 0;
for (const auto& instruction : computation.instructions()) {
if (instruction->opcode() == HloOpcode::kCopy) {
count++;
}
}
return count;
}
int64_t CountCopies(const HloModule& module) {
int64_t count = 0;
for (const auto& computation : module.computations()) {
count += CountCopies(*computation);
}
return count;
}
class NVPTXCompilerTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<BufferAssignment>> AssignBuffers(
HloModule* module) {
constexpr uint64_t pointer_size = 4;
const se::DeviceDescription& gpu_device_info =
backend().default_stream_executor()->GetDeviceDescription();
TF_RETURN_IF_ERROR(
ScheduleGpuModule(module, pointer_size, gpu_device_info).status());
auto buffer_size_bytes_function =
[](const BufferValue& buffer_value) -> int64_t {
return GetSizeOfShape(buffer_value.shape(), pointer_size);
};
return BufferAssigner::Run(
module, std::make_unique<SequentialHloOrdering>(module->schedule()),
buffer_size_bytes_function,
[](LogicalBuffer::Color) { return kXlaAllocatedBufferAlignBytes; });
}
};
class NVPTXCompilerTestTriton : public NVPTXCompilerTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_gpu_cublas_fallback(false);
return debug_options;
}
};
TEST_F(NVPTXCompilerTest, AllReducePerformedInplace) {
const absl::string_view hlo_string = R"(
HloModule Module, input_output_alias={ {}: (0, {}, may-alias) }
summit {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
param0 = f32[128] parameter(0)
ROOT allreduce = f32[128] all-reduce(param0),
replica_groups={}, to_apply=summit
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(auto buffer_assignment, AssignBuffers(module.get()));
HloInstruction* all_reduce = module->entry_computation()->root_instruction();
EXPECT_TRUE(buffer_assignment->SharesTopLevelSlice(all_reduce,
all_reduce->operand(0)));
}
TEST_F(NVPTXCompilerTest, AllReducePerformedInplaceTwoOperands) {
const absl::string_view hlo_string = R"(
HloModule Module,
input_output_alias={ {0}: (0, {}, may-alias), {1}: (1, {}, may-alias) }
summit {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
param0 = f32[128] parameter(0)
param1 = f32[128] parameter(1)
ROOT allreduce = (f32[128], f32[128]) all-reduce(param0, param1),
replica_groups={}, to_apply=summit
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(auto buffer_assignment, AssignBuffers(module.get()));
HloInstruction* all_reduce = module->entry_computation()->root_instruction();
EXPECT_TRUE(buffer_assignment->SharesSliceAtIndex(
all_reduce, {0}, all_reduce->operand(0), {}));
EXPECT_TRUE(buffer_assignment->SharesSliceAtIndex(
all_reduce, {1}, all_reduce->operand(1), {}));
}
TEST_F(NVPTXCompilerTestTriton,
DotDimensionAreSortedBeforePaddingForCublasEnablingTritonFusion) {
const absl::string_view hlo_string = R"(
ENTRY e {
p0 = f16[11,22,33,44] parameter(0)
p1 = s8[11,22,33,44] parameter(1)
p1c = f16[11,22,33,44] convert(p1)
ROOT d = f16[11,22,44,44] dot(p0, p1c),
lhs_batch_dims={0,1}, lhs_contracting_dims={2},
rhs_batch_dims={0,1}, rhs_contracting_dims={2}
})";
se::CudaComputeCapability cc = backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
if (cc.IsAtLeastAmpere()) {
MatchOptimizedHlo(hlo_string, R"(
; CHECK: ENTRY
; CHECK-NEXT: parameter
; CHECK-NEXT: parameter
; CHECK-NEXT: __triton_gemm
)");
} else {
MatchOptimizedHlo(hlo_string, R"(
; CHECK-NOT: triton
)");
}
}
TEST_F(NVPTXCompilerTest, RemovesUnnecessaryCopyInPostSchedulingPipelines) {
const absl::string_view hlo_text = R"(
HloModule all_gather_overlapping, is_scheduled=true
condition {
input_tuple = (f32[1,128], f32[2,128], pred[]) parameter(0)
ROOT cond = pred[] get-tuple-element(input_tuple), index=2
}
body {
c0 = f32[] constant(0)
splat_c0 = f32[1,128] broadcast(c0), dimensions={}
input_tuple = (f32[1,128], f32[2,128], pred[]) parameter(0)
param_0 = f32[1,128] get-tuple-element(input_tuple), index=0
add = f32[1,128] add(splat_c0, param_0)
param_1 = f32[2,128] get-tuple-element(input_tuple), index=1
c1_s32 = s32[] constant(1)
c0_s32 = s32[] constant(0)
dynamic-slice = f32[1,128] dynamic-slice(param_1, c1_s32, c0_s32), dynamic_slice_sizes={1,128}
all-gather-start = (f32[1,128], f32[2,128]) all-gather-start(add), channel_id=1337, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true
all-gather-done = f32[2,128] all-gather-done(all-gather-start)
copy = f32[2,128] copy(all-gather-done)
cond = pred[] get-tuple-element(input_tuple), index=2
ROOT output_tuple = (f32[1,128], f32[2,128], pred[]) tuple(dynamic-slice, copy, cond)
}
ENTRY main {
param_0 = f32[1,128] parameter(0)
param_1 = f32[2,128] parameter(1)
param_2 = pred[] parameter(2)
copy_param_0 = f32[1,128] copy(param_0)
copy_param_1 = f32[2,128] copy(param_1)
tuple = (f32[1,128], f32[2,128], pred[]) tuple(copy_param_0, copy_param_1, param_2)
while = (f32[1,128], f32[2,128], pred[]) while(tuple), condition=condition, body=body
get-tuple-element = f32[1,128]{1,0} get-tuple-element((f32[1,128]{1,0}, f32[2,128]{1,0}, pred[]) while), index=0
get-tuple-element.1 = f32[2,128]{1,0} get-tuple-element((f32[1,128]{1,0}, f32[2,128]{1,0}, pred[]) while), index=1
get-tuple-element.2 = pred[] get-tuple-element((f32[1,128]{1,0}, f32[2,128]{1,0}, pred[]) while), index=2
copy.3 = pred[] copy(pred[] get-tuple-element.2)
ROOT tuple.2 = (f32[1,128]{1,0}, f32[2,128]{1,0}, pred[]) tuple(f32[1,128]{1,0} get-tuple-element, f32[2,128]{1,0} get-tuple-element.1, pred[] copy.3)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_text).value();
EXPECT_EQ(CountCopies(*module), 4);
const HloInstruction* while_op = hlo_query::GetFirstInstructionWithOpcode(
*module->entry_computation(), HloOpcode::kWhile);
EXPECT_EQ(while_op->while_body()->root_instruction()->operand(1)->opcode(),
HloOpcode::kCopy);
NVPTXCompiler compiler;
TF_EXPECT_OK(compiler.RunPostSchedulingPipelines(
module.get(), 100000,
backend().default_stream_executor()->GetDeviceDescription()));
EXPECT_EQ(CountCopies(*module), 3);
while_op = hlo_query::GetFirstInstructionWithOpcode(
*module->entry_computation(), HloOpcode::kWhile);
EXPECT_EQ(while_op->while_body()->root_instruction()->operand(1)->opcode(),
HloOpcode::kAllGatherDone);
}
}
}
} |
2,059 | cpp | tensorflow/tensorflow | horizontal_input_fusion | third_party/xla/xla/service/gpu/transforms/horizontal_input_fusion.cc | third_party/xla/xla/service/gpu/transforms/horizontal_input_fusion_test.cc | #ifndef XLA_SERVICE_GPU_HORIZONTAL_INPUT_FUSION_H_
#define XLA_SERVICE_GPU_HORIZONTAL_INPUT_FUSION_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
class GpuHorizontalInputFusion : public HloModulePass {
public:
explicit GpuHorizontalInputFusion(const se::DeviceDescription& d)
: device_info_(d) {}
absl::string_view name() const override {
return "gpu_horizontal_input_fusion";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
absl::StatusOr<bool> RunOnComputation(HloComputation*);
const se::DeviceDescription& device_info_;
};
}
}
#endif
#include "xla/service/gpu/horizontal_input_fusion.h"
#include <algorithm>
#include <cstddef>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
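// Returns the shape of the first operand of the instruction's real hero
// (or an empty shape for an operand-less hero); used below to order fusion
// candidates.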
Shape GetInputShapeForMultiOutputFusion(const HloInstruction& instr) {
const HloInstruction* real_hero = GetRealHeroForMultiOutputFusion(instr);
if (real_hero->operands().empty()) {
return Shape();
} else {
return real_hero->operand(0)->shape();
}
}
class HorizontalInputFusionImpl {
public:
explicit HorizontalInputFusionImpl(HloComputation* computation,
const se::DeviceDescription& d)
: computation_(computation), device_info_(d) {}
~HorizontalInputFusionImpl() = default;
absl::StatusOr<bool> Run();
private:
HloComputation* computation_;
const se::DeviceDescription& device_info_;
};
bool CompareShapeDimsFromLeftToRight(const Shape& shape_a,
const Shape& shape_b) {
if (shape_a.rank() != shape_b.rank()) {
return shape_a.rank() < shape_b.rank();
}
auto dims_a = shape_a.dimensions();
auto dims_b = shape_b.dimensions();
for (size_t i = 0; i < dims_a.size(); ++i) {
if (dims_a[i] != dims_b[i]) {
return dims_a[i] < dims_b[i];
}
}
  // std::sort requires a strict weak ordering, so equal dimension sequences
  // must not compare as "less".
  return false;
}
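// Collects the consumer's input-fusible reduction producers (deduplicated)
// and sorts them by input shape, then by fusible instruction count, so that
// compatible candidates end up adjacent for pairwise fusion.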
std::vector<HloInstruction*> FindAndSortFusionCandidates(
HloInstruction* consumer) {
absl::flat_hash_set<HloInstruction*> fusion_instr_set;
std::vector<HloInstruction*> fusion_instrs;
for (HloInstruction* opnd : consumer->operands()) {
HloInstruction* predecessor = opnd->LatestNonGteAncestor();
if (IsInputFusibleReduction(*predecessor) &&
IsConsumerTheOnlyNonRootUser(*predecessor, *consumer)) {
if (fusion_instr_set.insert(predecessor).second) {
fusion_instrs.push_back(predecessor);
}
}
}
std::sort(fusion_instrs.begin(), fusion_instrs.end(),
[&](const HloInstruction* a, const HloInstruction* b) {
Shape shape_a = GetInputShapeForMultiOutputFusion(*a);
Shape shape_b = GetInputShapeForMultiOutputFusion(*b);
if (!ShapeUtil::EqualIgnoringElementType(shape_a, shape_b)) {
return CompareShapeDimsFromLeftToRight(shape_a, shape_b);
}
return GetInstrCountOfFusible(*a) < GetInstrCountOfFusible(*b);
});
return fusion_instrs;
}
absl::StatusOr<bool> HorizontalInputFusionImpl::Run() {
bool changed = false;
XLA_VLOG_LINES(3, computation_->ToString());
std::vector<HloInstruction*> def_to_use_order =
computation_->MakeInstructionPostOrder();
for (HloInstruction* consumer : def_to_use_order) {
auto candidates = FindAndSortFusionCandidates(consumer);
if (candidates.size() <= 1) {
continue;
}
for (size_t j = 0; j < candidates.size(); ++j) {
if (candidates[j]->opcode() != HloOpcode::kFusion) {
TF_ASSIGN_OR_RETURN(
HloInstruction * fusion_instr,
MakeFusionInstruction(candidates[j],
HloInstruction::FusionKind::kInput));
candidates[j] = fusion_instr;
changed = true;
}
}
size_t fusion_anchor_id = 0;
for (size_t j = 1; j < candidates.size(); ++j) {
HloInstruction* fusion_anchor = candidates[fusion_anchor_id];
HloInstruction* fused = candidates[j];
if (ShapesCompatibleForMultiOutputFusion(*fusion_anchor, *fused) &&
FusionFitsInBudget(*fusion_anchor, *fused, device_info_)) {
VLOG(3) << "Fuse " << fused->ToString() << " into "
<< fusion_anchor->ToString();
fusion_anchor->MergeFusionInstructionIntoMultiOutput(fused);
changed = true;
} else {
VLOG(3) << j - fusion_anchor_id - 1 << " instructions are fused.";
fusion_anchor_id = j;
}
}
}
return changed;
}
}
absl::StatusOr<bool> GpuHorizontalInputFusion::RunOnComputation(
HloComputation* computation) {
HorizontalInputFusionImpl horizontal_fusion_impl(computation, device_info_);
return horizontal_fusion_impl.Run();
}
absl::StatusOr<bool> GpuHorizontalInputFusion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
VLOG(2) << "Run horizontal input fusion.";
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
    TF_ASSIGN_OR_RETURN(bool comp_changed, RunOnComputation(comp));
    changed |= comp_changed;
}
return changed;
}
}
} | #include "xla/service/gpu/horizontal_input_fusion.h"
#include <cstdint>
#include <utility>
#include <vector>
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/tests/gpu_codegen_test.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/test.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class HorizontalInputFusionTest : public GpuCodegenTest {
public:
se::DeviceDescription device_description_{
TestGpuDeviceInfo::RTXA6000DeviceInfo()};
GpuHorizontalInputFusion horizontal_input_fusion_{device_description_};
};
TEST_F(HorizontalInputFusionTest, BasicTest) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule BasicTest
%add_f16 {
%x = f16[] parameter(0)
%y = f16[] parameter(1)
ROOT %add = f16[] add(%x, %y)
}
fused_computation.1 {
arg.1 = f16[1024]{0} parameter(0)
constant0 = f16[] constant(0)
ROOT reduce1 = f16[] reduce(arg.1, constant0), dimensions={0}, to_apply=%add_f16
}
fused_computation.2 {
arg.1 = f16[1024]{0} parameter(0)
constant0 = f16[] constant(0)
ROOT reduce1 = f16[] reduce(arg.1, constant0), dimensions={0}, to_apply=%add_f16
}
ENTRY entry_computation {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
fusion.1 = f16[] fusion(arg.1), kind=kInput, calls=fused_computation.1
fusion.2 = f16[] fusion(arg.2), kind=kInput, calls=fused_computation.2
ROOT tuple.1 = (f16[], f16[]) tuple(fusion.1, fusion.2)
}
)")
.value();
EXPECT_TRUE(horizontal_input_fusion_.Run(module.get()).value());
const HloInstruction* entry_root =
module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(entry_root,
GmockMatch(m::Tuple((m::GetTupleElement(m::Fusion(&fusion))),
(m::GetTupleElement(m::Fusion())))));
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Reduce())));
}
TEST_F(HorizontalInputFusionTest, ManyInputFusions) {
auto module = CreateNewVerifiedModule();
HloComputation* reduce_computation;
{
auto embedded_builder = HloComputation::Builder("add");
auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "lhs"));
auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "rhs"));
embedded_builder.AddInstruction(
HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));
reduce_computation =
module->AddEmbeddedComputation(embedded_builder.Build());
}
HloComputation::Builder builder(TestName());
std::vector<HloInstruction*> var_outs;
auto input_shape = ShapeUtil::MakeShape(F32, {1024, 1024});
auto output_shape = ShapeUtil::MakeShape(F32, {1024});
for (int64_t i = 0; i < 130; ++i) {
HloInstruction* param_var_in = builder.AddInstruction(
HloInstruction::CreateParameter(i * 2 + 0, input_shape, "var.in"));
HloInstruction* param_alpha =
builder.AddInstruction(HloInstruction::CreateParameter(
i * 2 + 1, ShapeUtil::MakeShape(F32, {}), "alpha"));
auto alpha_broadcasted = builder.AddInstruction(
HloInstruction::CreateBroadcast(input_shape, param_alpha, {}));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
input_shape, HloOpcode::kMultiply, param_var_in, alpha_broadcasted));
HloInstruction* const0 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0)));
auto reduce = builder.AddInstruction(HloInstruction::CreateReduce(
output_shape, mul, const0, {1}, reduce_computation));
var_outs.push_back(reduce);
}
builder.AddInstruction(HloInstruction::CreateTuple(var_outs));
module->AddEntryComputation(builder.Build());
CompileAndVerifyIr(module->Clone(), R"(CHECK: reduce-group-6)",
false);
EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{1e-5, 1e-5}));
}
TEST_F(HorizontalInputFusionTest, MultiOutputFusionTest) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule MultiOutputFusionTest
%add_f16 {
%x = f16[] parameter(0)
%y = f16[] parameter(1)
ROOT %add = f16[] add(%x, %y)
}
fused_computation.1 {
arg.1 = f16[1024]{0} parameter(0)
constant0 = f16[] constant(0)
reduce.1 = f16[] reduce(arg.1, constant0), dimensions={0}, to_apply=%add_f16
add.0 = f16[1024] add(arg.1, arg.1)
ROOT tuple.1 = (f16[], f16[1024]) tuple(reduce.1, add.0)
}
fused_computation.2 {
arg.1 = f16[1024]{0} parameter(0)
constant0 = f16[] constant(0)
reduce.1 = f16[] reduce(arg.1, constant0), dimensions={0}, to_apply=%add_f16
add.0 = f16[1024] add(arg.1, arg.1)
ROOT tuple.1 = (f16[], f16[1024]) tuple(reduce.1, add.0)
}
fused_computation.3 {
arg.0 = f16[1024]{0} parameter(0)
arg.1 = f16[1024]{0} parameter(1)
add.0 = f16[1024] add(arg.0, arg.1)
mul.0 = f16[1024] multiply(arg.0, arg.1)
ROOT tuple.1 = (f16[1024], f16[1024]) tuple(add.0, mul.0)
}
ENTRY entry_computation {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
fusion.1 = (f16[],f16[1024]) fusion(arg.1), kind=kInput, calls=fused_computation.1
fusion.2 = (f16[],f16[1024]) fusion(arg.2), kind=kInput, calls=fused_computation.2
gte.3 = f16[] get-tuple-element(fusion.1), index=0
gte.1 = f16[1024]{0} get-tuple-element(fusion.1), index=1
gte.2 = f16[1024]{0} get-tuple-element(fusion.2), index=1
gte.6 = f16[] get-tuple-element(fusion.2), index=0
fusion.3 = (f16[1024],f16[1024]) fusion(gte.1, gte.2),
kind=kLoop, calls=fused_computation.3
gte.4 = f16[1024] get-tuple-element(fusion.3), index=0
gte.5 = f16[1024]{0} get-tuple-element(fusion.3), index=1
ROOT tuple.1 = (f16[], f16[1024], f16[1024]{0}, f16[])
tuple(gte.3, gte.4, gte.5, gte.6)
}
)")
.value();
EXPECT_TRUE(horizontal_input_fusion_.Run(module.get()).value());
}
TEST_F(HorizontalInputFusionTest, NonfusionInstrs) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule NonfusionInstrs
%add_f16 {
%x = f16[] parameter(0)
%y = f16[] parameter(1)
ROOT %add = f16[] add(%x, %y)
}
ENTRY entry_computation {
arg.0 = f16[1024]{0} parameter(0)
arg.1 = f16[1024]{0} parameter(1)
constant0 = f16[] constant(0)
reduce.0 = f16[] reduce(arg.0, constant0), dimensions={0}, to_apply=%add_f16
reduce.1 = f16[] reduce(arg.1, constant0), dimensions={0}, to_apply=%add_f16
ROOT tuple.0 = (f16[], f16[]) tuple(reduce.0, reduce.1)
}
)")
.value();
EXPECT_TRUE(horizontal_input_fusion_.Run(module.get()).value());
const HloInstruction* entry_root =
module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(entry_root,
GmockMatch(m::Tuple((m::GetTupleElement(m::Fusion(&fusion))),
(m::GetTupleElement(m::Fusion())))));
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Reduce())));
}
}
}
} |
2,060 | cpp | tensorflow/tensorflow | autotuner_util | third_party/xla/xla/service/gpu/autotuning/autotuner_util.cc | third_party/xla/xla/service/gpu/autotuning/autotuner_util_test.cc | #ifndef XLA_SERVICE_GPU_AUTOTUNER_UTIL_H_
#define XLA_SERVICE_GPU_AUTOTUNER_UTIL_H_
#include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/autotune_results.pb.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/gpu/redzone_allocator.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/stream_executor_memory_allocator.h"
#include "xla/xla.pb.h"
namespace xla {
namespace gpu {
struct DeviceConfig {
se::StreamExecutor* stream_exec;
se::DeviceMemoryAllocator* allocator = nullptr;
};
struct DevicelessConfig {
std::string model_str;
se::GpuComputeCapability gpu_compute_capability{
se::CudaComputeCapability{0, 0}};
};
class AutotuneCacheKey {
public:
AutotuneCacheKey(absl::string_view model_str,
const HloInstruction& instruction);
explicit AutotuneCacheKey(absl::string_view model_str,
absl::string_view hlo_canonical)
: model_str_(model_str), hlo_canonical_(hlo_canonical) {}
absl::string_view GetModelStr() const { return model_str_; }
absl::string_view GetHlo() const { return hlo_canonical_; }
template <typename H>
friend H AbslHashValue(H h, const AutotuneCacheKey& w) {
return H::combine(std::move(h), w.model_str_, w.hlo_canonical_);
}
bool operator==(const AutotuneCacheKey& w) const {
return model_str_ == w.model_str_ && hlo_canonical_ == w.hlo_canonical_;
}
std::string ToString() const {
return absl::StrFormat("<key model='%s', hlo='%s'>", model_str_,
hlo_canonical_);
}
private:
std::string model_str_;
std::string hlo_canonical_;
};
class AutotuneConfig {
public:
bool should_init_buffers() const { return autotune_level_ >= 2; }
bool should_reinit_output_buffer() const { return autotune_level_ >= 3; }
bool should_check_correctness() const { return autotune_level_ >= 4; }
bool should_crash_on_check_failure() const {
return should_crash_on_check_failure_;
}
bool should_require_complete_aot_autotune_results() const {
return require_complete_aot_autotune_results_;
}
const std::string& autotune_cache_dir() const { return autotune_cache_dir_; }
AutotuneConfig(const AutotuneConfig& right)
: config_(right.config_),
autotune_level_(right.autotune_level_),
should_crash_on_check_failure_(right.should_crash_on_check_failure_),
exhaustive_tiling_search_(right.exhaustive_tiling_search_),
require_complete_aot_autotune_results_(
right.require_complete_aot_autotune_results_),
autotune_cache_dir_(right.autotune_cache_dir_) {}
AutotuneConfig(const std::variant<DeviceConfig, DevicelessConfig>& config,
const DebugOptions& debug_options)
: config_(config),
autotune_level_(debug_options.xla_gpu_autotune_level()),
should_crash_on_check_failure_(
debug_options.xla_gpu_crash_on_verification_failures()),
exhaustive_tiling_search_(
debug_options.xla_gpu_exhaustive_tiling_search()),
require_complete_aot_autotune_results_(
debug_options.xla_gpu_require_complete_aot_autotune_results()),
autotune_cache_dir_(
debug_options.xla_gpu_per_fusion_autotune_cache_dir()) {}
absl::string_view GetModelStr() const {
if (auto deviceless_config = std::get_if<DevicelessConfig>(&config_)) {
return deviceless_config->model_str;
}
const auto& device_config = std::get<DeviceConfig>(config_);
return device_config.stream_exec->GetDeviceDescription().model_str();
}
se::StreamExecutor* GetExecutor() const {
CHECK(std::holds_alternative<DeviceConfig>(config_));
return std::get<DeviceConfig>(config_).stream_exec;
}
se::DeviceMemoryAllocator* GetAllocator() const {
CHECK(std::holds_alternative<DeviceConfig>(config_));
auto& cf = std::get<DeviceConfig>(config_);
if (cf.allocator != nullptr) {
return cf.allocator;
}
if (allocator_ == nullptr) {
allocator_ =
std::make_unique<se::StreamExecutorMemoryAllocator>(GetExecutor());
}
return allocator_.get();
}
absl::StatusOr<se::Stream*> GetStream() const {
CHECK(std::holds_alternative<DeviceConfig>(config_));
return GetAllocator()->GetStream(GetExecutor()->device_ordinal());
}
const se::GpuComputeCapability& GetGpuComputeCapability() const {
if (auto c = std::get_if<DeviceConfig>(&config_)) {
return c->stream_exec->GetDeviceDescription().gpu_compute_capability();
}
return std::get<DevicelessConfig>(config_).gpu_compute_capability;
}
bool IsDeviceless() const {
return std::holds_alternative<DevicelessConfig>(config_);
}
bool ExhaustiveTilingSearch() const { return exhaustive_tiling_search_; }
private:
std::variant<DeviceConfig, DevicelessConfig> config_;
int32_t autotune_level_;
bool should_crash_on_check_failure_;
bool exhaustive_tiling_search_;
bool require_complete_aot_autotune_results_;
mutable std::unique_ptr<se::DeviceMemoryAllocator> allocator_;
std::string autotune_cache_dir_;
};
using AutotuneNoCacheFn = std::function<absl::StatusOr<AutotuneResult>()>;
struct AutotunerUtil {
static absl::StatusOr<se::DeviceMemoryBase> CreateBuffer(
se::RedzoneAllocator& allocator, const Shape& shape,
const AutotuneConfig& config, int64_t& rng_state);
static absl::StatusOr<AutotuneResult> Autotune(
const HloInstruction* instr, const AutotuneConfig& config,
const AutotuneNoCacheFn& autotune_fn);
static AutotuneCacheKey GetKey(const HloInstruction* instr,
const AutotuneConfig& config);
static absl::StatusOr<bool> IsInCache(const AutotuneCacheKey& key,
const AutotuneConfig& config);
static absl::StatusOr<bool> AddResult(const AutotuneCacheKey& key,
AutotuneResult result,
const AutotuneConfig& config);
static absl::StatusOr<se::RedzoneAllocator> CreateRedzoneAllocator(
const AutotuneConfig& config, const DebugOptions& opts);
static absl::StatusOr<std::string> SerializeAutotuneResults(
bool as_textproto = false);
static absl::Status SerializeAutotuneResults(AutotuneResults* results);
static absl::Status LoadAutotuneResults(absl::string_view data,
bool as_textproto = false);
static absl::Status LoadAutotuneResults(const AutotuneResults& results);
static absl::Status SerializeAutotuneResultsToFile(
absl::string_view file_path);
static absl::Status SerializeAutotuneResultsToFile(
const AutotuneResults& results, absl::string_view file_path);
static absl::Status LoadAutotuneResultsFromFile(absl::string_view file_path);
static void ClearAutotuneResults();
static bool ResultCacheIsEmpty();
};
absl::StatusOr<std::string> AutotuneResultsToString(
const AutotuneResults& results, bool as_textproto);
absl::StatusOr<std::string> GetBase64EncodedSha256Hash(absl::string_view s);
}
}
#endif
#include "xla/service/gpu/autotuner_util.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <limits>
#include <optional>
#include <string>
#include <utility>
#include "absl/base/const_init.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/SHA256.h"
#include "xla/autotune_results.pb.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_asm_opts_util.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/gpu/redzone_allocator.h"
#include "xla/stream_executor/stream.h"
#include "xla/util.h"
#include "tsl/platform/base64.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
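// Serialized AutotuneResults are versioned; LoadAutotuneResults rejects any
// payload whose version differs from this one.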
constexpr int kVersion = 3;
}
using AutotuneCacheMap = absl::flat_hash_map<AutotuneCacheKey, AutotuneResult>;
static absl::Mutex autotune_cache_mu(absl::kConstInit);
static auto& autotune_cache ABSL_GUARDED_BY(autotune_cache_mu) =
*new AutotuneCacheMap();
absl::StatusOr<std::string> GetBase64EncodedSha256Hash(absl::string_view s) {
llvm::SHA256 sha256;
sha256.update(llvm::StringRef(s));
std::array<uint8_t, 32> hash = sha256.final();
absl::string_view hash_view(reinterpret_cast<const char*>(hash.data()),
hash.size());
std::string base64_encoded_hash;
TF_RETURN_IF_ERROR(tsl::Base64Encode(hash_view, &base64_encoded_hash));
return base64_encoded_hash;
}
namespace {
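// Maps `key` to its on-disk location:
// <cache_dir>/<base64(sha256(key.ToString()))>.textproto.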
absl::StatusOr<std::string> GetCacheFilePath(absl::string_view cache_dir,
const AutotuneCacheKey& key) {
if (cache_dir.empty()) {
return absl::InvalidArgumentError("autotune_cache_dir should not be empty");
}
TF_ASSIGN_OR_RETURN(std::string key_hash,
GetBase64EncodedSha256Hash(key.ToString()));
return tsl::io::JoinPath(cache_dir, absl::StrCat(key_hash, ".textproto"));
}
struct ResultAndInserted {
AutotuneResult result;
bool inserted;
};
ResultAndInserted AddResultToInMemoryCache(const AutotuneCacheKey& key,
AutotuneResult result)
ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
absl::MutexLock lock(&autotune_cache_mu);
auto [it, inserted] = autotune_cache.emplace(key, std::move(result));
return {it->second, inserted};
}
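// Persists `result` under `cache_dir` (a no-op when the directory is empty),
// writing to a temp file and renaming it so readers never see a partial file.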
absl::Status AddResultToFileBasedCacheIfEnabled(const AutotuneCacheKey& key,
AutotuneResult result,
std::string_view cache_dir)
ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
if (cache_dir.empty()) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(const std::string file_path,
GetCacheFilePath(cache_dir, key));
VLOG(1) << "Writing autotune result to file: " << file_path;
std::string result_str;
if (!tsl::protobuf::TextFormat::PrintToString(result, &result_str)) {
return absl::InternalError("Failed to serialize autotune result.");
}
std::string temp_file_path = tsl::io::GetTempFilename(".textproto");
tsl::Env* default_env = tsl::Env::Default();
TF_RETURN_IF_ERROR(
tsl::WriteStringToFile(default_env, temp_file_path, result_str));
return default_env->RenameFile(temp_file_path, file_path);
}
absl::StatusOr<ResultAndInserted> AddResultToCaches(const AutotuneCacheKey& key,
AutotuneResult result,
std::string_view cache_dir)
ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
ResultAndInserted result_and_inserted = AddResultToInMemoryCache(key, result);
if (result_and_inserted.inserted) {
TF_RETURN_IF_ERROR(AddResultToFileBasedCacheIfEnabled(
key, result_and_inserted.result, cache_dir));
}
return result_and_inserted;
}
std::optional<AutotuneResult> TryToFindInInMemoryCache(
const AutotuneCacheKey& key) ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
absl::MutexLock lock(&autotune_cache_mu);
auto it = autotune_cache.find(key);
if (it == autotune_cache.end()) {
return std::nullopt;
}
return it->second;
}
absl::StatusOr<std::optional<AutotuneResult>>
TryToFindInFileBasedCacheIfEnabled(const AutotuneCacheKey& key,
absl::string_view cache_dir)
ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
if (cache_dir.empty()) {
return std::nullopt;
}
TF_ASSIGN_OR_RETURN(const std::string file_path,
GetCacheFilePath(cache_dir, key));
if (!tsl::Env::Default()->FileExists(file_path).ok()) {
VLOG(1) << "Autotune result file not found: " << file_path;
return std::nullopt;
}
VLOG(1) << "Autotune result file found: " << file_path;
std::string autotune_result_str;
TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(), file_path,
&autotune_result_str));
AutotuneResult result;
if (!tsl::protobuf::TextFormat::ParseFromString(autotune_result_str,
&result)) {
return absl::InvalidArgumentError("Failed to parse autotune result.");
}
return result;
}
void SortAutotuneResults(AutotuneResults* results) {
std::sort(results->mutable_results()->pointer_begin(),
results->mutable_results()->pointer_end(),
[](const auto* a, const auto* b) {
return std::make_pair(absl::string_view(a->device()),
absl::string_view(a->hlo())) <
std::make_pair(absl::string_view(b->device()),
absl::string_view(b->hlo()));
});
}
}
absl::StatusOr<std::string> AutotuneResultsToString(
const AutotuneResults& results, bool as_textproto) {
if (as_textproto) {
std::string textproto;
if (tsl::protobuf::TextFormat::PrintToString(results, &textproto)) {
return textproto;
} else {
return Internal("Failed to serialize autotune results.");
}
}
return results.SerializeAsString();
}
namespace {
void SerializeAutotuneEntry(AutotuneResults* results, const AutotuneCacheKey& k,
const AutotuneResult* res) {
auto& entry = *results->add_results();
entry.set_device(std::string(k.GetModelStr()));
entry.set_hlo(std::string(k.GetHlo()));
*entry.mutable_result() = *res;
}
}
absl::Status AutotunerUtil::SerializeAutotuneResults(
AutotuneResults* results) {
absl::MutexLock lock(&autotune_cache_mu);
for (const auto& [k, result] : autotune_cache) {
SerializeAutotuneEntry(results, k, &result);
}
results->set_version(kVersion);
SortAutotuneResults(results);
return absl::OkStatus();
}
absl::Status AutotunerUtil::LoadAutotuneResults(
const AutotuneResults& results) {
absl::MutexLock lock(&autotune_cache_mu);
for (const AutotuneResults::Entry& result : results.results()) {
if (auto [it, inserted] = autotune_cache.emplace(
AutotuneCacheKey(result.device(), result.hlo()), result.result());
!inserted) {
return absl::InternalError(absl::StrCat(
"Duplicate autotuning result for ", it->first.ToString()));
}
}
return absl::OkStatus();
}
void AutotunerUtil::ClearAutotuneResults() {
absl::MutexLock lock(&autotune_cache_mu);
autotune_cache.clear();
}
bool AutotunerUtil::ResultCacheIsEmpty() {
absl::MutexLock lock(&autotune_cache_mu);
return autotune_cache.empty();
}
absl::StatusOr<se::DeviceMemoryBase> AutotunerUtil::CreateBuffer(
se::RedzoneAllocator& allocator, const Shape& shape,
const AutotuneConfig& config, int64_t& rng_state) {
TF_ASSIGN_OR_RETURN(se::DeviceMemoryBase buffer,
allocator.AllocateBytes(ShapeUtil::ByteSizeOf(shape)));
if (config.should_init_buffers()) {
InitializeBuffer(allocator.stream(), shape.element_type(), &rng_state,
buffer);
}
return buffer;
}
namespace {
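// Canonical text form used in cache keys: for fusions, prints the fused
// computation with ids and other non-essential details stripped so that
// equivalent fusions map to the same key.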
std::string ToCanonicalString(const HloInstruction* instr) {
auto options = HloPrintOptions::Canonical();
if (instr->opcode() != HloOpcode::kFusion) {
options.set_print_backend_config(true);
return instr->ToString(options);
}
options.set_print_subcomputation_mode(
HloPrintOptions::PrintSubcomputationMode::kOff);
options.set_print_infeed_outfeed_config(false);
options.set_print_only_essential_constants(true);
options.set_print_operand_shape(true);
options.set_print_ids(false);
options.set_canonicalize_computations(true);
return instr->called_computations()[0]->ToString(options);
}
}
AutotuneCacheKey::AutotuneCacheKey(absl::string_view model_str,
const HloInstruction& instr)
: AutotuneCacheKey(model_str, ToCanonicalString(&instr)) {}
namespace {
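// Checks the in-memory cache first, then the file-based cache; a file-based
// hit is promoted into the in-memory cache before returning.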
absl::StatusOr<std::optional<AutotuneResult>> TryFindInCache(
const AutotuneCacheKey& key, absl::string_view cache_dir)
ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
std::optional<AutotuneResult> opt_result = TryToFindInInMemoryCache(key);
if (opt_result.has_value()) {
if (VLOG_IS_ON(1)) {
LOG(INFO) << "In-memory autotune cache hit";
} else if (VLOG_IS_ON(2)) {
LOG(INFO) << "In-memory autotune cache hit: key = " << key.ToString();
}
return opt_result;
}
TF_ASSIGN_OR_RETURN(opt_result,
TryToFindInFileBasedCacheIfEnabled(key, cache_dir));
if (opt_result.has_value()) {
AddResultToInMemoryCache(key, opt_result.value());
if (VLOG_IS_ON(1)) {
LOG(INFO) << "File-based autotune cache hit";
} else if (VLOG_IS_ON(2)) {
LOG(INFO) << "File-based autotune cache hit: key = " << key.ToString();
}
return opt_result;
}
if (VLOG_IS_ON(1)) {
LOG(INFO) << "Autotune cache miss";
} else if (VLOG_IS_ON(2)) {
LOG(INFO) << "Autotune cache miss: key = " << key.ToString();
}
return std::nullopt;
}
}
AutotuneCacheKey AutotunerUtil::GetKey(
const HloInstruction* instr, const AutotuneConfig& config) {
return AutotuneCacheKey(config.GetModelStr(), *instr);
}
absl::StatusOr<bool> AutotunerUtil::IsInCache(
const AutotuneCacheKey& key, const AutotuneConfig& config) {
TF_ASSIGN_OR_RETURN(std::optional<AutotuneResult> opt_res,
TryFindInCache(key, config.autotune_cache_dir()));
return opt_res.has_value();
}
absl::StatusOr<bool> AutotunerUtil::AddResult(
const AutotuneCacheKey& key, AutotuneResult result,
const AutotuneConfig& config) {
TF_ASSIGN_OR_RETURN(
ResultAndInserted result_and_inserted,
AddResultToCaches(key, std::move(result), config.autotune_cache_dir()));
return result_and_inserted.inserted;
}
absl::StatusOr<AutotuneResult> AutotunerUtil::Autotune(
const HloInstruction* instr, const AutotuneConfig& config,
const AutotuneNoCacheFn& autotune_fn) {
const AutotuneCacheKey key = GetKey(instr, config);
TF_ASSIGN_OR_RETURN(std::optional<AutotuneResult> opt_res,
TryFindInCache(key, config.autotune_cache_dir()));
if (opt_res.has_value()) {
return opt_res.value();
}
if (config.should_require_complete_aot_autotune_results()) {
return NotFound(
"Complete XLA AOT autotuning results are required, but no AOT result "
"was found for key: %s",
key.ToString());
}
TF_ASSIGN_OR_RETURN(AutotuneResult autotune_result, autotune_fn());
TF_ASSIGN_OR_RETURN(ResultAndInserted result_and_inserted,
AddResultToCaches(key, std::move(autotune_result),
config.autotune_cache_dir()));
return result_and_inserted.result;
}
namespace {
bool IsTextProtoPath(absl::string_view file_path) {
return absl::EndsWith(file_path, ".txt") ||
absl::EndsWith(file_path, ".textproto") ||
absl::EndsWith(file_path, ".prototxt") ||
absl::EndsWith(file_path, ".pbtxt");
}
}
absl::Status AutotunerUtil::LoadAutotuneResults(
absl::string_view data, bool as_textproto) {
AutotuneResults results;
bool parse_success =
as_textproto ? tsl::protobuf::TextFormat::ParseFromString(
std::string(data), &results)
: results.ParseFromString(std::string(data));
if (!parse_success) {
return absl::InvalidArgumentError(
"Failed to parse autotune results string.");
}
if (results.version() != kVersion) {
return absl::InvalidArgumentError(absl::StrFormat(
"Version mismatch in autotune results. Expected %d but was %d",
kVersion, results.version()));
}
TF_RETURN_IF_ERROR(LoadAutotuneResults(results));
return absl::OkStatus();
}
absl::StatusOr<std::string> AutotunerUtil::SerializeAutotuneResults(
bool as_textproto) {
AutotuneResults results;
TF_RETURN_IF_ERROR(SerializeAutotuneResults(&results));
return AutotuneResultsToString(results, as_textproto);
}
absl::Status AutotunerUtil::SerializeAutotuneResultsToFile(
const AutotuneResults& results, absl::string_view file_path) {
TF_RET_CHECK(!file_path.empty());
TF_RET_CHECK(results.version() > 0)
<< "Did you call SerializeAutotuneResults to get this AutotuneResults?";
std::string resolved_path;
if (!tsl::io::ResolveTestPrefixes(file_path, resolved_path)) {
return FailedPrecondition("File path can not be resolved: %s", file_path);
}
TF_ASSIGN_OR_RETURN(
std::string autotune_results_str,
AutotuneResultsToString(results, IsTextProtoPath(resolved_path)));
TF_RETURN_IF_ERROR(tsl::WriteStringToFile(tsl::Env::Default(), resolved_path,
autotune_results_str));
LOG(INFO) << "Autotune results serialized to file: " << resolved_path;
return absl::OkStatus();
}
absl::Status AutotunerUtil::SerializeAutotuneResultsToFile(
absl::string_view file_path) {
AutotuneResults results;
TF_RETURN_IF_ERROR(SerializeAutotuneResults(&results));
return SerializeAutotuneResultsToFile(results, file_path);
}
absl::Status AutotunerUtil::LoadAutotuneResultsFromFile(
absl::string_view file_path) {
TF_RET_CHECK(!file_path.empty());
std::string resolved_path;
if (!tsl::io::ResolveTestPrefixes(file_path, resolved_path)) {
return FailedPrecondition("File path can not be resolved: %s", file_path);
}
if (!tsl::Env::Default()->FileExists(resolved_path).ok()) {
return FailedPrecondition("Autotune results file does not exist: %s",
resolved_path);
}
std::string autotune_results_str;
TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(), resolved_path,
&autotune_results_str));
TF_RETURN_IF_ERROR(LoadAutotuneResults(autotune_results_str,
IsTextProtoPath(resolved_path)));
LOG(INFO) << "Autotune results loaded from file: " << resolved_path;
return absl::OkStatus();
}
absl::StatusOr<se::RedzoneAllocator>
AutotunerUtil::CreateRedzoneAllocator(const AutotuneConfig& config,
const DebugOptions& opts) {
TF_ASSIGN_OR_RETURN(se::Stream * stream, config.GetStream());
return se::RedzoneAllocator(
stream, config.GetAllocator(), PtxOptsFromDebugOptions(opts),
std::numeric_limits<int64_t>::max(),
config.should_check_correctness()
? opts.xla_gpu_redzone_padding_bytes()
: 0);
}
}
} | #include "xla/service/gpu/autotuner_util.h"
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/autotune_results.pb.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::Not;
using ::testing::TempDir;
using ::tsl::testing::StatusIs;
class AutotunerUtilTest : public HloTestBase {
protected:
static constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
p0 = f16[1,16,17,3] parameter(0)
p1 = s8[16,17,3] parameter(1)
cp1 = f16[16,17,3] convert(p1)
ROOT _ = f16[1,16,16] dot(p0, cp1),
lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2}
})";
static constexpr absl::string_view kResultText = R"(
version: 3
results {
device: "sm_8.0 with 42331013120B RAM, 108 cores, 1410000KHz clock, 1215000KHz mem clock, 41943040B L2$"
hlo: "{\n tmp_0 = f16[1,16,17,3]{3,2,1,0} parameter(0)\n tmp_1 = f16[16,51]{1,0} bitcast(f16[1,16,17,3]{3,2,1,0} tmp_0)\n tmp_2 = s8[16,17,3]{2,1,0} parameter(1)\n tmp_3 = s8[51,16]{0,1} bitcast(s8[16,17,3]{2,1,0} tmp_2)\n tmp_4 = f16[51,16]{0,1} convert(s8[51,16]{0,1} tmp_3)\n tmp_5 = f16[16,16]{1,0} dot(f16[16,51]{1,0} tmp_1, f16[51,16]{0,1} tmp_4), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n ROOT tmp_6 = f16[1,16,16]{2,1,0} bitcast(f16[16,16]{1,0} tmp_5)\n}"
result {
run_time {
nanos: 31744
}
triton {
block_m: 32
block_n: 32
block_k: 32
split_k: 1
num_stages: 1
num_warps: 4
num_ctas: 1
}
}
})";
void SetUp() override { AutotunerUtil::ClearAutotuneResults(); }
std::string GetUniqueTempFilePath(absl::string_view suffix) {
std::string filename = TempDir();
CHECK(tsl::Env::Default()->CreateUniqueFileName(&filename,
std::string(suffix)));
return filename;
}
std::string ExpectToReadNonEmptyFile(absl::string_view file_path) {
std::string str;
tsl::Env* env = tsl::Env::Default();
TF_EXPECT_OK(tsl::ReadFileToString(env, std::string(file_path), &str));
EXPECT_THAT(str, Not(IsEmpty()));
return str;
}
static std::unique_ptr<stream_executor::StreamExecutor> NewStreamExecutor() {
stream_executor::Platform* platform =
stream_executor::PlatformManager::PlatformWithName("Host").value();
stream_executor::StreamExecutorConfig config(0);
return platform->GetUncachedExecutor(config).value();
}
absl::Status PopulateResultCache() {
EXPECT_TRUE(AutotunerUtil::ResultCacheIsEmpty());
TF_RETURN_IF_ERROR(AutotunerUtil::LoadAutotuneResults(kResultText, true));
EXPECT_FALSE(AutotunerUtil::ResultCacheIsEmpty());
return absl::OkStatus();
}
};
TEST_F(AutotunerUtilTest, SerializeAutotuneResultsToFile_TextProto1) {
TF_EXPECT_OK(PopulateResultCache());
std::string kFilePath = GetUniqueTempFilePath(".txt");
TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
std::string autotune_results_str = ExpectToReadNonEmptyFile(kFilePath);
AutotuneResults results;
EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(autotune_results_str,
&results));
EXPECT_GT(results.results_size(), 0);
}
TEST_F(AutotunerUtilTest, SerializeAutotuneResultsToFile_TextProto2) {
TF_EXPECT_OK(PopulateResultCache());
std::string kFilePath = GetUniqueTempFilePath(".textproto");
TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
std::string autotune_results_str = ExpectToReadNonEmptyFile(kFilePath);
AutotuneResults results;
EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(autotune_results_str,
&results));
}
TEST_F(AutotunerUtilTest, SerializeAutotuneResultsToFile_Protobuf) {
TF_EXPECT_OK(PopulateResultCache());
std::string kFilePath = GetUniqueTempFilePath(".pb");
TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
std::string autotune_results_str = ExpectToReadNonEmptyFile(kFilePath);
AutotuneResults results;
EXPECT_TRUE(results.ParseFromString(autotune_results_str));
}
TEST_F(AutotunerUtilTest, LoadAutotuneResultsFromFile_TextProto1) {
TF_EXPECT_OK(PopulateResultCache());
std::string kFilePath = GetUniqueTempFilePath(".txt");
TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
AutotunerUtil::ClearAutotuneResults();
EXPECT_TRUE(AutotunerUtil::ResultCacheIsEmpty());
TF_EXPECT_OK(AutotunerUtil::LoadAutotuneResultsFromFile(kFilePath));
EXPECT_FALSE(AutotunerUtil::ResultCacheIsEmpty());
}
TEST_F(AutotunerUtilTest, LoadAutotuneResultsFromFile_TextProto2) {
TF_EXPECT_OK(PopulateResultCache());
std::string kFilePath = GetUniqueTempFilePath(".textproto");
TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
AutotunerUtil::ClearAutotuneResults();
EXPECT_TRUE(AutotunerUtil::ResultCacheIsEmpty());
TF_EXPECT_OK(AutotunerUtil::LoadAutotuneResultsFromFile(kFilePath));
EXPECT_FALSE(AutotunerUtil::ResultCacheIsEmpty());
}
TEST_F(AutotunerUtilTest, LoadAutotuneResultsFromFile_Protobuf) {
TF_EXPECT_OK(PopulateResultCache());
std::string kFilePath = GetUniqueTempFilePath(".pb");
TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
AutotunerUtil::ClearAutotuneResults();
EXPECT_TRUE(AutotunerUtil::ResultCacheIsEmpty());
TF_EXPECT_OK(AutotunerUtil::LoadAutotuneResultsFromFile(kFilePath));
EXPECT_FALSE(AutotunerUtil::ResultCacheIsEmpty());
}
TEST_F(AutotunerUtilTest, ResultConflictsAreDetected) {
TF_EXPECT_OK(PopulateResultCache());
std::string kFilePath = GetUniqueTempFilePath(".pb");
TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
EXPECT_THAT(AutotunerUtil::LoadAutotuneResultsFromFile(kFilePath),
StatusIs(absl::StatusCode::kInternal,
HasSubstr("Duplicate autotuning result")));
}
TEST_F(AutotunerUtilTest, FailIfRequireCompleteAotAutotuning) {
std::string kFilePath = GetUniqueTempFilePath(".txt");
auto hlo_module = GetOptimizedModule(kHloText);
TF_EXPECT_OK(hlo_module.status());
std::vector<HloComputation*> computations =
(*hlo_module)
->MakeNonfusionComputations(absl::flat_hash_set<absl::string_view>());
EXPECT_THAT(computations, Not(IsEmpty()));
const HloInstruction* instruction = *computations[0]->instructions().begin();
std::unique_ptr<stream_executor::StreamExecutor> executor =
NewStreamExecutor();
auto options = DebugOptions();
options.set_xla_gpu_require_complete_aot_autotune_results(true);
AutotuneConfig config(DeviceConfig{executor.get()}, options);
EXPECT_THAT(
AutotunerUtil::Autotune(instruction, config,
[&] { return AutotuneResult(); }),
StatusIs(
absl::StatusCode::kNotFound,
HasSubstr("Complete XLA AOT autotuning results are required, but "
"no AOT result was found for key: <key model")));
}
TEST_F(AutotunerUtilTest, OkIfJitAutotuningDisabledButAlreadyLoadedAOT) {
  auto hlo_module = GetOptimizedModule(kHloText);
  TF_EXPECT_OK(hlo_module.status());
std::vector<HloComputation*> computations =
(*hlo_module)
->MakeNonfusionComputations(absl::flat_hash_set<absl::string_view>());
EXPECT_THAT(computations, Not(IsEmpty()));
const HloInstruction* instruction = *computations[0]->instructions().begin();
std::unique_ptr<stream_executor::StreamExecutor> executor =
NewStreamExecutor();
{
AutotuneConfig config(DeviceConfig{executor.get()}, DebugOptions());
TF_EXPECT_OK(AutotunerUtil::Autotune(instruction, config, [&] {
return AutotuneResult();
}).status());
}
auto options = DebugOptions();
options.set_xla_gpu_require_complete_aot_autotune_results(true);
AutotuneConfig config(DeviceConfig{executor.get()}, options);
TF_EXPECT_OK(AutotunerUtil::Autotune(instruction, config, [&] {
return AutotuneResult();
}).status());
}
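// Fixture for the file-based autotune cache: it points
// xla_gpu_per_fusion_autotune_cache_dir at a fresh temp directory and
// precomputes the expected cache file name, which is the base64-encoded
// SHA-256 hash of the cache key plus a ".textproto" suffix.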
class FileBasedCacheTest : public AutotunerUtilTest {
public:
  static std::string ToString(const tsl::protobuf::Message& message) {
std::string textproto;
CHECK(tsl::protobuf::TextFormat::PrintToString(message, &textproto));
return textproto;
}
static std::vector<std::string> GetFilesInDir(
const absl::string_view cache_dir) {
std::vector<std::string> files_in_cache;
TF_CHECK_OK(tsl::Env::Default()->GetChildren(std::string(cache_dir),
&files_in_cache));
return files_in_cache;
}
static std::string Read(const absl::string_view filepath) {
std::string file_content;
TF_CHECK_OK(tsl::ReadFileToString(tsl::Env::Default(),
std::string(filepath), &file_content));
return file_content;
}
static void Write(const absl::string_view filepath,
const absl::string_view content) {
TF_CHECK_OK(tsl::WriteStringToFile(tsl::Env::Default(),
std::string(filepath), content));
}
std::unique_ptr<stream_executor::StreamExecutor> executor_ =
NewStreamExecutor();
std::unique_ptr<HloModule> module_ =
ParseAndReturnVerifiedModule(kHloText).value();
const HloInstruction* dot_ = hlo_query::GetFirstInstructionWithOpcode(
*module_->entry_computation(), HloOpcode::kDot);
std::string cache_dir_ = [] {
tsl::Env* default_env = tsl::Env::Default();
std::string cache_dir;
CHECK(default_env->LocalTempFilename(&cache_dir));
CHECK_OK(default_env->CreateDir(cache_dir));
return cache_dir;
}();
AutotuneConfig config_ = AutotuneConfig(DeviceConfig{executor_.get()}, [&] {
DebugOptions options;
options.set_xla_gpu_per_fusion_autotune_cache_dir(cache_dir_);
return options;
}());
AutotuneCacheKey cache_key_ = AutotunerUtil::GetKey(dot_, config_);
std::string cache_filename_ = [&] {
absl::StatusOr<std::string> key_hash =
GetBase64EncodedSha256Hash(cache_key_.ToString());
CHECK_OK(key_hash.status());
return absl::StrCat(key_hash.value(), ".textproto");
}();
std::string cache_file_path_ = tsl::io::JoinPath(cache_dir_, cache_filename_);
const AutotuneResult result1_ = [] {
AutotuneResult result;
result.set_scratch_bytes(1);
return result;
}();
const AutotuneResult result2_ = [] {
AutotuneResult result;
result.set_scratch_bytes(2);
return result;
}();
};
TEST_F(FileBasedCacheTest, AutotuneWritesResultToTheCacheDir) {
TF_ASSERT_OK_AND_ASSIGN(
AutotuneResult result,
AutotunerUtil::Autotune(dot_, config_, [&] { return result1_; }));
EXPECT_EQ(ToString(result), ToString(result1_));
ASSERT_THAT(GetFilesInDir(cache_dir_), ElementsAre(cache_filename_));
EXPECT_EQ(Read(cache_file_path_), ToString(result1_));
}
TEST_F(FileBasedCacheTest, AutotuneReadsResultFromTheCacheDir) {
Write(cache_file_path_, ToString(result1_));
bool cache_hit = true;
TF_ASSERT_OK_AND_ASSIGN(AutotuneResult result,
AutotunerUtil::Autotune(dot_, config_, [&] {
cache_hit = false;
return result2_;
}));
EXPECT_TRUE(cache_hit);
EXPECT_EQ(ToString(result), ToString(result1_));
}
TEST_F(FileBasedCacheTest,
RepeatedAutotuneCallsDontReadOrWriteTheCacheFileAgain) {
auto check_autotune_cache_hit = [](const HloInstruction* instr,
const AutotuneConfig& config,
const AutotuneResult& expected_result) {
bool cache_hit = true;
TF_ASSERT_OK_AND_ASSIGN(AutotuneResult result,
AutotunerUtil::Autotune(instr, config, [&] {
cache_hit = false;
AutotuneResult new_result;
new_result.set_scratch_bytes(2);
return new_result;
}));
EXPECT_TRUE(cache_hit);
EXPECT_EQ(ToString(result), ToString(expected_result));
};
Write(cache_file_path_, ToString(result1_));
check_autotune_cache_hit(dot_, config_, result1_);
constexpr absl::string_view kPlaceholderContent = "placeholder content";
Write(cache_file_path_, kPlaceholderContent);
check_autotune_cache_hit(dot_, config_, result1_);
EXPECT_EQ(Read(cache_file_path_), kPlaceholderContent);
}
TEST_F(FileBasedCacheTest,
IsInCacheReturnsTrueIfTheResultIsInTheFileBasedCache) {
Write(cache_file_path_, ToString(result1_));
TF_ASSERT_OK_AND_ASSIGN(bool is_in_cache,
AutotunerUtil::IsInCache(cache_key_, config_));
EXPECT_TRUE(is_in_cache);
}
TEST_F(FileBasedCacheTest, IsInCacheReturnsFalseIfTheResultIsNotInEitherCache) {
TF_ASSERT_OK_AND_ASSIGN(bool is_in_cache,
AutotunerUtil::IsInCache(cache_key_, config_));
EXPECT_FALSE(is_in_cache);
}
TEST_F(FileBasedCacheTest, AddResultAddsTheResultToTheFileBasedCache) {
TF_ASSERT_OK_AND_ASSIGN(
bool added, AutotunerUtil::AddResult(cache_key_, result1_, config_));
EXPECT_TRUE(added);
ASSERT_THAT(GetFilesInDir(cache_dir_), ElementsAre(cache_filename_));
EXPECT_EQ(Read(cache_file_path_), ToString(result1_));
}
TEST_F(FileBasedCacheTest, RepeatedAddResultDoesNotWriteTheFileAgain) {
{
TF_ASSERT_OK_AND_ASSIGN(
bool added, AutotunerUtil::AddResult(cache_key_, result1_, config_));
EXPECT_TRUE(added);
}
ASSERT_THAT(GetFilesInDir(cache_dir_), ElementsAre(cache_filename_));
EXPECT_EQ(Read(cache_file_path_), ToString(result1_));
constexpr absl::string_view kPlaceholderContent = "placeholder content";
Write(cache_file_path_, kPlaceholderContent);
{
TF_ASSERT_OK_AND_ASSIGN(
bool added, AutotunerUtil::AddResult(cache_key_, result1_, config_));
EXPECT_FALSE(added);
}
EXPECT_EQ(Read(cache_file_path_), kPlaceholderContent);
}
}
}
} |
2,061 | cpp | tensorflow/tensorflow | buffer_comparator | third_party/xla/xla/service/gpu/buffer_comparator.cc | third_party/xla/xla/service/gpu/buffer_comparator_test.cc | #ifndef XLA_SERVICE_GPU_BUFFER_COMPARATOR_H_
#define XLA_SERVICE_GPU_BUFFER_COMPARATOR_H_
#include "absl/status/statusor.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/stream_executor.h"
#if TENSORFLOW_USE_ROCM
#include "rocm/rocm_config.h"
#endif
namespace xla::gpu {
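// Compares two device buffers element-wise on the GPU (with a host-side
// cross-check) using a relative tolerance. Illustrative usage, assuming a
// valid stream and two same-sized device buffers (sketch, not from the
// original source):
//
//   BufferComparator comparator(shape, module_config, /*tolerance=*/0.1);
//   TF_ASSIGN_OR_RETURN(
//       bool equal, comparator.CompareEqual(stream, current, expected));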
class BufferComparator {
public:
BufferComparator(const BufferComparator&) = delete;
BufferComparator(BufferComparator&&) = default;
BufferComparator(const Shape& shape, const HloModuleConfig& config,
double tolerance = 0.1);
absl::StatusOr<bool> CompareEqual(se::Stream* stream,
se::DeviceMemoryBase current,
se::DeviceMemoryBase expected) const;
private:
template <typename ElementT, typename ComparisonT>
absl::StatusOr<bool> CompareEqualParameterized(se::Stream* stream,
se::DeviceMemoryBase current,
se::DeviceMemoryBase expected,
std::string_view kernel_name,
void* kernel_symbol) const;
template <typename ElementType, typename ComparisonType>
absl::StatusOr<bool> HostCompare(se::Stream* stream,
se::DeviceMemoryBase current,
se::DeviceMemoryBase expected) const;
template <typename ElementT>
absl::StatusOr<bool> DeviceCompare(se::Stream* stream,
se::DeviceMemoryBase current,
se::DeviceMemoryBase expected,
std::string_view kernel_name,
void* kernel_symbol) const;
Shape shape_;
HloModuleConfig config_;
double tolerance_;
};
namespace buffer_comparator {
void* fp8_e4m3fn_comparison();
void* fp8_e5m2_comparison();
#if TENSORFLOW_USE_ROCM && TF_ROCM_VERSION >= 60200
void* fp8_e4m3fnuz_comparison();
void* fp8_e5m2fnuz_comparison();
#endif
void* fp16_comparison();
void* bf16_comparison();
void* fp32_comparison();
void* fp64_comparison();
void* int8_comparison();
void* int32_comparison();
}
}
#endif
#include "xla/service/gpu/buffer_comparator.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <string_view>
#include <type_traits>
#include <vector>
#include "Eigen/Core"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_handle.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/typed_kernel_factory.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
template <typename ElementT>
using ComparisonKernelT =
se::TypedKernel<se::DeviceMemory<ElementT>, se::DeviceMemory<ElementT>,
float, uint64_t, se::DeviceMemory<uint64_t>>;
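// Device-side comparison: zero-initializes a single uint64_t mismatch
// counter, launches the element-wise comparison kernel over the buffer, and
// reads the counter back. The buffers compare equal iff the counter is 0.
// Mismatched buffer sizes are rejected up front with an Internal error.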
template <typename ElementT>
absl::StatusOr<bool> BufferComparator::DeviceCompare(
se::Stream* stream, se::DeviceMemoryBase current,
se::DeviceMemoryBase expected, std::string_view kernel_name,
void* kernel_symbol) const {
se::StreamExecutor* executor = stream->parent();
se::DeviceMemoryHandle out_param(executor,
executor->AllocateScalar<uint64_t>());
TF_RETURN_IF_ERROR(stream->MemZero(out_param.memory_ptr(), sizeof(uint64_t)));
if (current.size() != expected.size()) {
return Internal("Mismatched buffer size: %d bytes vs. %d bytes",
current.size(), expected.size());
}
se::DeviceMemory<ElementT> current_typed(current);
se::DeviceMemory<ElementT> expected_typed(expected);
uint64_t buffer_size = current_typed.ElementCount();
TF_ASSIGN_OR_RETURN(
ComparisonKernelT<ElementT> comparison_kernel,
(se::TypedKernelFactory<
se::DeviceMemory<ElementT>, se::DeviceMemory<ElementT>, float,
uint64_t, se::DeviceMemory<uint64_t>>::Create(executor, kernel_name,
kernel_symbol)));
const se::DeviceDescription& gpu_device_info =
executor->GetDeviceDescription();
LaunchDimensions dim = CalculateLaunchDimensions(shape_, gpu_device_info);
se::DeviceMemory<uint64_t> as_uint64(out_param.memory());
TF_RETURN_IF_ERROR(stream->ThenLaunch(
dim.thread_counts_per_block(), dim.block_counts(), comparison_kernel,
current_typed, expected_typed, static_cast<float>(tolerance_),
buffer_size, as_uint64));
uint64_t result = -1;
CHECK_EQ(out_param.memory().size(), sizeof(result));
TF_RETURN_IF_ERROR(
stream->Memcpy(&result, out_param.memory(), sizeof(result)));
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
return result == 0;
}
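// Host-side fallback: copies both buffers to the host and re-runs the
// comparison there. fp16 values are first canonicalized by clamping to
// +/-65505 so that host and device agree on overflowed values; two values
// match when both are NaN, both are the same infinity, or their relative
// difference |a - b| / (max(|a|, |b|) + 1) is below the tolerance. At most
// the first 10 differences are logged.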
template <typename ElementType, typename ComparisonType>
absl::StatusOr<bool> BufferComparator::HostCompare(
se::Stream* stream, se::DeviceMemoryBase current,
se::DeviceMemoryBase expected) const {
int64_t n = current.size() / sizeof(ElementType);
std::vector<ElementType> host_current(n), host_expected(n);
TF_RETURN_IF_ERROR(
stream->Memcpy(host_current.data(), current, current.size()));
TF_RETURN_IF_ERROR(
stream->Memcpy(host_expected.data(), expected, expected.size()));
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
const auto canonicalize = [](ComparisonType a) -> ComparisonType {
if (std::is_same<ElementType, Eigen::half>::value && a) {
constexpr ComparisonType kMaxFp16Value = 65505;
if (std::isnan(a)) {
return a;
}
return std::max(-kMaxFp16Value, std::min(a, kMaxFp16Value));
}
return a;
};
int differences_seen = 0;
for (int64_t i = 0; i < n && differences_seen < 10; ++i) {
auto current_value = static_cast<ComparisonType>(host_current[i]);
auto expected_value = static_cast<ComparisonType>(host_expected[i]);
ComparisonType current_value_canonical = canonicalize(current_value);
ComparisonType expected_value_canonical = canonicalize(expected_value);
if (std::isnan(current_value_canonical) &&
std::isnan(expected_value_canonical)) {
continue;
}
if (std::isinf(current_value_canonical) &&
std::isinf(expected_value_canonical) &&
current_value_canonical == expected_value_canonical) {
continue;
}
if (std::isfinite(current_value_canonical) !=
std::isfinite(expected_value_canonical) ||
!(std::abs(current_value_canonical - expected_value_canonical) /
(std::max(std::abs(current_value_canonical),
std::abs(expected_value_canonical)) +
1) <
tolerance_)) {
++differences_seen;
LOG(ERROR) << "Difference at " << i << ": " << current_value
<< ", expected " << expected_value;
}
}
return differences_seen == 0;
}
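// Runs the device comparison first. If it reports a mismatch, the host
// comparison is re-run as a sanity check and must agree that the buffers
// differ; a disagreement indicates a bug in the device kernel.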
template <typename ElementT, typename ComparisonT>
absl::StatusOr<bool> BufferComparator::CompareEqualParameterized(
se::Stream* stream, se::DeviceMemoryBase current,
se::DeviceMemoryBase expected, std::string_view kernel_name,
void* kernel_symbol) const {
XLA_SCOPED_LOGGING_TIMER("BufferComparator::CompareEqual");
TF_ASSIGN_OR_RETURN(bool result,
DeviceCompare<ElementT>(stream, current, expected,
kernel_name, kernel_symbol));
if (result) {
return true;
}
TF_ASSIGN_OR_RETURN(bool host_return, (HostCompare<ElementT, ComparisonT>(
stream, current, expected)));
CHECK_EQ(host_return, result)
<< "Host comparison succeeded even though GPU comparison failed.";
return false;
}
absl::StatusOr<bool> BufferComparator::CompareEqual(
se::Stream* stream, se::DeviceMemoryBase current,
se::DeviceMemoryBase expected) const {
switch (shape_.element_type()) {
#if GOOGLE_CUDA
case xla::F8E4M3FN:
return CompareEqualParameterized<tsl::float8_e4m3fn, float>(
stream, current, expected, "fp8_e4m3fn_comparison",
buffer_comparator::fp8_e4m3fn_comparison());
case xla::F8E5M2:
return CompareEqualParameterized<tsl::float8_e5m2, float>(
stream, current, expected, "fp8_e5m2_comparison",
buffer_comparator::fp8_e5m2_comparison());
#endif
#if TENSORFLOW_USE_ROCM && TF_ROCM_VERSION >= 60200
case xla::F8E4M3FNUZ:
return CompareEqualParameterized<tsl::float8_e4m3fnuz, float>(
stream, current, expected, "fp8_e4m3fnuz_comparison",
buffer_comparator::fp8_e4m3fnuz_comparison());
case xla::F8E5M2FNUZ:
return CompareEqualParameterized<tsl::float8_e5m2fnuz, float>(
stream, current, expected, "fp8_e5m2fnuz_comparison",
buffer_comparator::fp8_e5m2fnuz_comparison());
#endif
case xla::F16:
return CompareEqualParameterized<Eigen::half, float>(
stream, current, expected, "fp16_comparison",
buffer_comparator::fp16_comparison());
case xla::BF16:
return CompareEqualParameterized<Eigen::bfloat16, float>(
stream, current, expected, "bf16_comparison",
buffer_comparator::bf16_comparison());
case xla::F32:
return CompareEqualParameterized<float, float>(
stream, current, expected, "fp32_comparison",
buffer_comparator::fp32_comparison());
case xla::F64:
return CompareEqualParameterized<double, double>(
stream, current, expected, "fp64_comparison",
buffer_comparator::fp64_comparison());
case xla::S8:
return CompareEqualParameterized<int8_t, float>(
stream, current, expected, "int8_comparison",
buffer_comparator::int8_comparison());
case xla::S32:
return CompareEqualParameterized<int32_t, float>(
stream, current, expected, "int32_comparison",
buffer_comparator::int32_comparison());
default:
return Unimplemented("Unimplemented element type");
}
}
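// Complex types have no dedicated comparison kernel: C64/C128 buffers are
// reinterpreted as F32/F64 buffers with twice as many elements by doubling
// dimension 0 of the shape.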
BufferComparator::BufferComparator(const Shape& shape,
const HloModuleConfig& config,
double tolerance)
: shape_(shape), config_(config), tolerance_(tolerance) {
auto double_dim_size = [&]() {
int64_t prev_zero_dim_size = shape_.dimensions(0);
shape_.set_dimensions(0, prev_zero_dim_size * 2);
};
if (shape_.element_type() == PrimitiveType::C64) {
shape_.set_element_type(PrimitiveType::F32);
double_dim_size();
} else if (shape_.element_type() == PrimitiveType::C128) {
shape_.set_element_type(PrimitiveType::F64);
double_dim_size();
}
}
}
} | #include "xla/service/gpu/buffer_comparator.h"
#include <cmath>
#include <complex>
#include <cstdint>
#include <limits>
#include <vector>
#include "xla/primitive_util.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_handle.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream.h"
#include "xla/types.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/status.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
constexpr double kDefaultTolerance = 0.1;
class BufferComparatorTest : public testing::Test {
protected:
BufferComparatorTest()
#if GOOGLE_CUDA
: platform_(se::PlatformManager::PlatformWithName("CUDA").value()),
#elif TENSORFLOW_USE_ROCM
: platform_(se::PlatformManager::PlatformWithName("ROCM").value()),
#endif
stream_exec_(platform_->ExecutorForDevice(0).value()) {
}
template <typename ElementType>
bool CompareEqualBuffers(const std::vector<ElementType>& current,
const std::vector<ElementType>& expected,
double tolerance) {
auto stream = stream_exec_->CreateStream().value();
se::DeviceMemoryHandle current_buffer(
stream_exec_, stream_exec_->AllocateArray<ElementType>(current.size()));
se::DeviceMemoryHandle expected_buffer(
stream_exec_,
stream_exec_->AllocateArray<ElementType>(expected.size()));
TF_CHECK_OK(stream->Memcpy(current_buffer.memory_ptr(), current.data(),
current_buffer.memory().size()));
TF_CHECK_OK(stream->Memcpy(expected_buffer.memory_ptr(), expected.data(),
expected_buffer.memory().size()));
TF_CHECK_OK(stream->BlockHostUntilDone());
BufferComparator comparator(
ShapeUtil::MakeShape(
primitive_util::NativeToPrimitiveType<ElementType>(),
{static_cast<int64_t>(current.size())}),
HloModuleConfig(), tolerance);
return comparator
.CompareEqual(stream.get(), current_buffer.memory(),
expected_buffer.memory())
.value();
}
template <typename ElementType>
bool CompareEqualFloatBuffers(const std::vector<float>& lhs_float,
const std::vector<float>& rhs_float,
double tolerance = kDefaultTolerance) {
std::vector<ElementType> lhs(lhs_float.begin(), lhs_float.end());
std::vector<ElementType> rhs(rhs_float.begin(), rhs_float.end());
return CompareEqualBuffers(lhs, rhs, tolerance);
}
template <typename ElementType>
bool CompareEqualComplex(const std::vector<std::complex<ElementType>>& lhs,
const std::vector<std::complex<ElementType>>& rhs) {
return CompareEqualBuffers<std::complex<ElementType>>(lhs, rhs,
kDefaultTolerance);
}
se::Platform* platform_;
se::StreamExecutor* stream_exec_;
};
TEST_F(BufferComparatorTest, TestComplex) {
EXPECT_FALSE(
CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {6, 7}}));
EXPECT_TRUE(CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}},
{{0.1, 0.2}, {2.2, 3.3}}));
EXPECT_TRUE(
CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {2, 3}}));
EXPECT_FALSE(
CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {6, 3}}));
EXPECT_FALSE(
CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {6, 7}}));
EXPECT_FALSE(
CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}}, {{0.1, 6}, {2, 3}}));
EXPECT_TRUE(CompareEqualComplex<double>({{0.1, 0.2}, {2, 3}},
{{0.1, 0.2}, {2.2, 3.3}}));
EXPECT_FALSE(
CompareEqualComplex<double>({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {2, 7}}));
}
TEST_F(BufferComparatorTest, TestNaNs) {
EXPECT_TRUE(
CompareEqualFloatBuffers<Eigen::half>({std::nanf("")}, {std::nanf("")}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({std::nanf("")},
{std::nanf("1234")}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({std::nanf("")}, {1.}));
EXPECT_TRUE(
CompareEqualFloatBuffers<float>({std::nanf("")}, {std::nanf("")}));
EXPECT_TRUE(
CompareEqualFloatBuffers<float>({std::nanf("")}, {std::nanf("1234")}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({std::nanf("")}, {1.}));
EXPECT_TRUE(
CompareEqualFloatBuffers<double>({std::nanf("")}, {std::nanf("")}));
EXPECT_TRUE(
CompareEqualFloatBuffers<double>({std::nanf("")}, {std::nanf("1234")}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({std::nanf("")}, {1.}));
}
TEST_F(BufferComparatorTest, TestInfs) {
const auto inf = std::numeric_limits<float>::infinity();
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({inf}, {std::nanf("")}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({inf}, {inf}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({inf}, {65504}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({-inf}, {-65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({inf}, {-65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({-inf}, {65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({inf}, {-20}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({-inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({-inf}, {-20}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({inf}, {std::nanf("")}));
EXPECT_TRUE(CompareEqualFloatBuffers<float>({inf}, {inf}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({inf}, {65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({-inf}, {-65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({inf}, {-65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({-inf}, {65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({inf}, {-20}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({-inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({-inf}, {-20}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({inf}, {std::nanf("")}));
EXPECT_TRUE(CompareEqualFloatBuffers<double>({inf}, {inf}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({inf}, {65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({-inf}, {-65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({inf}, {-65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({-inf}, {65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({inf}, {-20}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({-inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({-inf}, {-20}));
#if GOOGLE_CUDA
EXPECT_TRUE(
CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {std::nanf("")}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {inf}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {-inf}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {448}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {-448}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {-20}));
EXPECT_FALSE(
CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {std::nanf("")}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {inf}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {-inf}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {57344}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({-inf}, {-57344}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {-20}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({-inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({-inf}, {-20}));
#endif
}
TEST_F(BufferComparatorTest, TestNumbers) {
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({20}, {20.1}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({20}, {23.0}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({20}, {23.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({20}, {26.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({0}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({0.9}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({9}, {10}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({10}, {9}));
EXPECT_TRUE(CompareEqualFloatBuffers<float>({20}, {20.1}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({20}, {23.0}));
EXPECT_TRUE(CompareEqualFloatBuffers<float>({20}, {23.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({20}, {26.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({0}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<float>({0.9}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<float>({9}, {10}));
EXPECT_TRUE(CompareEqualFloatBuffers<float>({10}, {9}));
EXPECT_TRUE(CompareEqualFloatBuffers<double>({20}, {20.1}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({20}, {23.0}));
EXPECT_TRUE(CompareEqualFloatBuffers<double>({20}, {23.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({20}, {26.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({0}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<double>({0.9}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<double>({9}, {10}));
EXPECT_TRUE(CompareEqualFloatBuffers<double>({10}, {9}));
EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({100}, {101}));
EXPECT_FALSE(CompareEqualFloatBuffers<int8_t>({100}, {120}));
EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({100}, {120}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<int8_t>({90}, {120}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<int8_t>({0}, {10}));
EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({9}, {10}));
EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({90}, {100}));
EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({100}, {90}));
EXPECT_FALSE(CompareEqualFloatBuffers<int8_t>({-128}, {127}));
#if GOOGLE_CUDA
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({20}, {20.1}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({20}, {23.0}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({20}, {23.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({20}, {26.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({0}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({0.9}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({9}, {10}));
  EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({10}, {9}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({20}, {20.1}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({20}, {23.0}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({20}, {23.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({20}, {30.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({0}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({0.9}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({11}, {12}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({12}, {11}));
#endif
}
TEST_F(BufferComparatorTest, TestMultiple) {
{
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>(
{20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1}));
std::vector<float> lhs(200);
std::vector<float> rhs(200);
for (int i = 0; i < 200; i++) {
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>(lhs, rhs))
<< "should be the same at index " << i;
lhs[i] = 3;
rhs[i] = 5;
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>(lhs, rhs))
<< "should be the different at index " << i;
lhs[i] = 0;
rhs[i] = 0;
}
}
{
EXPECT_TRUE(CompareEqualFloatBuffers<float>(
{20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1}));
std::vector<float> lhs(200);
std::vector<float> rhs(200);
for (int i = 0; i < 200; i++) {
EXPECT_TRUE(CompareEqualFloatBuffers<float>(lhs, rhs))
<< "should be the same at index " << i;
lhs[i] = 3;
rhs[i] = 5;
EXPECT_FALSE(CompareEqualFloatBuffers<float>(lhs, rhs))
<< "should be the different at index " << i;
lhs[i] = 0;
rhs[i] = 0;
}
}
{
EXPECT_TRUE(CompareEqualFloatBuffers<double>(
{20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1}));
std::vector<float> lhs(200);
std::vector<float> rhs(200);
for (int i = 0; i < 200; i++) {
EXPECT_TRUE(CompareEqualFloatBuffers<double>(lhs, rhs))
<< "should be the same at index " << i;
lhs[i] = 3;
rhs[i] = 5;
EXPECT_FALSE(CompareEqualFloatBuffers<double>(lhs, rhs))
<< "should be the different at index " << i;
lhs[i] = 0;
rhs[i] = 0;
}
}
{
EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({20, 30, 40, 50, 60},
{21, 31, 41, 51, 61}));
std::vector<float> lhs(200);
std::vector<float> rhs(200);
for (int i = 0; i < 200; i++) {
EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>(lhs, rhs))
<< "should be the same at index " << i;
lhs[i] = 3;
rhs[i] = 5;
EXPECT_FALSE(CompareEqualFloatBuffers<int8_t>(lhs, rhs))
<< "should be the different at index " << i;
lhs[i] = 0;
rhs[i] = 0;
}
}
#if GOOGLE_CUDA
{
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>(
{20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1}));
std::vector<float> lhs(200);
std::vector<float> rhs(200);
for (int i = 0; i < 200; i++) {
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>(lhs, rhs))
<< "should be the same at index " << i;
lhs[i] = 3;
rhs[i] = 5;
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>(lhs, rhs))
<< "should be the different at index " << i;
lhs[i] = 0;
rhs[i] = 0;
}
}
{
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>(
{20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1}));
std::vector<float> lhs(200);
std::vector<float> rhs(200);
for (int i = 0; i < 200; i++) {
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>(lhs, rhs))
<< "should be the same at index " << i;
lhs[i] = 3;
rhs[i] = 5;
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>(lhs, rhs))
<< "should be the different at index " << i;
lhs[i] = 0;
rhs[i] = 0;
}
}
#endif
}
TEST_F(BufferComparatorTest, BF16) {
const int element_count = 3123;
int64_t rng_state = 0;
auto stream = stream_exec_->CreateStream().value();
se::DeviceMemoryHandle lhs(
stream_exec_,
stream_exec_->AllocateArray<Eigen::bfloat16>(element_count));
InitializeBuffer(stream.get(), BF16, &rng_state, lhs.memory());
se::DeviceMemoryHandle rhs(
stream_exec_,
stream_exec_->AllocateArray<Eigen::bfloat16>(element_count));
InitializeBuffer(stream.get(), BF16, &rng_state, rhs.memory());
BufferComparator comparator(ShapeUtil::MakeShape(BF16, {element_count}),
HloModuleConfig());
EXPECT_FALSE(comparator.CompareEqual(stream.get(), lhs.memory(), rhs.memory())
.value());
}
}
}
} |
2,062 | cpp | tensorflow/tensorflow | gpu_async_collective_annotator | null | null | #ifndef XLA_SERVICE_GPU_GPU_ASYNC_COLLECTIVE_ANNOTATOR_H_
#define XLA_SERVICE_GPU_GPU_ASYNC_COLLECTIVE_ANNOTATOR_H_
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
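// Annotates every asynchronous collective start op with a
// CollectiveBackendConfig whose is_sync bit is the negation of the supplied
// predicate. Illustrative usage, marking all collectives as async (sketch,
// not from the original source):
//
//   GpuAsyncCollectiveAnnotator annotator(HloPredicateTrue);
//   TF_ASSIGN_OR_RETURN(bool changed, annotator.Run(module, {}));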
class GpuAsyncCollectiveAnnotator : public HloModulePass {
public:
explicit GpuAsyncCollectiveAnnotator(HloPredicate is_collective_async)
: is_collective_async_(std::move(is_collective_async)) {}
absl::string_view name() const override {
return "gpu-async-collective-annotator";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
HloPredicate is_collective_async_;
};
}
}
#endif
#include "xla/service/gpu/gpu_async_collective_annotator.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
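// Visits all non-fusion computations and, for each async collective start
// op, records is_sync = !is_collective_async_(op) in the op's
// GpuBackendConfig. Returns true iff any instruction was annotated.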
absl::StatusOr<bool> GpuAsyncCollectiveAnnotator::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (!hlo_query::IsAsyncCollectiveStartOp(instruction)) {
continue;
}
CollectiveBackendConfig config;
config.set_is_sync(!is_collective_async_(instruction));
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
instruction->backend_config<GpuBackendConfig>());
*gpu_config.mutable_collective_backend_config() = config;
TF_RETURN_IF_ERROR(instruction->set_backend_config(gpu_config));
changed = true;
}
}
return changed;
}
}
} | #include "xla/service/gpu/gpu_async_collective_annotator.h"
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
constexpr absl::string_view kHloString = R"(
HloModule ModuleWithAsync
addf32 {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
addf16 {
p0 = f16[] parameter(0)
p1 = f16[] parameter(1)
ROOT add = f16[] add(p0, p1)
}
reduce_scatterf32 {
p0 = f32[2] parameter(0)
ROOT result = f32[1] reduce-scatter(p0), replica_groups={},
dimensions={0}, to_apply=addf32
}
ENTRY entry {
pf32 = f32[1] parameter(0)
pf16 = f16[1] parameter(1)
arf32-start = f32[1] all-reduce-start(pf32), to_apply=addf32
arf32-done = f32[1] all-reduce-done(arf32-start)
arf16-start = f16[1] all-reduce-start(pf16), to_apply=addf16
arf16-done = f16[1] all-reduce-done(arf16-start)
agf32-start = (f32[1], f32[2]) all-gather-start(pf32), dimensions={0}
agf32-done = f32[2] all-gather-done(agf32-start)
agf16-start = (f16[1], f16[2]) all-gather-start(pf16), dimensions={0}
agf16-done = f16[2] all-gather-done(agf16-start)
cpf32-start = (f32[1], f32[1], u32[], u32[]) collective-permute-start(pf32),
source_target_pairs={{0,1}, {1,0}}
cpf32-done = f32[1] collective-permute-done(cpf32-start)
cpf16-start = (f16[1], f16[1], u32[], u32[]) collective-permute-start(pf16),
source_target_pairs={{0,1}, {1,0}}
cpf16-done = f16[1] collective-permute-done(cpf16-start)
rsf32-start = ((f32[2]), f32[1]) async-start(agf32-done), calls=reduce_scatterf32
rsf32-done = f32[1] async-done(rsf32-start), calls=reduce_scatterf32
ROOT tuple = (f32[1], f16[1], f32[2], f16[2], f32[1], f16[1], f32[1])
tuple(arf32-done, arf16-done, agf32-done, agf16-done, cpf32-done,
cpf16-done, rsf32-done)
}
)";
struct TestCase {
std::string test_name;
HloPredicate is_async_predicate;
absl::flat_hash_set<absl::string_view> expected_async;
absl::flat_hash_set<absl::string_view> expected_sync;
};
class GpuAsyncCollectiveAnnotatorTest
: public HloTestBase,
public ::testing::WithParamInterface<TestCase> {};
XLA_TEST_P(GpuAsyncCollectiveAnnotatorTest, Test) {
const TestCase& test_case = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString, 2));
TF_ASSERT_OK_AND_ASSIGN(
bool changed, GpuAsyncCollectiveAnnotator(test_case.is_async_predicate)
.Run(module.get()));
EXPECT_TRUE(changed);
for (const HloInstruction* hlo :
module->entry_computation()->instructions()) {
if (!hlo_query::IsAsyncCollectiveStartOp(hlo)) {
continue;
}
auto gpu_config = hlo->backend_config<GpuBackendConfig>();
ASSERT_TRUE(gpu_config.ok());
const CollectiveBackendConfig& backend_config =
gpu_config.value().collective_backend_config();
if (test_case.expected_async.contains(hlo->name())) {
EXPECT_FALSE(backend_config.is_sync());
}
if (test_case.expected_sync.contains(hlo->name())) {
EXPECT_TRUE(backend_config.is_sync());
}
}
}
std::vector<TestCase> TestCases() {
HloPredicate is_f16 = [](const HloInstruction* hlo) {
return hlo->operand(0)->shape().element_type() == PrimitiveType::F16;
};
return {
{"all_async",
HloPredicateTrue,
{"arf32-start", "arf16-start", "agf32-start", "agf16-start",
"cpf32-start", "cpf16-start", "rsf32-start"},
{}},
{"all_sync",
HloPredicateFalse,
{},
{"arf32-start", "arf16-start", "agf32-start", "agf16-start",
"cpf32-start", "cpf16-start", "rsf32-start"}},
{"ar_async",
HloPredicateIsOp<HloOpcode::kAllReduceStart>,
{"arf32-start", "arf16-start"},
{"agf32-start", "agf16-start", "cpf32-start", "cpf16-start",
"rsf32-start"}},
{"cp_async",
HloPredicateIsOp<HloOpcode::kCollectivePermuteStart>,
{"cpf32-start", "cpf16-start"},
{"arf32-start", "arf16-start", "agf32-start", "agf16-start",
"rsf32-start"}},
{"f16_async",
is_f16,
{"arf16-start", "agf16-start", "cpf16-start"},
{"arf32-start", "agf32-start", "cpf32-start", "rsf32-start"}},
};
}
std::string TestCaseName(const ::testing::TestParamInfo<TestCase>& test_case) {
return test_case.param.test_name;
}
INSTANTIATE_TEST_SUITE_P(GpuAsyncCollectiveAnnotatorTest,
GpuAsyncCollectiveAnnotatorTest,
::testing::ValuesIn(TestCases()), TestCaseName);
}
}
} |
2,063 | cpp | tensorflow/tensorflow | buffer_allocations | third_party/xla/xla/service/gpu/buffer_allocations.cc | third_party/xla/xla/backends/cpu/runtime/buffer_allocations_test.cc | #ifndef XLA_SERVICE_GPU_BUFFER_ALLOCATIONS_H_
#define XLA_SERVICE_GPU_BUFFER_ALLOCATIONS_H_
#include <cstddef>
#include <set>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "xla/service/buffer_assignment.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
namespace xla {
namespace gpu {
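// A flat, index-addressable view of the device buffers backing a set of
// BufferAllocations. Slices are resolved by offsetting into the base buffer
// of the corresponding allocation. Illustrative usage (sketch, not from the
// original source):
//
//   BufferAllocations allocations(buffers, /*device_ordinal=*/0, allocator);
//   se::DeviceMemoryBase mem = allocations.GetDeviceAddress(slice);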
class BufferAllocations {
public:
BufferAllocations(absl::Span<se::DeviceMemoryBase const> buffers,
int device_ordinal,
se::DeviceMemoryAllocator* memory_allocator)
: buffers_(buffers.begin(), buffers.end()),
device_ordinal_(device_ordinal),
memory_allocator_(memory_allocator) {}
BufferAllocations(BufferAllocations&& other) = default;
BufferAllocations& operator=(BufferAllocations&& other) = default;
BufferAllocations(const BufferAllocations&) = delete;
BufferAllocations& operator=(const BufferAllocations&) = delete;
se::DeviceMemoryAllocator* memory_allocator() const {
return memory_allocator_;
}
int device_ordinal() const { return device_ordinal_; }
se::DeviceMemoryBase GetDeviceAddress(
BufferAllocation::Index buffer_index) const;
se::DeviceMemoryBase& GetMutableDeviceAddress(
BufferAllocation::Index buffer_index);
se::DeviceMemoryBase GetDeviceAddress(
const BufferAllocation::Slice& buffer_slice) const;
absl::Status TearDown(const std::set<se::DeviceMemoryBase>& live_addresses,
absl::Span<const BufferAllocation> allocations);
std::string ToString() const {
std::string out;
for (BufferAllocation::Index i = 0; i < buffers_.size(); ++i) {
const auto& buf = buffers_[i];
absl::StrAppendFormat(&out, "Buffer %d -> %p (%d B)", i, buf.opaque(),
buf.size());
}
return out;
}
size_t size() const { return buffers_.size(); }
private:
std::vector<se::DeviceMemoryBase> buffers_;
int device_ordinal_;
se::DeviceMemoryAllocator* memory_allocator_;
};
}
}
#endif
#include "xla/service/gpu/buffer_allocations.h"
#include <cstdint>
#include <set>
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/service/buffer_assignment.h"
#include "xla/stream_executor/device_memory.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
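// Frees buffers that are safe to release after execution: preallocated temp
// buffers, and maybe-live-out buffers whose address is not in
// `live_addresses`. The first deallocation failure is recorded and returned.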
absl::Status BufferAllocations::TearDown(
const std::set<se::DeviceMemoryBase>& live_addresses,
absl::Span<const BufferAllocation> allocations) {
absl::Status status;
const int64_t num_buffers = allocations.size();
for (BufferAllocation::Index i = 0; i < num_buffers; ++i) {
const BufferAllocation& allocation = allocations[i];
se::DeviceMemoryBase buffer_address = GetDeviceAddress(allocation.index());
if ((allocation.maybe_live_out() &&
!live_addresses.count(buffer_address)) ||
allocation.IsPreallocatedTempBuffer()) {
auto dealloc_result =
memory_allocator_->Deallocate(device_ordinal_, buffer_address);
if (!dealloc_result.ok() && status.ok()) {
status = dealloc_result;
}
}
}
return status;
}
se::DeviceMemoryBase BufferAllocations::GetDeviceAddress(
BufferAllocation::Index buffer_index) const {
CHECK_GE(buffer_index, 0);
CHECK_LT(buffer_index, buffers_.size());
return buffers_[buffer_index];
}
se::DeviceMemoryBase& BufferAllocations::GetMutableDeviceAddress(
BufferAllocation::Index buffer_index) {
CHECK_GE(buffer_index, 0);
CHECK_LT(buffer_index, buffers_.size());
return buffers_[buffer_index];
}
se::DeviceMemoryBase BufferAllocations::GetDeviceAddress(
const BufferAllocation::Slice& buffer_slice) const {
int64_t index = buffer_slice.index();
se::DeviceMemoryBase base = GetDeviceAddress(index);
int64_t offset = buffer_slice.offset();
  CHECK_LE(buffer_slice.offset(), base.size())
      << "slice offset " << offset << " must not exceed buffer #" << index
      << " size " << base.size();
  int64_t extent = offset + buffer_slice.size();
  CHECK_LE(extent, base.size())
      << "slice extent " << extent << " must not exceed buffer #" << index
      << " size " << base.size();
return base.GetByteSlice(buffer_slice.offset(), buffer_slice.size());
}
}
} | #include "xla/service/cpu/runtime/buffer_allocations.h"
#include <cstddef>
#include <vector>
#include "xla/service/buffer_assignment.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/stream_executor/device_memory.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
TEST(BufferAllocationsTest, GetDeviceAddress) {
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<float> data = {1.0, 2.0, 3.0, 4.0};
size_t size_in_bytes = data.size() * sizeof(float);
buffers.emplace_back(se::DeviceMemoryBase(data.data(), size_in_bytes));
BufferAllocations allocations(buffers);
BufferAllocation alloc(0, size_in_bytes, 0);
BufferAllocation::Slice slice(&alloc, 2 * sizeof(float),
sizeof(float));
TF_ASSERT_OK_AND_ASSIGN(se::DeviceMemoryBase alloc_mem,
allocations.GetDeviceAddress(0));
EXPECT_EQ(alloc_mem.opaque(), &data[0]);
TF_ASSERT_OK_AND_ASSIGN(se::DeviceMemoryBase slice_mem,
allocations.GetDeviceAddress(slice));
EXPECT_EQ(slice_mem.opaque(), &data[2]);
}
}
} |
2,064 | cpp | tensorflow/tensorflow | stream_executor_util | third_party/xla/xla/service/gpu/stream_executor_util.cc | third_party/xla/xla/service/gpu/stream_executor_util_test.cc | #ifndef XLA_SERVICE_GPU_STREAM_EXECUTOR_UTIL_H_
#define XLA_SERVICE_GPU_STREAM_EXECUTOR_UTIL_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <string_view>
#include <tuple>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/autotuning.pb.h"
#include "xla/layout.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/hlo_module_config.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
absl::StatusOr<se::dnn::VersionInfo> GetDnnVersionInfo(
stream_executor::StreamExecutor* stream_exec);
se::dnn::VersionInfo GetDnnVersionInfoOrDefault(
stream_executor::StreamExecutor* stream_exec,
se::dnn::VersionInfo fallback_version = se::dnn::VersionInfo{0, 0, 0});
absl::StatusOr<std::tuple<Layout, Layout, Layout>>
StreamExecutorConvLayoutsToXlaLayouts(const ConvolutionDimensionNumbers& dnums,
se::dnn::DataLayout input,
se::dnn::FilterLayout filter,
se::dnn::DataLayout output);
absl::StatusOr<
std::tuple<se::dnn::DataLayout, se::dnn::FilterLayout, se::dnn::DataLayout>>
XlaConvShapesToStreamExecutorLayouts(const ConvolutionDimensionNumbers& dnums,
const Shape& input, const Shape& filter,
const Shape& output);
std::tuple<std::optional<int64_t>, std::optional<int64_t>,
std::optional<int64_t>>
FindVectorizedFeatureDims(const ConvolutionDimensionNumbers& dnums,
const Shape& input, const Shape& filter,
const Shape& output);
absl::Mutex& GetGpuMutex(const se::StreamExecutor* stream_exec);
absl::StatusOr<std::unique_ptr<se::Kernel>> CreateKernel(
absl::string_view kernel_name, uint64_t num_args, absl::string_view ptx,
absl::Span<const uint8_t> cubin_data, se::StreamExecutor* stream_exec,
uint32_t shared_mem_bytes = 0);
absl::Status ExecuteKernelOnStream(const se::Kernel& kernel,
absl::Span<const se::DeviceMemoryBase> args,
const LaunchDimensions& dims,
se::Stream* stream);
absl::Status ExecuteKernelOnStream(const se::Kernel& kernel,
absl::Span<const se::DeviceMemoryBase> args,
const LaunchDimensions& dims,
const se::ClusterDim& cluster_dim,
se::Stream* stream);
void InitializeBuffer(se::Stream* stream, PrimitiveType buffer_type,
int64_t* rng_state, se::DeviceMemoryBase buffer);
absl::StatusOr<se::dnn::ConvolutionKind> GetDNNConvKindFromCudnnConvKind(
CudnnConvKind kind);
absl::StatusOr<se::dnn::NormKind> GetDNNNormKindFromCudnnNormKind(
CudnnNormKind kind);
absl::StatusOr<se::dnn::FMHAMaskKind> GetDNNFmhaMaskKindFromCudnnFmhaMaskKind(
CudnnfMHAMaskKind kind);
absl::StatusOr<se::dnn::DataType> GetDNNDataTypeFromPrimitiveType(
PrimitiveType type);
absl::StatusOr<AutotuneResult> PickBestResult(
absl::Span<AutotuneResult const> profile_results,
std::optional<std::string_view> instr_str,
HloModuleConfig hlo_module_config);
bool RequireDeterminism(const HloModuleConfig& config);
}
}
#endif
#include "xla/service/gpu/stream_executor_util.h"
#include <cstdint>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <optional>
#include <random>
#include <sstream>
#include <string_view>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/const_init.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/data_type.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_factory.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/typed_kernel_factory.h"
#include "xla/tsl/util/env_var.h"
#include "xla/tsl/util/proto/proto_utils.h"
#include "xla/util.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
absl::StatusOr<se::dnn::VersionInfo> GetDnnVersionInfo(
stream_executor::StreamExecutor* stream_exec) {
if (!stream_exec) {
return absl::InvalidArgumentError("StreamExecutor is null");
}
stream_executor::dnn::DnnSupport* dnn = stream_exec->AsDnn();
if (!dnn) {
return absl::FailedPreconditionError(
"DNN library initialization failed. Look at the errors above for more "
"details.");
}
return dnn->GetVersion();
}
se::dnn::VersionInfo GetDnnVersionInfoOrDefault(
stream_executor::StreamExecutor* stream_exec,
se::dnn::VersionInfo fallback_version) {
return GetDnnVersionInfo(stream_exec).value_or(fallback_version);
}
namespace {
using se::dnn::DataLayout;
using se::dnn::DataLayoutString;
using se::dnn::FilterLayout;
using se::dnn::FilterLayoutString;
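// Returns the smallest non-negative integer that does not occur in `vals`
// (used below to recover the position of the vectorized sub-dimension).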
int64_t FindMissingDnum(absl::Span<const int64_t> vals) {
for (int i = 0; i < vals.size(); i++) {
if (!absl::c_linear_search(vals, i)) {
return i;
}
}
return vals.size();
}
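// Translates a cuDNN data layout into an XLA layout, expressed major-to-minor
// over the given logical dimension numbers. Vectorized layouts (NCHW4/NCHW32)
// append the extra vector sub-dimension as the most minor one.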
absl::StatusOr<Layout> DataLayoutToXlaLayout(
DataLayout data_layout, int64_t batch_dimension, int64_t feature_dimension,
absl::Span<int64_t const> spatial_dimensions) {
std::vector<int64_t> layout;
switch (data_layout) {
case DataLayout::kBatchDepthYX:
layout.push_back(batch_dimension);
layout.push_back(feature_dimension);
layout.insert(layout.end(), spatial_dimensions.begin(),
spatial_dimensions.end());
break;
case DataLayout::kBatchDepthYX4:
case DataLayout::kBatchDepthYX32:
layout.push_back(batch_dimension);
layout.push_back(feature_dimension);
layout.insert(layout.end(), spatial_dimensions.begin(),
spatial_dimensions.end());
layout.push_back(FindMissingDnum(layout));
break;
case DataLayout::kBatchYXDepth:
layout.push_back(batch_dimension);
layout.insert(layout.end(), spatial_dimensions.begin(),
spatial_dimensions.end());
layout.push_back(feature_dimension);
break;
default:
return Internal("Invalid layout %s", DataLayoutString(data_layout));
}
return LayoutUtil::MakeLayoutFromMajorToMinor(layout);
}
}
absl::StatusOr<std::tuple<Layout, Layout, Layout>>
StreamExecutorConvLayoutsToXlaLayouts(const ConvolutionDimensionNumbers& dnums,
DataLayout input, FilterLayout filter,
DataLayout output) {
TF_ASSIGN_OR_RETURN(
Layout input_layout,
DataLayoutToXlaLayout(input, dnums.input_batch_dimension(),
dnums.input_feature_dimension(),
dnums.input_spatial_dimensions()));
TF_ASSIGN_OR_RETURN(
Layout output_layout,
      DataLayoutToXlaLayout(output, dnums.output_batch_dimension(),
dnums.output_feature_dimension(),
dnums.output_spatial_dimensions()));
std::vector<int64_t> filter_layout;
switch (filter) {
case FilterLayout::kOutputInputYX:
filter_layout.push_back(dnums.kernel_output_feature_dimension());
filter_layout.push_back(dnums.kernel_input_feature_dimension());
filter_layout.insert(filter_layout.end(),
dnums.kernel_spatial_dimensions().begin(),
dnums.kernel_spatial_dimensions().end());
break;
case FilterLayout::kOutputInputYX4:
filter_layout.push_back(dnums.kernel_output_feature_dimension());
filter_layout.push_back(dnums.kernel_input_feature_dimension());
filter_layout.insert(filter_layout.end(),
dnums.kernel_spatial_dimensions().begin(),
dnums.kernel_spatial_dimensions().end());
filter_layout.push_back(FindMissingDnum(filter_layout));
break;
case FilterLayout::kOutputYXInput:
filter_layout.push_back(dnums.kernel_output_feature_dimension());
filter_layout.insert(filter_layout.end(),
dnums.kernel_spatial_dimensions().begin(),
dnums.kernel_spatial_dimensions().end());
filter_layout.push_back(dnums.kernel_input_feature_dimension());
break;
default:
return Internal("Invalid filter layout %s for conv with dnums %s,",
FilterLayoutString(filter),
ConvolutionDimensionNumbersToString(dnums));
}
return std::make_tuple(input_layout,
LayoutUtil::MakeLayoutFromMajorToMinor(filter_layout),
output_layout);
}
absl::StatusOr<std::tuple<DataLayout, FilterLayout, DataLayout>>
XlaConvShapesToStreamExecutorLayouts(const ConvolutionDimensionNumbers& dnums,
const Shape& input, const Shape& filter,
const Shape& output) {
CHECK(input.has_layout());
CHECK(filter.has_layout());
CHECK(output.has_layout());
Layout nchw_input, nchw_filter, nchw_output;
std::tie(nchw_input, nchw_filter, nchw_output) =
StreamExecutorConvLayoutsToXlaLayouts(dnums, DataLayout::kBatchDepthYX,
FilterLayout::kOutputInputYX,
DataLayout::kBatchDepthYX)
.value();
Layout nchw_vect_input, nchw_vect_filter, nchw_vect_output;
std::tie(nchw_vect_input, nchw_vect_filter, nchw_vect_output) =
StreamExecutorConvLayoutsToXlaLayouts(dnums, DataLayout::kBatchDepthYX4,
FilterLayout::kOutputInputYX4,
DataLayout::kBatchDepthYX4)
.value();
Layout nhwc_input, nhwc_filter, nhwc_output;
std::tie(nhwc_input, nhwc_filter, nhwc_output) =
StreamExecutorConvLayoutsToXlaLayouts(dnums, DataLayout::kBatchYXDepth,
FilterLayout::kOutputYXInput,
DataLayout::kBatchYXDepth)
.value();
DataLayout input_layout;
if (LayoutUtil::Equal(input.layout(), nchw_input)) {
input_layout = DataLayout::kBatchDepthYX;
} else if (LayoutUtil::Equal(input.layout(), nchw_vect_input)) {
int64_t vect_size = input.dimensions(input.layout().minor_to_major(0));
if (vect_size == 4) {
input_layout = DataLayout::kBatchDepthYX4;
} else if (vect_size == 32) {
input_layout = DataLayout::kBatchDepthYX32;
} else {
return Internal(
"Invalid input shape %s for conv with dnums %s. Most-minor dim "
"should be 4 or 32, but was %d.",
ShapeUtil::HumanStringWithLayout(input),
ConvolutionDimensionNumbersToString(dnums), vect_size);
}
} else if (LayoutUtil::Equal(input.layout(), nhwc_input)) {
input_layout = DataLayout::kBatchYXDepth;
} else {
return Internal(
"Invalid input layout %s for conv with dnums %s; expected one of (%s, "
"%s, %s)",
LayoutUtil::HumanString(input.layout()),
ConvolutionDimensionNumbersToString(dnums), nchw_input.ToString(),
nchw_vect_input.ToString(), nhwc_input.ToString());
}
FilterLayout filter_layout;
if (LayoutUtil::Equal(filter.layout(), nchw_filter)) {
filter_layout = FilterLayout::kOutputInputYX;
} else if (LayoutUtil::Equal(filter.layout(), nchw_vect_filter)) {
int64_t vect_size = filter.dimensions(filter.layout().minor_to_major(0));
if (vect_size == 4) {
filter_layout = FilterLayout::kOutputInputYX4;
} else if (vect_size == 32) {
filter_layout = FilterLayout::kOutputInputYX32;
} else {
return Internal(
"Invalid filter shape %s for conv with dnums %s. Most-minor dim "
"should be 4 or 32, but was %d.",
ShapeUtil::HumanStringWithLayout(filter),
ConvolutionDimensionNumbersToString(dnums), vect_size);
}
} else if (LayoutUtil::Equal(filter.layout(), nhwc_filter)) {
filter_layout = FilterLayout::kOutputYXInput;
} else {
return Internal(
"Invalid filter layout %s for conv with dnums %s, expected one of (%s, "
"%s, %s)",
LayoutUtil::HumanString(filter.layout()),
ConvolutionDimensionNumbersToString(dnums), nchw_filter.ToString(),
nchw_vect_filter.ToString(), nhwc_filter.ToString());
}
DataLayout output_layout;
if (LayoutUtil::Equal(output.layout(), nchw_output)) {
output_layout = DataLayout::kBatchDepthYX;
} else if (LayoutUtil::Equal(output.layout(), nchw_vect_output)) {
int64_t vect_size = output.dimensions(output.layout().minor_to_major(0));
if (vect_size == 4) {
output_layout = DataLayout::kBatchDepthYX4;
} else if (vect_size == 32) {
output_layout = DataLayout::kBatchDepthYX32;
} else {
return Internal(
"Invalid output shape %s for conv with dnums %s. Most-minor dim "
"should be 4 or 32, but was %d.",
ShapeUtil::HumanStringWithLayout(output),
ConvolutionDimensionNumbersToString(dnums), vect_size);
}
} else if (LayoutUtil::Equal(output.layout(), nhwc_output)) {
output_layout = DataLayout::kBatchYXDepth;
} else {
return Internal("Invalid output layout %s for conv with dnums %s",
LayoutUtil::HumanString(output.layout()),
ConvolutionDimensionNumbersToString(dnums));
}
return std::make_tuple(input_layout, filter_layout, output_layout);
}
static std::optional<int64_t> FindVectorizedDim(int64_t rank, int64_t d0,
int64_t d1,
absl::Span<const int64_t> ds) {
for (int64_t i = 0; i < rank; i++) {
if (i == d0 || i == d1 || absl::c_linear_search(ds, i)) {
continue;
}
return i;
}
return std::nullopt;
}
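// Example: for a rank-5 input with batch=0, feature=1, and spatial dims
// {2, 3}, dimension 4 is the only one left unclaimed, so FindVectorizedDim
// returns 4; for an ordinary rank-4 NCHW shape every dimension is claimed and
// the result is std::nullopt.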
std::tuple<std::optional<int64_t>, std::optional<int64_t>,
std::optional<int64_t>>
FindVectorizedFeatureDims(const ConvolutionDimensionNumbers& dnums,
const Shape& input, const Shape& filter,
const Shape& output) {
return {
FindVectorizedDim(input.dimensions_size(), dnums.input_batch_dimension(),
dnums.input_feature_dimension(),
dnums.input_spatial_dimensions()),
FindVectorizedDim(filter.dimensions_size(),
dnums.kernel_input_feature_dimension(),
dnums.kernel_output_feature_dimension(),
dnums.kernel_spatial_dimensions()),
FindVectorizedDim(
output.dimensions_size(), dnums.output_batch_dimension(),
dnums.output_feature_dimension(), dnums.output_spatial_dimensions()),
};
}
absl::Mutex& GetGpuMutex(const se::StreamExecutor* stream_exec) {
static absl::Mutex mu(absl::kConstInit);
static auto* mutexes =
new std::map<std::pair<const se::Platform*, int64_t>,
absl::Mutex>();
absl::MutexLock global_lock(&mu);
auto it = mutexes
->emplace(std::piecewise_construct,
std::make_tuple(stream_exec->GetPlatform(),
stream_exec->device_ordinal()),
std::make_tuple())
.first;
return it->second;
}
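// Note: the map above is intentionally leaked and entries are never erased,
// so the returned absl::Mutex reference remains valid for the life of the
// process. Callers typically hold it to serialize work (e.g. autotuning) on a
// particular (platform, device ordinal) pair.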
absl::StatusOr<std::unique_ptr<se::Kernel>> CreateKernel(
absl::string_view kernel_name, uint64_t num_args, absl::string_view ptx,
absl::Span<const uint8_t> cubin_data, se::StreamExecutor* stream_exec,
uint32_t shared_mem_bytes) {
se::MultiKernelLoaderSpec loader_spec(num_args);
loader_spec.AddCudaPtxInMemory(ptx, kernel_name);
if (!cubin_data.empty()) {
loader_spec.AddCudaCubinInMemory(cubin_data, kernel_name);
}
TF_ASSIGN_OR_RETURN(std::unique_ptr<se::Kernel> kernel,
se::KernelFactory::Create(stream_exec, loader_spec));
se::KernelMetadata m;
m.set_shared_memory_bytes(shared_mem_bytes);
kernel->set_metadata(m);
return kernel;
}
absl::Status ExecuteKernelOnStream(const se::Kernel& kernel,
absl::Span<const se::DeviceMemoryBase> args,
const LaunchDimensions& dims,
se::Stream* stream) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<se::KernelArgsPackedArrayBase> kernel_args,
se::PackKernelArgs(args, kernel.metadata()));
return stream->Launch(dims.thread_counts_per_block(), dims.block_counts(),
kernel, *kernel_args);
}
absl::Status ExecuteKernelOnStream(const se::Kernel& kernel,
absl::Span<const se::DeviceMemoryBase> args,
const LaunchDimensions& dims,
const se::ClusterDim& cluster_dim,
se::Stream* stream) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<se::KernelArgsPackedArrayBase> kernel_args,
se::PackKernelArgs(args, kernel.metadata()));
return stream->Launch(dims.thread_counts_per_block(), dims.block_counts(),
cluster_dim, kernel, *kernel_args);
}
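// Usage sketch for the two helpers above (hypothetical: `ptx`, `arg_buffers`,
// `stream_exec`, and `stream` are assumed to be supplied by the caller):
//   TF_ASSIGN_OR_RETURN(
//       std::unique_ptr<se::Kernel> kernel,
//       CreateKernel("add_one", /*num_args=*/2, ptx, /*cubin_data=*/{},
//                    stream_exec, /*shared_mem_bytes=*/0));
//   TF_RETURN_IF_ERROR(ExecuteKernelOnStream(
//       *kernel, arg_buffers,
//       LaunchDimensions(/*block_x_count=*/1,
//                        /*thread_x_count_per_block=*/256),
//       stream));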
template <typename T, typename Generator>
typename std::enable_if<std::is_integral<T>::value,
T>::type static UniformDistribution(T lhs, T rhs,
Generator* gen) =
delete;
template <typename T, typename Generator>
typename std::enable_if<std::is_floating_point<T>::value,
T>::type static UniformDistribution(T lhs, T rhs,
Generator* gen) {
return std::uniform_real_distribution<T>(lhs, rhs)(*gen);
}
namespace repeat_buffer_kernel {
void* kernel();
}
template <typename T>
static void InitializeTypedBuffer(se::Stream* stream,
se::DeviceMemoryBase buffer,
int64_t* rng_state) {
constexpr int host_buffer_size = 10069;
static std::vector<T>* host_buffer = [] {
auto* ret = new std::vector<T>(host_buffer_size);
std::mt19937 gen;
for (auto& element : *ret) {
constexpr bool kIsIntegral = std::numeric_limits<T>::is_integer;
constexpr bool kIsLowRange =
!kIsIntegral && std::numeric_limits<T>::max_exponent <=
std::numeric_limits<Eigen::half>::max_exponent;
using RandomType = typename std::conditional<std::is_same_v<T, double>,
double, float>::type;
auto upper_bound = RandomType(kIsLowRange ? 0.1 : 1.0);
auto rand_val = UniformDistribution(RandomType(0), upper_bound, &gen);
element = T(kIsIntegral ? rand_val + 0.5 : rand_val);
}
return ret;
}();
CHECK_EQ(0, buffer.size() % sizeof(T));
int64_t elements_to_fill = buffer.size() / sizeof(T);
int64_t host_index = *rng_state;
CHECK_LT(host_index, host_buffer_size);
*rng_state = (*rng_state + elements_to_fill) % host_buffer_size;
int64_t first_size =
std::min<int64_t>(host_buffer_size - host_index, elements_to_fill);
TF_CHECK_OK(stream->Memcpy(&buffer, host_buffer->data() + host_index,
first_size * sizeof(T)));
elements_to_fill -= first_size;
if (elements_to_fill == 0) {
return;
}
int64_t second_size = std::min<int64_t>(host_index, elements_to_fill);
CHECK_LE(first_size + second_size, host_buffer_size);
se::DeviceMemoryBase mem =
buffer.GetByteSlice(first_size * sizeof(T), second_size * sizeof(T));
TF_CHECK_OK(stream->Memcpy(&mem, host_buffer->data(), mem.size()));
elements_to_fill -= second_size;
if (elements_to_fill == 0) {
return;
}
#ifdef GOOGLE_CUDA
CHECK_EQ(elements_to_fill, buffer.size() / sizeof(T) - host_buffer_size);
se::StreamExecutor* executor = stream->parent();
auto kernel =
se::TypedKernelFactory<se::DeviceMemoryBase, int64_t, int64_t>::Create(
executor, "RepeatBufferKernel", repeat_buffer_kernel::kernel());
if (!kernel.ok()) {
LOG(FATAL) << "Could not create RepeatBufferKernel: " << kernel.status();
}
constexpr int64_t host_buffer_bytes = host_buffer_size * sizeof(T);
constexpr int threads_per_block = 256;
constexpr int blocks_per_grid =
(host_buffer_bytes + threads_per_block - 1) / threads_per_block;
TF_CHECK_OK(stream->ThenLaunch(se::ThreadDim(threads_per_block, 1, 1),
se::BlockDim(blocks_per_grid, 1, 1), *kernel,
buffer, host_buffer_bytes,
static_cast<int64_t>(buffer.size())));
#endif
}
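// Note on `rng_state`: it is the rotating read offset into the shared
// 10069-element host pool, so back-to-back buffer initializations read
// different (but deterministic) windows of pseudo-random data. 10069 is
// prime, which keeps the offset from cycling in lockstep with power-of-two
// buffer sizes.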
void InitializeBuffer(se::Stream* stream, PrimitiveType buffer_type,
int64_t* rng_state, se::DeviceMemoryBase buffer) {
return primitive_util::PrimitiveTypeSwitch<void>(
[&](auto primitive_type_constant) -> void {
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant) ||
primitive_util::IsIntegralType(primitive_type_constant)) {
using NativeT = typename primitive_util::PrimitiveTypeToNative<
primitive_type_constant>::type;
return InitializeTypedBuffer<NativeT>(stream, buffer, rng_state);
}
if constexpr (primitive_util::IsComplexType(primitive_type_constant)) {
using NativeT = typename primitive_util::PrimitiveTypeToNative<
primitive_type_constant>::type;
return InitializeTypedBuffer<typename NativeT::value_type>(
stream, buffer, rng_state);
}
if constexpr (primitive_type_constant == PRED) {
return InitializeTypedBuffer<int8_t>(stream, buffer, rng_state);
}
LOG(FATAL) << "Unexpected type: "
<< primitive_util::LowercasePrimitiveTypeName(buffer_type);
},
buffer_type);
}
absl::StatusOr<se::dnn::ConvolutionKind> GetDNNConvKindFromCudnnConvKind(
CudnnConvKind kind) {
switch (kind) {
case CudnnConvKind::kBackwardFilter:
return se::dnn::BACKWARD_FILTER;
case CudnnConvKind::kBackwardInput:
return se::dnn::BACKWARD_DATA;
case CudnnConvKind::kForward:
return se::dnn::FORWARD;
case CudnnConvKind::kForwardActivation:
return se::dnn::FORWARD_BIAS_ACTIVATION;
case CudnnConvKind::kForwardGraph:
return se::dnn::FORWARD_GRAPH;
default:
break;
}
return Internal("Unexpected convolution kind");
}
absl::StatusOr<se::dnn::NormKind> GetDNNNormKindFromCudnnNormKind(
CudnnNormKind kind) {
switch (kind) {
case CudnnNormKind::kLayerForwardInfer:
return se::dnn::LAYER_FWD_INFER;
case CudnnNormKind::kLayerForwardTrain:
return se::dnn::LAYER_FWD_TRAIN;
case CudnnNormKind::kLayerBackward:
return se::dnn::LAYER_BWD;
default:
return Internal("Unexpected norm kind");
}
}
absl::StatusOr<se::dnn::FMHAMaskKind> GetDNNFmhaMaskKindFromCudnnFmhaMaskKind(
CudnnfMHAMaskKind kind) {
switch (kind) {
case CudnnfMHAMaskKind::kNoMask:
return se::dnn::NO_MASK;
case CudnnfMHAMaskKind::kPadding:
return se::dnn::PADDING;
case CudnnfMHAMaskKind::kCausal:
return se::dnn::CAUSAL;
case CudnnfMHAMaskKind::kPaddingCausal:
return se::dnn::PADDING_CAUSAL;
case CudnnfMHAMaskKind::kAlibi:
return se::dnn::ALIBI;
default:
return Internal("Unexpected fmha mask kind");
}
}
absl::StatusOr<se::dnn::DataType> GetDNNDataTypeFromPrimitiveType(
PrimitiveType type) {
switch (type) {
case F16:
return se::dnn::ToDataType<Eigen::half>::value;
case F32:
return se::dnn::ToDataType<float>::value;
case F64:
return se::dnn::ToDataType<double>::value;
case S8:
return se::dnn::ToDataType<int8_t>::value;
case S32:
return se::dnn::ToDataType<int32_t>::value;
case BF16:
return se::dnn::ToDataType<Eigen::bfloat16>::value;
case F8E4M3FN:
return se::dnn::ToDataType<tsl::float8_e4m3fn>::value;
case F8E5M2:
return se::dnn::ToDataType<tsl::float8_e5m2>::value;
default:
break;
}
return Internal("Unsupported datatype");
}
bool RequireDeterminism(const HloModuleConfig& config) {
static bool require_cudnn_determinism = [] {
bool cudnn_deterministic = false;
TF_CHECK_OK(tsl::ReadBoolFromEnvVar("TF_CUDNN_DETERMINISTIC",
false,
&cudnn_deterministic));
return cudnn_deterministic;
}();
return require_cudnn_determinism ||
config.debug_options().xla_gpu_deterministic_ops();
}
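// Note: the TF_CUDNN_DETERMINISTIC lookup is latched in a function-local
// static, so the environment variable is consulted once per process; changing
// it after the first call has no effect.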
namespace {
std::vector<AutotuneResult> KeepNonFailures(
absl::Span<AutotuneResult const> profile_results) { | #include "xla/service/gpu/stream_executor_util.h"
#include <cstdint>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/time/time.h"
#include "xla/autotuning.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/tsl/util/proto/proto_utils.h"
namespace xla::gpu {
namespace {
struct Result {
int64_t run_time_ns;
int64_t scratch_bytes;
bool operator==(const Result& other) const {
return other.run_time_ns == run_time_ns &&
other.scratch_bytes == scratch_bytes;
};
explicit operator AutotuneResult() const {
AutotuneResult result;
*result.mutable_run_time() =
tsl::proto_utils::ToDurationProto(absl::Nanoseconds(run_time_ns));
result.set_scratch_bytes(scratch_bytes);
return result;
}
};
static Result ATRToResult(AutotuneResult atr) {
return Result{.run_time_ns = absl::ToInt64Nanoseconds(
tsl::proto_utils::FromDurationProto(atr.run_time())),
.scratch_bytes = atr.scratch_bytes()};
}
std::vector<AutotuneResult> Results(const std::vector<Result>& stats) {
std::vector<AutotuneResult> results;
for (const auto& s : stats) results.push_back(AutotuneResult(s));
return results;
}
TEST(StreamExecutorTest, PickBestResult) {
absl::StatusOr<AutotuneResult> atr;
atr = PickBestResult(Results({{9000, 0}, {1000, 0}, {16000, 0}}), "", {});
EXPECT_EQ(ATRToResult(atr.value()), Result({1000, 0}));
atr = PickBestResult(Results({{4700, 0}, {4600, 0}, {4500, 0}}), "", {});
EXPECT_EQ(ATRToResult(atr.value()), Result({4500, 0}));
atr = PickBestResult(Results({{4700, 0}, {4600, 2}, {4500, 1}}), "", {});
EXPECT_EQ(ATRToResult(atr.value()), Result({4700, 0}));
atr = PickBestResult(Results({{5000, 1}, {6000, 0}, {7500, 0}}), "", {});
EXPECT_EQ(ATRToResult(atr.value()), Result({6000, 0}));
}
}
} |
2,065 | cpp | tensorflow/tensorflow | autotuner_compile_util | third_party/xla/xla/service/gpu/autotuning/autotuner_compile_util.cc | third_party/xla/xla/service/gpu/autotuning/autotuner_compile_util_test.cc | #ifndef XLA_SERVICE_GPU_AUTOTUNER_COMPILE_UTIL_H_
#define XLA_SERVICE_GPU_AUTOTUNER_COMPILE_UTIL_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/compiler.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/shaped_buffer.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/gpu/redzone_allocator.h"
#include "xla/stream_executor/stream.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
namespace xla {
namespace gpu {
class AutotunerCompileUtil {
public:
using GenerateModuleFn =
absl::AnyInvocable<absl::StatusOr<std::unique_ptr<HloModule>>(
const DebugOptions&)>;
static absl::StatusOr<std::optional<AutotunerCompileUtil>> Create(
const AutotuneConfig& config, const DebugOptions& opts);
struct ProfilingOutput {
ProfilingOutput(absl::Duration duration, ScopedShapedBuffer&& buffer)
: duration(duration), output(std::move(buffer)) {}
absl::Duration duration;
ScopedShapedBuffer output;
};
absl::StatusOr<std::optional<ProfilingOutput>> ProfileExecutable(
Executable* executable, se::Stream* stream,
absl::Span<se::DeviceMemoryBase const> input_buffers,
absl::Span<Shape const> input_shapes);
absl::StatusOr<std::unique_ptr<Executable>> Compile(
GenerateModuleFn extractor);
absl::StatusOr<std::unique_ptr<HloModule>> ExtractModule(
GenerateModuleFn extractor);
private:
AutotunerCompileUtil(const AutotuneConfig& config, Compiler* compiler,
se::StreamExecutor& stream_executor, se::Stream& stream,
se::DeviceMemoryAllocator& allocator,
const DebugOptions& opts);
absl::StatusOr<ExecutionOutput> Execute(Executable& executable,
std::vector<ExecutionInput> arguments,
ExecutionProfile* profile = nullptr);
AutotuneConfig config_;
Compiler* compiler_;
se::StreamExecutor& stream_executor_;
se::Stream& stream_;
se::DeviceMemoryAllocator& allocator_;
DebugOptions opts_;
};
class RedzoneBuffers {
public:
enum BuffersToCreate {
kAllInputs = 0,
kAllInputsAllOutputs = 1,
kAllInputsOutputsNoScratch = 2,
};
static absl::StatusOr<RedzoneBuffers> FromInstruction(
const HloInstruction& instruction, const AutotuneConfig& config,
const DebugOptions& debug_options, BuffersToCreate buffers_to_create);
const std::vector<se::DeviceMemoryBase>& input_buffers() const {
return input_buffers_;
}
const std::vector<Shape>& input_shapes() const { return input_shapes_; }
const std::vector<se::DeviceMemoryBase>& output_buffers() const {
return output_buffers_;
}
const Shape& output_shape() const { return output_shape_; }
se::RedzoneAllocator& RedzoneAllocator() const { return *redzone_allocator_; }
private:
absl::Status CreateInputs(const HloInstruction& instruction,
const AutotuneConfig& config,
const DebugOptions& debug_options,
int64_t& rng_state);
absl::Status CreateOutputs(const HloInstruction& instruction,
const AutotuneConfig& config,
const DebugOptions& debug_options,
BuffersToCreate buffers_to_create,
int64_t& rng_state);
std::unique_ptr<se::RedzoneAllocator> redzone_allocator_;
std::vector<se::DeviceMemoryBase> input_buffers_;
std::vector<Shape> input_shapes_;
std::vector<se::DeviceMemoryBase> output_buffers_;
Shape output_shape_;
};
}
}
#endif
#include "xla/service/gpu/autotuner_compile_util.h"
#include <cstdint>
#include <iterator>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/executable_run_options.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/compiler.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/gpu_executable_run_options.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/service/service_executable_run_options.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/gpu/redzone_allocator.h"
#include "xla/stream_executor/stream.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
std::vector<ExecutionInput> ExecutionInputsFromBuffers(
absl::Span<se::DeviceMemoryBase const> buffers,
absl::Span<Shape const> shapes) {
CHECK_EQ(buffers.size(), shapes.size());
std::vector<ExecutionInput> inputs;
for (int i = 0; i < buffers.size(); ++i) {
inputs.emplace_back(shapes.at(i));
inputs.back().SetUnownedBuffer(
{}, MaybeOwningDeviceMemory(buffers.at(i)));
}
return inputs;
}
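// Note: the buffers are attached as *unowned* MaybeOwningDeviceMemory, so the
// executable will not free them on completion and the same device allocations
// can be reused across the warm-up and measured runs below.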
}
AutotunerCompileUtil::AutotunerCompileUtil(const AutotuneConfig& config,
Compiler* compiler,
se::StreamExecutor& stream_executor,
se::Stream& stream,
se::DeviceMemoryAllocator& allocator,
const DebugOptions& opts)
: config_(config),
compiler_(compiler),
stream_executor_(stream_executor),
stream_(stream),
allocator_(allocator),
opts_(opts) {
opts_.set_xla_enable_dumping(false);
opts_.set_xla_gpu_dump_autotune_results_to("");
opts_.set_xla_gpu_load_autotune_results_from("");
opts_.set_xla_gpu_dump_llvmir(false);
opts_.set_xla_gpu_dump_autotune_logs_to("");
opts_.set_xla_gpu_force_compilation_parallelism(1);
opts_.set_xla_gpu_enable_llvm_module_compilation_parallelism(false);
opts_.clear_xla_gpu_enable_command_buffer();
opts_.set_xla_embed_ir_in_executable(false);
opts_.set_xla_gpu_kernel_cache_file("");
}
absl::StatusOr<std::optional<AutotunerCompileUtil::ProfilingOutput>>
AutotunerCompileUtil::ProfileExecutable(
Executable* executable, se::Stream* stream,
absl::Span<se::DeviceMemoryBase const> input_buffers,
absl::Span<Shape const> input_shapes) {
{
std::vector<ExecutionInput> execution_inputs =
ExecutionInputsFromBuffers(input_buffers, input_shapes);
absl::StatusOr<ExecutionOutput> execution_output =
Execute(*executable, std::move(execution_inputs));
if (!execution_output.ok()) {
if (execution_output.status().code() ==
absl::StatusCode::kResourceExhausted) {
return {std::nullopt};
}
return execution_output.status();
}
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
}
std::vector<ExecutionInput> execution_inputs =
ExecutionInputsFromBuffers(input_buffers, input_shapes);
ExecutionProfile profile;
profile.set_warmup_run_executed(true);
TF_ASSIGN_OR_RETURN(
ExecutionOutput execution_output,
Execute(*executable, std::move(execution_inputs), &profile));
return std::make_optional<ProfilingOutput>(
absl::Nanoseconds(profile.compute_time_ns()),
execution_output.Commit().ConsumeResult());
}
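// Usage sketch (hypothetical `util`, `exec`, `stream`, `buffers`, `shapes`):
//   TF_ASSIGN_OR_RETURN(
//       std::optional<AutotunerCompileUtil::ProfilingOutput> p,
//       util.ProfileExecutable(exec.get(), stream, buffers, shapes));
//   if (p.has_value()) {
//     VLOG(1) << "candidate ran in " << p->duration;
//   }  // std::nullopt means the candidate exhausted resources and should be
//      // skipped rather than treated as an error.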
absl::StatusOr<std::unique_ptr<Executable>> AutotunerCompileUtil::Compile(
GenerateModuleFn extractor) {
absl::StatusOr<std::unique_ptr<HloModule>> new_hlo_module = extractor(opts_);
if (new_hlo_module.status().GetPayload(kUncompilableFusion).has_value()) {
return std::unique_ptr<Executable>();
} else if (!new_hlo_module.status().ok()) {
return new_hlo_module.status();
}
absl::StatusOr<std::unique_ptr<Executable>> out = compiler_->RunBackend(
std::move(*new_hlo_module), &stream_executor_,
Compiler::CompileOptions{&allocator_, nullptr,
{},
true});
if (out.status().code() == absl::StatusCode::kResourceExhausted ||
out.status().code() == absl::StatusCode::kCancelled) {
return std::unique_ptr<Executable>();
}
return out;
}
absl::StatusOr<std::unique_ptr<HloModule>> AutotunerCompileUtil::ExtractModule(
GenerateModuleFn extractor) {
return extractor(opts_);
}
absl::StatusOr<std::optional<AutotunerCompileUtil>>
AutotunerCompileUtil::Create(const AutotuneConfig& config,
const DebugOptions& opts) {
if (config.IsDeviceless()) {
return std::nullopt;
}
se::StreamExecutor* stream_exec = config.GetExecutor();
se::DeviceMemoryAllocator* allocator = config.GetAllocator();
TF_ASSIGN_OR_RETURN(se::Stream* const stream, config.GetStream());
TF_ASSIGN_OR_RETURN(Compiler * compiler,
Compiler::GetForPlatform(stream_exec->GetPlatform()));
return AutotunerCompileUtil(config, compiler, *stream_exec, *stream,
*allocator, opts);
}
absl::StatusOr<ExecutionOutput> AutotunerCompileUtil::Execute(
Executable& executable, std::vector<ExecutionInput> arguments,
ExecutionProfile* profile) {
GpuExecutableRunOptions gpu_opts;
gpu_opts.set_requires_exclusive_lock_on_gpu();
ExecutableRunOptions run_options;
run_options.set_device_ordinal(stream_executor_.device_ordinal());
run_options.set_stream(&stream_);
run_options.set_allocator(&allocator_);
run_options.set_gpu_executable_run_options(&gpu_opts);
run_options.set_execution_profile(profile);
ServiceExecutableRunOptions service_run_options(run_options);
TF_ASSIGN_OR_RETURN(ExecutionOutput output,
executable.ExecuteAsyncOnStreamWrapper(
&service_run_options, std::move(arguments)));
return std::move(output);
}
absl::StatusOr<RedzoneBuffers> RedzoneBuffers::FromInstruction(
const HloInstruction& instruction, const AutotuneConfig& config,
const DebugOptions& debug_options, BuffersToCreate buffers_to_create) {
RedzoneBuffers buffers;
TF_ASSIGN_OR_RETURN(auto rz_allocator, AutotunerUtil::CreateRedzoneAllocator(
config, debug_options));
buffers.redzone_allocator_ =
std::make_unique<se::RedzoneAllocator>(std::move(rz_allocator));
int64_t rng_state = 0;
TF_RETURN_IF_ERROR(
buffers.CreateInputs(instruction, config, debug_options, rng_state));
if (buffers_to_create == BuffersToCreate::kAllInputsAllOutputs ||
buffers_to_create == BuffersToCreate::kAllInputsOutputsNoScratch) {
TF_RETURN_IF_ERROR(buffers.CreateOutputs(instruction, config, debug_options,
buffers_to_create, rng_state));
}
return buffers;
}
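// Usage sketch (hypothetical `instr`, `config`, and `debug_options`):
//   TF_ASSIGN_OR_RETURN(
//       RedzoneBuffers rzb,
//       RedzoneBuffers::FromInstruction(instr, config, debug_options,
//                                       RedzoneBuffers::kAllInputsAllOutputs));
//   // rzb.input_buffers() now holds one initialized buffer per operand, and
//   // rzb.output_buffers() one per output.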
absl::Status RedzoneBuffers::CreateInputs(const HloInstruction& instruction,
const AutotuneConfig& config,
const DebugOptions& debug_options,
int64_t& rng_state) {
for (const auto* operand : instruction.operands()) {
TF_ASSIGN_OR_RETURN(
se::DeviceMemoryBase buf,
AutotunerUtil::CreateBuffer(*redzone_allocator_, operand->shape(),
config, rng_state));
input_buffers_.push_back(buf);
input_shapes_.push_back(operand->shape());
}
return absl::OkStatus();
}
absl::Status RedzoneBuffers::CreateOutputs(const HloInstruction& instruction,
const AutotuneConfig& config,
const DebugOptions& debug_options,
BuffersToCreate buffers_to_create,
int64_t& rng_state) {
if (!instruction.shape().IsTuple()) {
TF_ASSIGN_OR_RETURN(
se::DeviceMemoryBase buf,
AutotunerUtil::CreateBuffer(*redzone_allocator_, instruction.shape(),
config, rng_state));
output_buffers_.push_back(buf);
output_shape_ = instruction.shape();
return absl::OkStatus();
}
auto current_shape_it = instruction.shape().tuple_shapes().begin();
auto end = instruction.shape().tuple_shapes().end();
end -= buffers_to_create == kAllInputsAllOutputs ? 0 : 1;
  output_shape_ = std::distance(current_shape_it, end) == 1
                      ? *current_shape_it
                      : ShapeUtil::MakeTupleShape(
                            std::vector<Shape>{current_shape_it, end});
for (; current_shape_it < end; current_shape_it++) {
if (current_shape_it->IsTuple()) {
return Unimplemented("Nested tuples are unsupported by RedzoneBuffers.");
}
TF_ASSIGN_OR_RETURN(
se::DeviceMemoryBase buf,
AutotunerUtil::CreateBuffer(*redzone_allocator_, *current_shape_it,
config, rng_state));
output_buffers_.push_back(buf);
}
return absl::OkStatus();
}
}
} | #include "xla/service/gpu/autotuner_compile_util.h"
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/platform_util.h"
#include "xla/stream_executor/platform.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
using AutotunerCompileUtilTest = HloTestBase;
TEST_F(AutotunerCompileUtilTest, VerifyOutputNotATuple) {
constexpr absl::string_view kHlo = R"(
HloModule hlo
ENTRY main {
p0 = f32[2,2] parameter(0)
p1 = f32[4,4] parameter(1)
p2 = f32[6,6] parameter(2)
ROOT root = f32[1,2,3] custom-call(p0, p1, p2), custom_call_target="fake"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, GetOptimizedModule(kHlo));
se::Platform* platform = PlatformUtil::GetDefaultPlatform().value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
PlatformUtil::GetStreamExecutors(platform));
AutotuneConfig autotune_config{DeviceConfig{executors.at(0), nullptr},
GetDebugOptionsForTest()};
auto& root = *module->entry_computation()->root_instruction();
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputs));
EXPECT_EQ(rzb.input_shapes().size(), 3);
EXPECT_EQ(rzb.input_buffers().size(), 3);
EXPECT_EQ(rzb.output_buffers().size(), 0);
EXPECT_NE(rzb.output_shape(), root.shape());
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb2,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputsAllOutputs));
EXPECT_EQ(rzb2.input_shapes().size(), 3);
EXPECT_EQ(rzb2.input_buffers().size(), 3);
EXPECT_EQ(rzb2.output_buffers().size(), 1);
EXPECT_EQ(rzb2.output_shape(), root.shape());
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb3,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputsOutputsNoScratch));
EXPECT_EQ(rzb3.input_shapes().size(), 3);
EXPECT_EQ(rzb3.input_buffers().size(), 3);
EXPECT_EQ(rzb3.output_buffers().size(), 1);
EXPECT_EQ(rzb3.output_shape(), root.shape());
}
TEST_F(AutotunerCompileUtilTest, VerifyOutputTupleOneElement) {
constexpr absl::string_view kHlo = R"(
HloModule hlo
ENTRY main {
p0 = f32[2,2] parameter(0)
p1 = f32[4,4] parameter(1)
p2 = f32[6,6] parameter(2)
ROOT root = (f32[1,2,3]) custom-call(p0, p1, p2), custom_call_target="fake"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, GetOptimizedModule(kHlo));
se::Platform* platform = PlatformUtil::GetDefaultPlatform().value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
PlatformUtil::GetStreamExecutors(platform));
AutotuneConfig autotune_config{DeviceConfig{executors.at(0), nullptr},
GetDebugOptionsForTest()};
auto& root = *module->entry_computation()->root_instruction();
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputs));
EXPECT_EQ(rzb.input_shapes().size(), 3);
EXPECT_EQ(rzb.input_buffers().size(), 3);
EXPECT_EQ(rzb.output_buffers().size(), 0);
EXPECT_NE(rzb.output_shape(), root.shape());
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb2,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputsAllOutputs));
EXPECT_EQ(rzb2.input_shapes().size(), 3);
EXPECT_EQ(rzb2.input_buffers().size(), 3);
EXPECT_EQ(rzb2.output_buffers().size(), 1);
EXPECT_FALSE(rzb2.output_shape().IsTuple());
EXPECT_EQ(rzb2.output_shape(), root.shape().tuple_shapes(0));
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb3,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputsOutputsNoScratch));
EXPECT_EQ(rzb3.input_shapes().size(), 3);
EXPECT_EQ(rzb3.input_buffers().size(), 3);
EXPECT_EQ(rzb3.output_buffers().size(), 0);
}
TEST_F(AutotunerCompileUtilTest, VerifyOutputTupleTwoElements) {
constexpr absl::string_view kHlo = R"(
HloModule hlo
ENTRY main {
p0 = f32[2,2] parameter(0)
p1 = f32[4,4] parameter(1)
p2 = f32[6,6] parameter(2)
ROOT root = (f32[1,2,3], u8[1,2]) custom-call(p0, p1, p2), custom_call_target="fake"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, GetOptimizedModule(kHlo));
se::Platform* platform = PlatformUtil::GetDefaultPlatform().value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
PlatformUtil::GetStreamExecutors(platform));
AutotuneConfig autotune_config{DeviceConfig{executors.at(0), nullptr},
GetDebugOptionsForTest()};
auto& root = *module->entry_computation()->root_instruction();
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputs));
EXPECT_EQ(rzb.input_shapes().size(), 3);
EXPECT_EQ(rzb.input_buffers().size(), 3);
EXPECT_EQ(rzb.output_buffers().size(), 0);
EXPECT_NE(rzb.output_shape(), root.shape());
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb2,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputsAllOutputs));
EXPECT_EQ(rzb2.input_shapes().size(), 3);
EXPECT_EQ(rzb2.input_buffers().size(), 3);
EXPECT_EQ(rzb2.output_buffers().size(), 2);
EXPECT_TRUE(rzb2.output_shape().IsTuple());
EXPECT_EQ(rzb2.output_shape(), root.shape());
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb3,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputsOutputsNoScratch));
EXPECT_EQ(rzb3.input_shapes().size(), 3);
EXPECT_EQ(rzb3.input_buffers().size(), 3);
EXPECT_EQ(rzb3.output_buffers().size(), 1);
EXPECT_FALSE(rzb3.output_shape().IsTuple());
EXPECT_EQ(rzb3.output_shape(), root.shape().tuple_shapes(0));
}
}
} |
2,066 | cpp | tensorflow/tensorflow | fusion_merger | third_party/xla/xla/service/gpu/transforms/fusion_merger.cc | third_party/xla/xla/service/gpu/transforms/fusion_merger_test.cc | #ifndef XLA_SERVICE_GPU_FUSION_MERGER_H_
#define XLA_SERVICE_GPU_FUSION_MERGER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
class FusionMerger : public HloModulePass {
public:
explicit FusionMerger(const se::DeviceDescription& d,
HloCostAnalysis::ShapeSizeFunction f)
: gpu_device_info_(d), shape_size_function_(f) {}
absl::string_view name() const override { return "fusion_merger"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
se::DeviceDescription gpu_device_info_;
HloCostAnalysis::ShapeSizeFunction shape_size_function_;
};
}
}
#endif
#include "xla/service/gpu/fusion_merger.h"
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/gpu_performance_model.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_graph_dumper.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
namespace xla {
namespace gpu {
class FusionInstructionMerger {
public:
explicit FusionInstructionMerger(
HloComputation* computation, const se::DeviceDescription& gpu_device_info,
HloCostAnalysis::ShapeSizeFunction shape_size_function)
: computation_(computation),
shape_size_function_(shape_size_function),
gpu_device_info_(gpu_device_info),
dump_fusion_visualization_(computation->parent()
->config()
.debug_options()
.xla_dump_fusion_visualization()) {}
absl::Status Run();
bool changed() const { return changed_; }
private:
FusionDecision ShouldFuse(HloInstruction* producer);
absl::Status FuseIntoAllUsers(HloInstruction* producer);
HloComputation* computation_;
HloCostAnalysis::ShapeSizeFunction shape_size_function_;
std::optional<GpuHloCostAnalysis> cost_analysis_;
FusionInfoCache fusion_info_cache_;
const se::DeviceDescription& gpu_device_info_;
bool changed_ = false;
bool dump_fusion_visualization_ = false;
int total_visited_ = 0;
int total_merged_ = 0;
int num_fail_no_users_ = 0;
int num_fail_not_loop_fusion_ = 0;
int num_fail_merge_all_users_ = 0;
int num_fail_inefficient_fusion_emitter_ = 0;
int num_fail_fusion_too_large_ = 0;
int num_fail_uncoalesced_read_ = 0;
int num_fail_slower_if_fused_ = 0;
FusionInstructionMerger(const FusionInstructionMerger&) = delete;
FusionInstructionMerger& operator=(const FusionInstructionMerger&) = delete;
};
absl::Status FusionInstructionMerger::FuseIntoAllUsers(
HloInstruction* producer) {
std::vector<HloInstruction*> users = producer->users();
for (HloInstruction* user : users) {
if (dump_fusion_visualization_) {
RegisterFusionState(
*computation_,
absl::StrCat("About to fuse |", producer->name(), "| into |",
user->name(), "| inside FusionMerger"),
*user,
producer);
}
TF_RETURN_IF_ERROR(cost_analysis_->RemoveInstruction(user));
HloInstruction* consumer = user;
if (consumer->opcode() != HloOpcode::kFusion) {
consumer = computation_->AddInstruction(HloInstruction::CreateFusion(
user->shape(), ChooseFusionKind(*producer, *user), user));
TF_CHECK_OK(computation_->ReplaceInstruction(user, consumer));
}
consumer->MergeFusionInstruction(producer);
TF_RETURN_IF_ERROR(cost_analysis_->RevisitInstruction(consumer));
fusion_info_cache_.Invalidate(consumer);
if (dump_fusion_visualization_) {
RegisterFusionState(*computation_,
absl::StrCat("Fused |", producer->name(), "| into |",
user->name(), "| inside FusionMerger"),
*consumer);
}
changed_ = true;
}
CHECK_EQ(0, producer->user_count()) << producer->ToString();
TF_RETURN_IF_ERROR(computation_->RemoveInstruction(producer));
TF_RETURN_IF_ERROR(cost_analysis_->RemoveInstruction(producer));
fusion_info_cache_.Invalidate(producer);
VLOG(2) << "Merged fusion instruction: " << producer->name()
<< " into users { "
<< absl::StrJoin(users, ", ",
[](std::string* out, HloInstruction* user) {
absl::StrAppend(out, user->name());
})
<< " }";
return absl::OkStatus();
}
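// Note: a consumer that is not itself a fusion is first wrapped in a fresh
// kFusion instruction so that MergeFusionInstruction can splice the producer's
// body into it; the cost analysis is revisited per consumer so that later
// ShouldFuse decisions see up-to-date numbers.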
absl::Status FusionInstructionMerger::Run() {
for (HloInstruction* producer : computation_->MakeInstructionPostOrder()) {
if (producer->opcode() != HloOpcode::kFusion) {
continue;
}
FusionDecision should_fuse = ShouldFuse(producer);
if (should_fuse) {
TF_RETURN_IF_ERROR(FuseIntoAllUsers(producer));
++total_merged_;
} else {
VLOG(3) << "Not fusing fusion |" << producer->name()
<< "| with all of it's users due to: " << should_fuse.Explain();
if (dump_fusion_visualization_ && !producer->users().empty()) {
RegisterFusionState(
*computation_,
absl::StrCat(
"Not fusing fusion |", producer->name(),
"| into all of its users due to: ", should_fuse.Explain()),
*producer->users()[0],
producer);
}
}
}
VLOG(1) << "FusionInstructionMerger EXIT"
<< " computation: " << computation_->name()
<< " total_visited: " << total_visited_
<< " total_merged: " << total_merged_ << " merge failures { "
<< " no_users: " << num_fail_no_users_
<< " not_loop_fusion: " << num_fail_not_loop_fusion_
<< " merge_all_users: " << num_fail_merge_all_users_
<< " uncoalesced_read: " << num_fail_uncoalesced_read_
<< " inefficient_fusion_emitter: "
<< num_fail_inefficient_fusion_emitter_
<< " slower_if_fused: " << num_fail_slower_if_fused_
<< " fusion_too_large: " << num_fail_fusion_too_large_ << " }";
return absl::OkStatus();
}
bool TransposesMostData(const HloInstruction& fusion) {
float score = 0;
for (const HloInstruction* instr : fusion.fused_instructions()) {
if (IsPhysicallyTransposing(*instr)) {
score += 1.0 * ShapeUtil::ElementsInRecursive(instr->shape()) /
ShapeUtil::ElementsInRecursive(fusion.shape());
if (score >= 0.5) {
VLOG(3) << fusion.ToString() << " transpose ratio exceeds " << score;
return true;
}
}
}
return false;
}
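// Example: a fusion whose output has N elements and which contains a single
// physically transposing instruction also producing N elements scores 1.0,
// which already clears the 0.5 threshold; such producers are kept out of
// reduction consumers because the merged read pattern would be mostly
// uncoalesced.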
FusionDecision FusionInstructionMerger::ShouldFuse(HloInstruction* producer) {
++total_visited_;
VLOG(4) << "Considering producer " << producer->name();
if (producer->users().empty()) {
++num_fail_no_users_;
return "fusion has no users";
}
if (!producer->IsLoopFusion()) {
++num_fail_not_loop_fusion_;
return "not a loop fusion";
}
auto producer_hero = GetRealHeroForMultiOutputFusion(*producer);
bool has_reduction_user = false;
for (const HloInstruction* user : producer->users()) {
if (user->opcode() == HloOpcode::kBitcast) {
++num_fail_merge_all_users_;
return "not fusing bitcast ops";
}
if (user->IsCustomFusion()) {
++num_fail_merge_all_users_;
return "not fusing custom fusions";
}
auto consumer_hero = GetRealHeroForMultiOutputFusion(*user);
if (auto compatible =
FusionHeroesAreCompatible(producer_hero, consumer_hero);
!compatible) {
return compatible;
}
FusionDecision fusible = IsProducerConsumerFusible(*producer, *user);
if (!fusible) {
++num_fail_merge_all_users_;
VLOG(9) << user->ToString();
return fusible;
}
if (IsInputFusibleReduction(*user)) {
has_reduction_user = true;
}
}
if (has_reduction_user && TransposesMostData(*producer)) {
++num_fail_uncoalesced_read_;
return "would read mostly uncoalesced";
}
for (const HloInstruction* user : producer->users()) {
FusionDecision fits = FusionFitsInBudget(
*user, *producer, gpu_device_info_,
true, &fusion_info_cache_);
if (!fits) {
++num_fail_fusion_too_large_;
return fits;
}
}
if (!cost_analysis_) {
VLOG(2) << "Running full HLO cost analysis for " << computation_->name();
cost_analysis_.emplace(
GpuHloCostAnalysis::Options{shape_size_function_,
{},
true},
&gpu_device_info_);
TF_CHECK_OK(computation_->Accept(&cost_analysis_.value()));
}
for (const HloInstruction* user : producer->users()) {
if (cost_analysis_->ProducerConsumerMergedTooLarge(*producer, *user)) {
++num_fail_inefficient_fusion_emitter_;
return FusionDecision{} << "if merged with " << user->name()
<< " will generate huge IR";
}
}
GpuPerformanceModel::RunTimes t = GpuPerformanceModel::EstimateRunTimes(
producer, &*cost_analysis_, GpuPerformanceModelOptions::Default(),
producer->users());
if (t.time_fused > t.time_unfused) {
++num_fail_slower_if_fused_;
return "will execute slower if fused";
}
return {};
}
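// In short, a producer fusion is merged into all of its users only when every
// gate above passes: it is a loop fusion with at least one user, no user is a
// bitcast or custom fusion, producer/consumer heroes are compatible, each
// merged fusion fits the budget, none would blow up the emitted IR, and the
// cost model predicts the fused form is no slower than the unfused one.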
absl::StatusOr<bool> FusionMerger::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
VLOG(1) << "FusionMerger for module: " << module->name();
for (auto* computation :
module->MakeNonfusionComputations(execution_threads)) {
VLOG(9) << "Before running FusionInstructionMerger for computation: "
<< computation->name();
XLA_VLOG_LINES(9, computation->ToString());
FusionInstructionMerger fusion_merger(computation, gpu_device_info_,
shape_size_function_);
TF_RETURN_IF_ERROR(fusion_merger.Run());
changed |= fusion_merger.changed();
VLOG(9) << "After running FusionInstructionMerger for computation: "
<< computation->name() << " changed: " << changed;
XLA_VLOG_LINES(9, computation->ToString());
}
return changed;
}
}
} | #include "xla/service/gpu/fusion_merger.h"
#include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class FusionMergerTest : public HloTestBase {
HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
public:
FusionMerger fusion_merger_{TestGpuDeviceInfo::RTXA6000DeviceInfo(),
ShapeSizeBytesFunction()};
FusionMergerTest() : HloTestBase() {}
};
TEST_F(FusionMergerTest, MergeSharedFusionInstruction) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule MergeSharedFusionInstruction
comp.3 {
constant.param_0 = f32[4]{0} parameter(0)
param.param_1.2 = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(1)
get-tuple-element.6 = f32[4]{0} get-tuple-element(param.param_1.2), index=0
ROOT add.7 = f32[4]{0} add(constant.param_0, get-tuple-element.6)
}
comp.2 {
param.param_1.1 = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0)
get-tuple-element.4 = f32[4]{0} get-tuple-element(param.param_1.1), index=1
get-tuple-element.5 = f32[4]{0} get-tuple-element(param.param_1.1), index=2
ROOT add.6 = f32[4]{0} add(get-tuple-element.4, get-tuple-element.5)
}
comp.1 {
add.1.param_1.1 = f32[4]{0} parameter(1)
constant.param_1.3 = f32[4]{0} parameter(0)
add.5 = f32[4]{0} add(add.1.param_1.1, constant.param_1.3)
ROOT multiply.3 = f32[4]{0} multiply(add.5, constant.param_1.3)
}
comp {
add.1.param_1 = f32[4]{0} parameter(1)
constant.param_1.1 = f32[4]{0} parameter(0)
multiply.2 = f32[4]{0} multiply(add.1.param_1, constant.param_1.1)
ROOT add.4 = f32[4]{0} add(multiply.2, constant.param_1.1)
}
ENTRY MergeSharedFusionInstruction.Computation0 {
constant = f32[4]{0} constant({1, 1, 1, 1})
param = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0)
fusion.3 = f32[4]{0} fusion(constant, param), kind=kLoop, calls=comp.3
fusion.4 = f32[4]{0} fusion(param), kind=kLoop, calls=comp.2
fusion.5 = f32[4]{0} fusion(constant, fusion.4), kind=kLoop, calls=comp.1
fusion.6 = f32[4]{0} fusion(constant, fusion.4), kind=kLoop, calls=comp
ROOT tuple = (f32[4]{0}, f32[4]{0}, f32[4]{0}) tuple(fusion.3, fusion.5, fusion.6)
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_EQ(HloOpcode::kTuple, root->opcode());
auto* operand0 = root->operand(0);
EXPECT_EQ(HloOpcode::kFusion, operand0->opcode());
EXPECT_EQ(4, operand0->fused_instruction_count());
auto* operand1 = root->operand(1);
EXPECT_EQ(HloOpcode::kFusion, operand1->opcode());
EXPECT_EQ(7, operand1->fused_instruction_count());
auto* operand2 = root->operand(2);
EXPECT_EQ(HloOpcode::kFusion, operand2->opcode());
EXPECT_EQ(7, operand2->fused_instruction_count());
}
TEST_F(FusionMergerTest, MoreMemoryAccessIfFused) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f32add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT _ = f32[] add(x, y)
}
comp0 {
p = (f32[100000000], f32[100000000], f32[100000000], f32[100000000]) parameter(0)
gte0 = f32[100000000] get-tuple-element(p), index=0
gte1 = f32[100000000] get-tuple-element(p), index=1
add.9 = f32[100000000] add(gte0, gte1)
gte2 = f32[100000000] get-tuple-element(p), index=2
add.10 = f32[100000000] add(add.9, gte2)
gte3 = f32[100000000] get-tuple-element(p), index=3
add.11 = f32[100000000] add(add.10, gte3)
p1 = (f32[100000000], f32[100000000], f32[100000000], f32[100000000]) parameter(1)
gte4 = f32[100000000] get-tuple-element(p1), index=0
gte5 = f32[100000000] get-tuple-element(p1), index=1
add.12 = f32[100000000] add(gte4, gte5)
gte6 = f32[100000000] get-tuple-element(p1), index=2
add.13 = f32[100000000] add(add.12, gte6)
gte7 = f32[100000000] get-tuple-element(p1), index=3
add.14 = f32[100000000] add(add.13, gte7)
ROOT r = f32[100000000] add(add.14, add.11)
}
comp1 {
p = f32[100000000] parameter(0)
c0 = f32[] constant(0)
ROOT r = f32[] reduce(p, c0), dimensions={0}, to_apply=f32add
}
comp2 {
p = f32[100000000] parameter(0)
c0 = f32[] constant(0)
r = f32[] reduce(p, c0), dimensions={0}, to_apply=f32add
ROOT n = f32[] negate(r)
}
ENTRY m.Computation2 {
p0 = (f32[100000000], f32[100000000], f32[100000000], f32[100000000]) parameter(0)
p1 = (f32[100000000], f32[100000000], f32[100000000], f32[100000000]) parameter(1)
fusion.0 = f32[100000000] fusion(p0, p1), kind=kLoop, calls=comp0
fusion.1 = f32[] fusion(fusion.0), kind=kLoop, calls=comp1
fusion.2 = f32[] fusion(fusion.0), kind=kLoop, calls=comp2
ROOT tuple = (f32[], f32[]) tuple(fusion.1, fusion.2)
}
)")
.value();
EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, LessMemoryAccessIfFused) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
comp.2 {
state.param_1.1 = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0)
get-tuple-element.5 = f32[4]{0} get-tuple-element(state.param_1.1), index=0
get-tuple-element.6 = f32[4]{0} get-tuple-element(state.param_1.1), index=1
add.7 = f32[4]{0} add(get-tuple-element.5, get-tuple-element.6)
get-tuple-element.7 = f32[4]{0} get-tuple-element(state.param_1.1), index=2
ROOT add.8 = f32[4]{0} add(add.7, get-tuple-element.7)
}
comp.1 {
add.1.param_1.1 = f32[4]{0} parameter(1)
constant.param_1.3 = f32[4]{0} parameter(0)
add.5 = f32[4]{0} add(add.1.param_1.1, constant.param_1.3)
ROOT multiply.3 = f32[4]{0} multiply(add.5, constant.param_1.3)
}
comp {
add.1.param_1 = f32[4]{0} parameter(1)
constant.param_1.1 = f32[4]{0} parameter(0)
multiply.2 = f32[4]{0} multiply(add.1.param_1, constant.param_1.1)
ROOT add.4 = f32[4]{0} add(multiply.2, constant.param_1.1)
}
ENTRY m.Computation2 {
constant = f32[4]{0} constant({1, 1, 1, 1})
state = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0)
fusion.2 = f32[4]{0} fusion(state), kind=kLoop, calls=comp.2
fusion.3 = f32[4]{0} fusion(constant, fusion.2), kind=kLoop, calls=comp.1
fusion.4 = f32[4]{0} fusion(constant, fusion.2), kind=kLoop, calls=comp
ROOT tuple = (f32[4]{0}, f32[4]{0}) tuple(fusion.3, fusion.4)
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, WillMergeIntoInputFusion) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f1_computation {
f1_p0 = f32[32]{0} parameter(0)
ROOT f1_root = f32[32]{0} add(f1_p0, f1_p0)
}
add_computation {
add_lhs = f32[] parameter(0)
add_rhs = f32[] parameter(1)
ROOT add_root = f32[] add(add_lhs, add_rhs)
}
f2_computation {
f2_p0 = f32[32]{0} parameter(0)
f2_mul = f32[32]{0} multiply(f2_p0, f2_p0)
f2_zero = f32[] constant(0)
ROOT f2_root = f32[] reduce(f2_mul, f2_zero), dimensions={0},
to_apply=add_computation
}
ENTRY entry {
p0 = f32[32]{0} parameter(0)
f1 = f32[32]{0} fusion(p0), kind=kLoop, calls=f1_computation
ROOT f2 = f32[] fusion(f1), kind=kInput, calls=f2_computation
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter())));
}
TEST_F(FusionMergerTest, WillMergeIntoUnfusedConsumer) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule jit_matmul.36
max (parameter.13: f32[], parameter.14: f32[]) -> f32[] {
parameter.13 = f32[] parameter(0)
parameter.14 = f32[] parameter(1)
ROOT maximum.15 = f32[] maximum(f32[] parameter.13, f32[] parameter.14)
}
add (parameter.29: f32[], parameter.30: f32[]) -> f32[] {
parameter.29 = f32[] parameter(0)
parameter.30 = f32[] parameter(1)
ROOT add.31 = f32[] add(f32[] parameter.29, f32[] parameter.30)
}
fused_computation.1 (param_1.4: f32[200,200,200], param_2.1: f32[200,200]) -> f32[200,200] {
param_1.4 = f32[200,200,200]{2,1,0} parameter(0)
param_2.1 = f32[200,200]{1,0} parameter(1)
broadcast.3 = f32[200,200,200]{2,1,0} broadcast(f32[200,200]{1,0} param_2.1), dimensions={0,2}
subtract.0 = f32[200,200,200]{2,1,0} subtract(f32[200,200,200]{2,1,0} param_1.4, f32[200,200,200]{2,1,0} broadcast.3)
exponential.0 = f32[200,200,200]{2,1,0} exponential(f32[200,200,200]{2,1,0} subtract.0)
constant.27 = f32[] constant(0)
ROOT reduce.0 = f32[200,200]{1,0} reduce(f32[200,200,200]{2,1,0} exponential.0, f32[] constant.27), dimensions={1}, to_apply=add
}
fused_computation.3 (param_0.7: f32[200,200], param_1.9: f32[200,200]) -> f32[200,200,200] {
param_1.9 = f32[200,200]{1,0} parameter(1)
broadcast.10 = f32[200,200,200]{2,1,0} broadcast(f32[200,200]{1,0} param_1.9), dimensions={0,1}
param_0.7 = f32[200,200]{1,0} parameter(0)
broadcast.8 = f32[200,200,200]{2,1,0} broadcast(f32[200,200]{1,0} param_0.7), dimensions={1,2}
ROOT add.1 = f32[200,200,200]{2,1,0} add(f32[200,200,200]{2,1,0} broadcast.10, f32[200,200,200]{2,1,0} broadcast.8)
}
ENTRY entry (parameter.1: f32[200,200], parameter.2: f32[200,200]) -> f32[200,200] {
parameter.2 = f32[200,200]{1,0} parameter(1)
parameter.1 = f32[200,200]{1,0} parameter(0)
fusion.3 = f32[200,200,200]{2,1,0} fusion(f32[200,200]{1,0} parameter.2, f32[200,200]{1,0} parameter.1), kind=kLoop, calls=fused_computation.3
constant.11 = f32[] constant(-inf)
reduce.16 = f32[200,200]{1,0} reduce(f32[200,200,200]{2,1,0} fusion.3, f32[] constant.11), dimensions={1}, to_apply=max
ROOT fusion.1 = f32[200,200]{1,0} fusion(f32[200,200,200]{2,1,0} fusion.3, f32[200,200]{1,0} reduce.16), kind=kInput, calls=fused_computation.1
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Fusion(), m::Parameter(), m::Parameter())));
}
TEST_F(FusionMergerTest, WillNotMergeReduceUnfriendlyLayouts) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f1_computation {
f1_p0 = f32[16,16,256]{0,1,2} parameter(0)
add = f32[16,16,256]{0,1,2} add(f1_p0, f1_p0)
ROOT f1_root = f32[16,16,256]{2,1,0} copy(add)
}
add_computation {
add_lhs = f32[] parameter(0)
add_rhs = f32[] parameter(1)
ROOT add_root = f32[] add(add_lhs, add_rhs)
}
f2_computation {
f2_p0 = f32[16,16,256]{2,1,0} parameter(0)
f2_zero = f32[] constant(0)
ROOT f2_root = f32[] reduce(f2_p0, f2_zero), dimensions={0,1,2},
to_apply=add_computation
}
ENTRY entry {
p0 = f32[16,16,256]{0,1,2} parameter(0)
f1 = f32[16,16,256]{2,1,0} fusion(p0), kind=kLoop, calls=f1_computation
ROOT f2 = f32[] fusion(f1), kind=kInput, calls=f2_computation
})")
.value();
EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, WillMergeReduceNotTooUnfriendlyLayouts) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f1_computation {
f1_p0 = f32[16,16,256]{0,1,2} parameter(0)
slice1 = f32[5,16,256]{0,1,2} slice(f1_p0), slice={[0:5], [0:16], [0:256]}
f1_copy = f32[5,16,256]{2,1,0} copy(slice1)
slice2 = f32[11,16,256]{0,1,2} slice(f1_p0), slice={[0:11], [0:16], [0:256]}
bitcast = f32[11,16,256]{2,1,0} bitcast(slice2)
ROOT f1_root = f32[16,16,256]{2,1,0} concatenate(f1_copy, bitcast), dimensions={0}
}
add_computation {
add_lhs = f32[] parameter(0)
add_rhs = f32[] parameter(1)
ROOT add_root = f32[] add(add_lhs, add_rhs)
}
f2_computation {
f2_p0 = f32[16,16,256]{2,1,0} parameter(0)
f2_zero = f32[] constant(0)
ROOT f2_root = f32[] reduce(f2_p0, f2_zero), dimensions={0,1,2},
to_apply=add_computation
}
ENTRY entry {
p0 = f32[16,16,256]{0,1,2} parameter(0)
f1 = f32[16,16,256]{2,1,0} fusion(p0), kind=kLoop, calls=f1_computation
ROOT f2 = f32[] fusion(f1), kind=kInput, calls=f2_computation
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, AvoidsLargeFusion) {
constexpr int64_t kNumParams = MaxOperandsAndOutputsPerFusion() + 1;
auto module = CreateNewVerifiedModule();
HloComputation::Builder b(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {10, 100});
std::vector<HloInstruction*> entry_params;
for (int64_t i = 0; i < kNumParams; ++i) {
entry_params.push_back(
b.AddInstruction(HloInstruction::CreateParameter(i, shape, "p")));
}
auto make_fusion = [&](absl::Span<HloInstruction* const> params) {
HloComputation::Builder sub_builder("subcomp");
HloInstruction* sum = nullptr;
for (int64_t i = 0; i < params.size(); ++i) {
auto p = sub_builder.AddInstruction(
HloInstruction::CreateParameter(i, shape, "p"));
if (sum == nullptr) {
sum = p;
} else {
sum = sub_builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, sum, p));
}
}
HloComputation* subcomp =
module->AddEmbeddedComputation(sub_builder.Build());
return HloInstruction::CreateFusion(
shape, HloInstruction::FusionKind::kLoop, params, subcomp);
};
auto fusion = b.AddInstruction(
make_fusion(absl::MakeSpan(entry_params)
.subspan(0, MaxOperandsAndOutputsPerFusion())));
b.AddInstruction(make_fusion({entry_params.back(), fusion}));
module->AddEntryComputation(b.Build());
EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, WillNotMergeIfFusionEmitterIsInefficient) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f1 {
Arg_0.5 = f32[200000] parameter(0)
slice.7 = f32[100000] slice(Arg_0.5), slice={[0:199999:2]}
slice.8 = f32[100000] slice(Arg_0.5), slice={[1:200000:2]}
add.9 = f32[100000] add(slice.7, slice.8)
slice.10 = f32[50000] slice(add.9), slice={[0:99999:2]}
slice.11 = f32[50000] slice(add.9), slice={[1:100000:2]}
add.12 = f32[50000] add(slice.10, slice.11)
slice.13 = f32[25000] slice(add.12), slice={[0:49999:2]}
slice.14 = f32[25000] slice(add.12), slice={[1:50000:2]}
add.15 = f32[25000] add(slice.13, slice.14)
slice.16 = f32[12500] slice(add.15), slice={[0:24999:2]}
slice.17 = f32[12500] slice(add.15), slice={[1:25000:2]}
add.18 = f32[12500] add(slice.16, slice.17)
slice.19 = f32[6250] slice(add.18), slice={[0:12499:2]}
slice.20 = f32[6250] slice(add.18), slice={[1:12500:2]}
add.21 = f32[6250] add(slice.19, slice.20)
slice.22 = f32[3125] slice(add.21), slice={[0:6249:2]}
slice.23 = f32[3125] slice(add.21), slice={[1:6250:2]}
ROOT add.24 = f32[3125] add(slice.22, slice.23)
}
f2 {
Arg_0 = f32[3125] parameter(0)
slice.25 = f32[1562] slice(Arg_0), slice={[0:3124:2]}
slice.26 = f32[1562] slice(Arg_0), slice={[1:3125:2]}
add.27 = f32[1562] add(slice.25, slice.26)
slice.28 = f32[781] slice(add.27), slice={[0:1561:2]}
slice.29 = f32[781] slice(add.27), slice={[1:1562:2]}
add.30 = f32[781] add(slice.28, slice.29)
slice.31 = f32[390] slice(add.30), slice={[0:780:2]}
slice.32 = f32[390] slice(add.30), slice={[1:781:2]}
add.33 = f32[390] add(slice.31, slice.32)
slice.34 = f32[195] slice(add.33), slice={[0:389:2]}
slice.35 = f32[195] slice(add.33), slice={[1:390:2]}
add.36 = f32[195] add(slice.34, slice.35)
slice.37 = f32[97] slice(add.36), slice={[0:194:2]}
slice.38 = f32[97] slice(add.36), slice={[1:195:2]}
add.39 = f32[97] add(slice.37, slice.38)
slice.40 = f32[48] slice(add.39), slice={[0:96:2]}
slice.41 = f32[48] slice(add.39), slice={[1:97:2]}
ROOT add.42 = f32[48] add(slice.40, slice.41)
}
ENTRY e {
p0 = f32[200000] parameter(0)
f1 = f32[3125] fusion(p0), kind=kLoop, calls=f1
ROOT r = f32[48] fusion(f1), kind=kLoop, calls=f2
})")
.value();
EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, WillMergeSliceIntoReusingConsumer) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f1 {
p01 = s8[1000000] parameter(0)
ROOT s0 = s8[10] slice(p01), slice={[0:10]}
}
f2 {
p02 = s8[10] parameter(0)
ROOT b0 = s8[10,1000000] broadcast(p02), dimensions={0}
}
ENTRY e {
p0 = s8[1000000] parameter(0)
f1 = s8[10] fusion(p0), kind=kLoop, calls=f1
ROOT r = s8[10,1000000] fusion(f1), kind=kLoop, calls=f2
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, WillMergeExpensiveFusionsIfSavesMemory) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
%f_a (p: f32[]) -> f32[1024,1024,1024] {
%p = f32[] parameter(0)
%b = f32[1024,1024,1024] broadcast(%p), dimensions={}
ROOT %t = f32[1024,1024,1024] tanh(%b)
}
%f_b (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024] tanh(%p)
}
%f_c (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024] tanh(%p)
}
ENTRY entry {
p0 = f32[] parameter(0)
f1 = f32[1024,1024,1024] fusion(p0), kind=kLoop, calls=%f_a
f2 = f32[1024,1024,1024] fusion(f1), kind=kLoop, calls=%f_b
f3 = f32[1024,1024,1024] fusion(f1), kind=kLoop, calls=%f_c
ROOT f4 = f32[1024,1024,1024] add(f2, f3)
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, WillMergeExpensiveFusionsWithSingleConsumer) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
%f_b (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024] tanh(%p)
}
%f_c (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024] add(%p, %p)
}
ENTRY entry {
p0 = f32[1024,1024,1024] parameter(0)
f1 = f32[1024,1024,1024] fusion(p0), kind=kLoop, calls=%f_b
ROOT f2 = f32[1024,1024,1024] fusion(f1), kind=kLoop, calls=%f_c
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, WillNotMergeExpensiveFusionsWithReusingConsumer) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
%f_b {
%p = f32[1024,1024,1024] parameter(0)
%t1 = f32[1024,1024,1024] tanh(%p)
%t2 = f32[1024,1024,1024] tanh(%t1)
%t3 = f32[1024,1024,1024] tanh(%t2)
%t4 = f32[1024,1024,1024] tanh(%t3)
%t5 = f32[1024,1024,1024] tanh(%t4)
%t6 = f32[1024,1024,1024] tanh(%t5)
%t7 = f32[1024,1024,1024] tanh(%t6)
%t8 = f32[1024,1024,1024] tanh(%t7)
ROOT %t9 = f32[1024,1024,1024] tanh(%t8)
}
%f_c {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024,2048] broadcast(%p), dimensions={0,1,2}
}
ENTRY entry {
p0 = f32[1024,1024,1024] parameter(0)
f1 = f32[1024,1024,1024] fusion(p0), kind=kLoop, calls=%f_b
ROOT f2 = f32[1024,1024,1024,2048] fusion(f1), kind=kLoop, calls=%f_c
})")
.value();
EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, NoMergeWithBitcast) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f32add {
x.634 = f32[] parameter(0)
y.635 = f32[] parameter(1)
ROOT add.636 = f32[] add(x.634, y.635)
}
fused_computation.103 {
param_0.310 = f16[1,8,512,1536]{2,3,1,0} parameter(0)
param_1.420 = f32[8,512]{1,0} parameter(1)
bitcast.1144 = f32[1,8,512]{2,1,0} bitcast(param_1.420)
convert.252 = f16[1,8,512]{2,1,0} convert(bitcast.1144)
bitcast.1143 = f16[8,512]{1,0} bitcast(convert.252)
broadcast.481 = f16[1,8,512,1536]{2,3,1,0} broadcast(bitcast.1143), dimensions={1,2}
divide.15 = f16[1,8,512,1536]{2,3,1,0} divide(param_0.310, broadcast.481)
ROOT bitcast.1142 = f16[8,512,1536]{1,2,0} bitcast(divide.15)
}
fused_computation.105 {
param_1.426 = f16[8,1536,512]{2,1,0} parameter(1)
bitcast.1896 = f16[1,8,1536,512]{3,2,1,0} bitcast(param_1.426)
transpose.238 = f16[1,8,512,1536]{2,3,1,0} transpose(bitcast.1896), dimensions={0,1,3,2}
param_0.315 = f16[8,512]{1,0} parameter(0)
broadcast.482 = f16[1,8,512,1536]{2,3,1,0} broadcast(param_0.315), dimensions={1,2}
subtract.22 = f16[1,8,512,1536]{2,3,1,0} subtract(transpose.238, broadcast.482)
ROOT exponential.15 = f16[1,8,512,1536]{2,3,1,0} exponential(subtract.22)
}
fused_computation.104 {
param_0.1000 = f16[8,1536,512]{2,1,0} parameter(0)
convert.652 = f32[8,1536,512]{2,1,0} convert(param_0.1000)
constant_752 = f32[] constant(-0)
ROOT reduce.232 = f32[8,512]{1,0} reduce(convert.652, constant_752),
dimensions={1}, to_apply=f32add
}
ENTRY entry {
p0 = f16[8,1536,512]{2,1,0} parameter(0)
p1 = f16[8,512]{1,0} parameter(1)
fusion.105 = f16[1,8,512,1536]{2,3,1,0} fusion(p1, p0), kind=kLoop, calls=fused_computation.105
bitcast.1787 = f16[8,1536,512]{2,1,0} bitcast(fusion.105)
fusion.104 = f32[8,512]{1,0} fusion(bitcast.1787), kind=kInput, calls=fused_computation.104
ROOT fusion.103 = f16[8,512,1536]{1,2,0} fusion(fusion.105, fusion.104), kind=kLoop, calls=fused_computation.103
}
)")
.value();
EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, CostBasedMerge) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
fused_computation.45 {
param_1.194 = f16[8,1536,512]{2,1,0} parameter(1)
bitcast.1042 = f16[1,8,512,1536]{2,3,1,0} bitcast(param_1.194)
param_0.135 = f16[8,512]{1,0} parameter(0)
broadcast.391 = f16[1,8,512,1536]{2,3,1,0} broadcast(param_0.135), dimensions={1,2}
subtract.6 = f16[1,8,512,1536]{2,3,1,0} subtract(bitcast.1042, broadcast.391)
ROOT exponential.11 = f16[1,8,512,1536]{2,3,1,0} exponential(subtract.6)
}
f32add {
x.634 = f32[] parameter(0)
y.635 = f32[] parameter(1)
ROOT add.636 = f32[] add(x.634, y.635)
}
fused_computation.44 {
param_0.869 = f16[1,8,512,1536]{2,3,1,0} parameter(0)
convert.221 = f32[1,8,512,1536]{2,3,1,0} convert(param_0.869)
transpose.212 = f32[1,8,1536,512]{3,2,1,0} transpose(convert.221), dimensions={0,1,3,2}
bitcast.1041 = f32[8,1536,512]{2,1,0} bitcast(transpose.212)
constant_429 = f32[] constant(0)
ROOT reduce.149 = f32[8,512]{1,0} reduce(bitcast.1041, constant_429), dimensions={1}, to_apply=f32add
}
fused_computation.43 {
param_0.130 = f16[1,8,512,1536]{2,3,1,0} parameter(0)
param_1.188 = f32[8,512]{1,0} parameter(1)
bitcast.1040 = f32[1,8,512]{2,1,0} bitcast(param_1.188)
convert.220 = f16[1,8,512]{2,1,0} convert(bitcast.1040)
bitcast.1039 = f16[8,512]{1,0} bitcast(convert.220)
broadcast.390 = f16[1,8,512,1536]{2,3,1,0} broadcast(bitcast.1039), dimensions={1,2}
divide.11 = f16[1,8,512,1536]{2,3,1,0} divide(param_0.130, broadcast.390)
ROOT bitcast.1038 = f16[8,512,1536]{1,2,0} bitcast(divide.11)
}
ENTRY entry {
p0 = f16[8,1536,512]{2,1,0} parameter(0)
p1 = f16[8,512]{1,0} parameter(1)
fusion.45 = f16[1,8,512,1536]{2,3,1,0} fusion(p1, p0), kind=kLoop, calls=fused_computation.45
fusion.44 = f32[8,512]{1,0} fusion(fusion.45), kind=kInput, calls=fused_computation.44
ROOT fusion.43 = f16[8,512,1536]{1,2,0} fusion(fusion.45, fusion.44), kind=kLoop, calls=fused_computation.43
}
)")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, CostBasedNoMerge) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
add_float_.56 {
x.57 = f32[] parameter(0)
y.58 = f32[] parameter(1)
ROOT add.59 = f32[] add(x.57, y.58)
}
fused_computation.66 {
constant.635 = f32[] constant(0)
broadcast.257 = f32[459,3]{1,0} broadcast(constant.635), dimensions={}
constant.641 = f32[] constant(1)
broadcast.256 = f32[459,3]{1,0} broadcast(constant.641), dimensions={}
broadcast.255 = f32[459]{0} broadcast(constant.635), dimensions={}
iota.28 = f32[459]{0} iota(), iota_dimension=0
constant.629 = f32[] constant(1.49891067)
broadcast.253 = f32[459]{0} broadcast(constant.629), dimensions={}
multiply.39 = f32[459]{0} multiply(iota.28, broadcast.253)
constant.633 = f32[] constant(-1)
broadcast.252 = f32[459]{0} broadcast(constant.633), dimensions={}
add.31 = f32[459]{0} add(multiply.39, broadcast.252)
ceil.11 = f32[459]{0} ceil(add.31)
constant.630 = f32[] constant(685)
broadcast.251 = f32[459]{0} broadcast(constant.630), dimensions={}
clamp.49 = f32[459]{0} clamp(broadcast.255, ceil.11, broadcast.251)
subtract.11 = f32[459]{0} subtract(clamp.49, multiply.39)
broadcast.249 = f32[459,3]{1,0} broadcast(subtract.11), dimensions={0}
iota.26 = f32[459,3]{1,0} iota(), iota_dimension=1
add.30 = f32[459,3]{1,0} add(broadcast.249, iota.26)
abs.3 = f32[459,3]{1,0} abs(add.30)
subtract.10 = f32[459,3]{1,0} subtract(broadcast.256, abs.3)
maximum.6 = f32[459,3]{1,0} maximum(broadcast.257, subtract.10)
ROOT reduce.3 = f32[459]{0} reduce(maximum.6, constant.635), dimensions={1}, to_apply=add_float_.56
}
fused_computation.67 {
constant.684 = f32[] constant(0)
broadcast.296 = f32[1130,3]{1,0} broadcast(constant.684), dimensions={}
constant.685 = f32[] constant(1)
broadcast.295 = f32[1130,3]{1,0} broadcast(constant.685), dimensions={}
broadcast.294 = f32[1130]{0} broadcast(constant.684), dimensions={}
iota.41 = f32[1130]{0} iota(), iota_dimension=0
constant.675 = f32[] constant(1.34513271)
broadcast.293 = f32[1130]{0} broadcast(constant.675), dimensions={}
multiply.47 = f32[1130]{0} multiply(iota.41, broadcast.293)
constant.677 = f32[] constant(-1)
broadcast.290 = f32[1130]{0} broadcast(constant.677), dimensions={}
add.39 = f32[1130]{0} add(multiply.47, broadcast.290)
ceil.15 = f32[1130]{0} ceil(add.39)
constant.676 = f32[] constant(1517)
broadcast.289 = f32[1130]{0} broadcast(constant.676), dimensions={}
clamp.53 = f32[1130]{0} clamp(broadcast.294, ceil.15, broadcast.289)
subtract.19 = f32[1130]{0} subtract(clamp.53, multiply.47)
broadcast.287 = f32[1130,3]{1,0} broadcast(subtract.19), dimensions={0}
iota.39 = f32[1130,3]{1,0} iota(), iota_dimension=1
add.38 = f32[1130,3]{1,0} add(broadcast.287, iota.39)
abs.7 = f32[1130,3]{1,0} abs(add.38)
subtract.18 = f32[1130,3]{1,0} subtract(broadcast.295, abs.7)
maximum.10 = f32[1130,3]{1,0} maximum(broadcast.296, subtract.18)
ROOT reduce.4 = f32[1130]{0} reduce(maximum.10, constant.684), dimensions={1}, to_apply=add_float_.56
}
fused_computation.59 {
constant.532 = f32[] constant(0)
broadcast.316 = f32[1130,3]{1,0} broadcast(constant.532), dimensions={}
constant.663 = f32[] constant(1)
broadcast.315 = f32[1130,3]{1,0} broadcast(constant.663), dimensions={}
broadcast.314 = f32[1130]{0} broadcast(constant.532), dimensions={}
iota.47 = f32[1130]{0} iota(), iota_dimension=0
constant.579 = f32[] constant(1.34513271)
broadcast.311 = f32[1130]{0} broadcast(constant.579), dimensions={}
multiply.51 = f32[1130]{0} multiply(iota.47, broadcast.311)
constant.578 = f32[] constant(-1)
broadcast.310 = f32[1130]{0} broadcast(constant.578), dimensions={}
add.43 = f32[1130]{0} add(multiply.51, broadcast.310)
ceil.17 = f32[1130]{0} ceil(add.43)
constant.576 = f32[] constant(1517)
broadcast.309 = f32[1130]{0} broadcast(constant.576), dimensions={}
clamp.55 = f32[1130]{0} clamp(broadcast.314, ceil.17, broadcast.309)
subtract.24 = f32[1130]{0} subtract(clamp.55, multiply.51)
broadcast.306 = f32[1130,3]{1,0} broadcast(subtract.24), dimensions={0}
iota.45 = f32[1130,3]{1,0} iota(), iota_dimension=1
add.42 = f32[1130,3]{1,0} add(broadcast.306, iota.45)
abs.9 = f32[1130,3]{1,0} abs(add.42)
subtract.23 = f32[1130,3]{1,0} subtract(broadcast.315, abs.9)
maximum.12 = f32[1130,3]{1,0} maximum(broadcast.316, subtract.23)
param_2.183 = f32[1130]{0} parameter(2)
broadcast.172 = f32[1130,3]{1,0} broadcast(param_2.183), dimensions={0}
divide.3 = f32[1130,3]{1,0} divide(maximum.12, broadcast.172)
bitcast.53 = f32[3390]{0} bitcast(divide.3)
broadcast.171 = f32[3390,1377]{1,0} broadcast(bitcast.53), dimensions={0}
broadcast.276 = f32[459,3]{1,0} broadcast(constant.532), dimensions={}
broadcast.275 = f32[459,3]{1,0} broadcast(constant.663), dimensions={}
broadcast.274 = f32[459]{0} broadcast(constant.532), dimensions={}
iota.35 = f32[459]{0} iota(), iota_dimension=0
constant.614 = f32[] constant(1.49891067)
broadcast.273 = f32[459]{0} broadcast(constant.614), dimensions={}
multiply.43 = f32[459]{0} multiply(iota.35, broadcast.273)
broadcast.272 = f32[459]{0} broadcast(constant.578), dimensions={}
add.35 = f32[459]{0} add(multiply.43, broadcast.272)
ceil.13 = f32[459]{0} ceil(add.35)
constant.611 = f32[] constant(685)
broadcast.269 = f32[459]{0} broadcast(constant.611), dimensions={}
clamp.51 = f32[459]{0} clamp(broadcast.274, ceil.13, broadcast.269)
subtract.15 = f32[459]{0} subtract(clamp.51, multiply.43)
broadcast.267 = f32[459,3]{1,0} broadcast(subtract.15), dimensions={0}
iota.33 = f32[459,3]{1,0} iota(), iota_dimension=1
add.34 = f32[459,3]{1,0} add(broadcast.267, iota.33)
abs.5 = f32[459,3]{1,0} abs(add.34)
subtract.14 = f32[459,3]{1,0} subtract(broadcast.275, abs.5)
maximum.8 = f32[459,3]{1,0} maximum(broadcast.276, subtract.14)
param_1.177 = f32[459]{0} parameter(1)
broadcast.170 = f32[459,3]{1,0} broadcast(param_1.177), dimensions={0}
divide.2 = f32[459,3]{1,0} divide(maximum.8, broadcast.170)
bitcast.52 = f32[1377]{0} bitcast(divide.2)
broadcast.169 = f32[3390,1377]{1,0} broadcast(bitcast.52), dimensions={1}
multiply.15 = f32[3390,1377]{1,0} multiply(broadcast.171, broadcast.169)
bitcast.61 = f32[1130,3,459,3]{3,2,1,0} bitcast(multiply.15)
transpose.68 = f32[459,1130,3,3]{2,0,3,1} transpose(bitcast.61), dimensions={2,0,3,1}
copy.1 = f |
2,067 | cpp | tensorflow/tensorflow | cudnn_simplify_padding | third_party/xla/xla/service/gpu/transforms/cudnn_simplify_padding.cc | third_party/xla/xla/service/gpu/transforms/cudnn_simplify_padding_test.cc | #ifndef XLA_SERVICE_GPU_CUDNN_SIMPLIFY_PADDING_H_
#define XLA_SERVICE_GPU_CUDNN_SIMPLIFY_PADDING_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla::gpu {
class CudnnSimplifyPadding : public HloModulePass {
public:
CudnnSimplifyPadding() = default;
absl::string_view name() const override { return "cudnn_simplify_padding"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/gpu/cudnn_simplify_padding.h"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <optional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
namespace m = ::xla::match;
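// Returns the index of the unique `false` element of `vals`, or nullopt if
// zero or multiple elements are false.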
std::optional<int64_t> FindFalseIndex(absl::Span<const bool> vals) {
std::optional<int64_t> missing_dim;
for (int i = 0; i < vals.size(); i++) {
if (vals[i]) {
continue;
}
if (missing_dim.has_value()) {
VLOG(2) << "Multiple dimensions are missing from conv dnums; can't "
"determine which is vect_c dimension";
return std::nullopt;
}
missing_dim = i;
}
return missing_dim;
}
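// Finds the vect_c dimension of the conv's output: the single output
// dimension not claimed by the batch, feature, or spatial dimension numbers.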
std::optional<int64_t> FindOutputVectCDim(HloInstruction* conv) {
const ConvolutionDimensionNumbers& dnums =
conv->convolution_dimension_numbers();
int64_t num_dims = conv->shape().tuple_shapes(0).dimensions_size();
absl::InlinedVector<bool, 5> seen_dims(num_dims);
seen_dims[dnums.output_batch_dimension()] = true;
seen_dims[dnums.output_feature_dimension()] = true;
for (int64_t d : dnums.output_spatial_dimensions()) {
seen_dims[d] = true;
}
return FindFalseIndex(seen_dims);
}
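// Finds the vect_c dimension of the conv's kernel: the single kernel
// dimension not claimed by the input-feature, output-feature, or spatial
// dimension numbers.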
std::optional<int64_t> FindKernelVectCDim(HloInstruction* conv) {
const ConvolutionDimensionNumbers& dnums =
conv->convolution_dimension_numbers();
int64_t num_dims = conv->operand(1)->shape().dimensions_size();
absl::InlinedVector<bool, 5> seen_dims(num_dims);
seen_dims[dnums.kernel_input_feature_dimension()] = true;
seen_dims[dnums.kernel_output_feature_dimension()] = true;
for (int64_t d : dnums.kernel_spatial_dimensions()) {
seen_dims[d] = true;
}
return FindFalseIndex(seen_dims);
}
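// Computes how many trailing output features of `conv` are known to be zero
// by inspecting its weights: a zero-pad, a reshape of a zero-pad (vectorized
// int8x32 layout), or a constant whose trailing output features are all zero.
// Returns nullopt if this cannot be determined.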
std::optional<int64_t> NumTrailingZeroOutputFeatures(HloInstruction* conv) {
const ConvolutionDimensionNumbers& dnums =
conv->convolution_dimension_numbers();
int64_t feature_dim = dnums.kernel_output_feature_dimension();
const HloInstruction* weights = conv->operand(1);
auto backend_config = conv->backend_config<GpuBackendConfig>();
if (backend_config.ok() &&
backend_config->cudnn_conv_backend_config().reordered_int8_nchw_vect()) {
VLOG(2) << "Matched int8x32 convolution with filter reordering";
const HloInstruction *reshape, *transpose;
bool matched =
Match(weights, m::Reshape(m::Transpose(
&transpose, m::Reshape(&reshape, m::Op(&weights)))));
if (!matched || feature_dim != 0 || transpose->shape().rank() != 8) {
VLOG(2) << "The filter output feature dimension cannot be determined, as "
"the reordering sequence is modified";
return std::nullopt;
}
const auto& transpose_dimensions =
Cast<HloTransposeInstruction>(transpose)->dimensions();
int64_t preceding_size = 1;
for (int64_t i = transpose_dimensions.at(3) - 1; i >= 0; --i) {
preceding_size *= reshape->shape().dimensions(i);
}
int64_t accumulated_size = 1;
for (int64_t size : weights->shape().dimensions()) {
if (accumulated_size < preceding_size) {
accumulated_size *= size;
++feature_dim;
} else {
break;
}
}
if (accumulated_size != preceding_size) {
VLOG(2) << "Something is really wrong here, I give up";
return std::nullopt;
}
VLOG(2) << "Computed output feature dimension: " << feature_dim;
}
VLOG(2) << "Computing NumTrailingZeroOutputFeatures of " << conv->ToString()
<< "\nwith weights " << weights->ToString();
if (Match(weights, m::Pad(m::Op(), m::ConstantEffectiveScalar(0)))) {
const PaddingConfig::PaddingConfigDimension& padding_config =
weights->padding_config().dimensions(feature_dim);
VLOG(2) << "Success: Weights is a pad; padding on output feature dim is "
<< padding_config.edge_padding_high();
return padding_config.edge_padding_high();
} else if (const HloInstruction * pad; Match(
weights, m::Reshape(m::Pad(&pad, m::Op(),
m::ConstantEffectiveScalar(0))))) {
std::optional<int64_t> vect_c_dim = FindKernelVectCDim(conv);
if (!vect_c_dim.has_value()) {
VLOG(2) << "fail: Can't find vect_c dimension in conv.";
return std::nullopt;
}
if (*vect_c_dim != dnums.kernel_input_feature_dimension() + 1) {
VLOG(2) << "fail: vect_c dim is in the wrong place; should be right "
"after kernel input feature dims in conv.";
return std::nullopt;
}
absl::InlinedVector<int64_t, 5> expected_pad_dim_sizes(
weights->shape().dimensions().begin(),
weights->shape().dimensions().end());
expected_pad_dim_sizes[dnums.kernel_input_feature_dimension()] *=
weights->shape().dimensions(*vect_c_dim);
expected_pad_dim_sizes.erase(expected_pad_dim_sizes.begin() + *vect_c_dim);
if (pad->shape().dimensions() != expected_pad_dim_sizes) {
VLOG(2) << "fail: Reshape doesn't simply merge vect_c dimension into "
"input features dim "
<< weights->ToString() << " but expected dims "
<< absl::StrJoin(expected_pad_dim_sizes, ",");
return std::nullopt;
}
int64_t feature_dim_before_reshape = feature_dim;
if (dnums.kernel_output_feature_dimension() >
dnums.kernel_input_feature_dimension()) {
feature_dim_before_reshape--;
}
const PaddingConfig::PaddingConfigDimension& padding_config =
pad->padding_config().dimensions(feature_dim_before_reshape);
VLOG(2) << "Success: Weights is a reshape of a pad; padding on output "
"feature dim is "
<< padding_config.edge_padding_high();
return padding_config.edge_padding_high();
} else if (Match(weights, m::Constant())) {
const Literal& lit = weights->literal();
const auto& dims = weights->shape().dimensions();
absl::InlinedVector<int64_t, 5> multi_index;
for (int64_t dim : dims) {
multi_index.push_back(dim - 1);
}
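    // Steps the multi-index backwards in mixed-radix order, varying the
    // non-feature dimensions fastest and the feature dimension slowest, so
    // output features are visited from last to first. Returns false once the
    // whole constant has been scanned.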
auto decrement_multi_index = [&] {
for (int i = 0; i < multi_index.size(); ++i) {
if (i != feature_dim) {
int64_t& idx = multi_index[i];
--idx;
if (idx == -1) {
idx = dims[i] - 1;
} else {
return true;
}
}
}
int64_t& idx = multi_index[feature_dim];
--idx;
return idx != -1;
};
do {
if (!lit.IsZero(multi_index)) {
break;
}
} while (decrement_multi_index());
int64_t first_trailing_zero_feature = multi_index[feature_dim] + 1;
if (first_trailing_zero_feature == 0) {
VLOG(2) << "Weights constant is entirely zero.";
} else {
VLOG(2) << "First nonzero index in weights constant is "
<< absl::StrJoin(multi_index, ",");
}
int64_t ret =
std::max<int64_t>(0, weights->shape().dimensions(feature_dim) -
first_trailing_zero_feature);
VLOG(2) << "Success: weights is a constant; num zero trailing output "
"features is "
<< ret;
return ret;
}
return std::nullopt;
}
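// Matches pad(slice(conv)), optionally with a reshape and transpose between
// the conv and the slice, and folds the slice into the pad when the output
// features it removes are known to be zero.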
absl::StatusOr<bool> TrySimplifyPadding(HloInstruction* instr) {
HloInstruction* conv;
HloInstruction* transpose = nullptr;
HloInstruction* reshape = nullptr;
HloInstruction* slice;
HloInstruction* pad;
auto conv_matcher = m::GetTupleElement(
m::CustomCall(&conv).WithPredicate([](const HloInstruction* instr) {
return instr->custom_call_target() == kCudnnConvForwardCallTarget ||
instr->custom_call_target() ==
kCudnnConvBiasActivationForwardCallTarget;
}),
0);
auto pad_matcher = m::Pad(m::Op(), m::ConstantEffectiveScalar(0));
if (!MatchAndLogIfFailed(instr, "conv-slice-pad",
m::Pad(&pad, m::Slice(&slice, conv_matcher),
m::ConstantEffectiveScalar(0)),
VLOG_IS_ON(3), pad_matcher) &&
!MatchAndLogIfFailed(
instr, "conv-reshape-slice-pad",
m::Pad(&pad, m::Slice(&slice, m::Reshape(&reshape, conv_matcher)),
m::ConstantEffectiveScalar(0)),
VLOG_IS_ON(3), pad_matcher) &&
!MatchAndLogIfFailed(
instr, "conv-transpose-reshape-slice-pad",
m::Pad(&pad,
m::Slice(&slice,
m::Reshape(&reshape,
m::Transpose(&transpose, conv_matcher))),
m::ConstantEffectiveScalar(0)),
VLOG_IS_ON(3), pad_matcher)) {
return false;
}
VLOG(2) << "Found pattern to attempt to simplify:\n"
<< "conv: " << conv->ToString()
<< "\ntranspose: "
<< (transpose != nullptr ? transpose->ToString() : "(null)")
<< "\nreshape: "
<< (reshape != nullptr ? reshape->ToString() : "(null)")
<< "\nslice: " << slice->ToString()
<< "\npad: " << pad->ToString();
std::optional<int64_t> num_known_zero_output_features =
NumTrailingZeroOutputFeatures(conv);
if (!num_known_zero_output_features.has_value() ||
*num_known_zero_output_features == 0) {
VLOG(2) << "fail: Didn't find any known-zero output features";
return false;
}
const auto& dnums = conv->convolution_dimension_numbers();
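  // Determine which dimension of the slice/pad holds the conv's output
  // features. If a reshape is present, it must merge the vect_c dim (sitting
  // immediately after the feature dim, possibly after a transpose) into the
  // feature dim.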
int64_t output_feature_dim;
if (reshape == nullptr) {
CHECK_EQ(transpose, nullptr);
output_feature_dim = dnums.output_feature_dimension();
} else {
std::optional<int64_t> vect_c_dim_before_transpose =
FindOutputVectCDim(conv);
if (!vect_c_dim_before_transpose.has_value()) {
VLOG(2) << "Couldn't find vect_c output dim in conv.";
return false;
}
int64_t feature_dim_after_transpose;
int64_t vect_c_dim_after_transpose;
if (transpose == nullptr) {
feature_dim_after_transpose = dnums.output_feature_dimension();
vect_c_dim_after_transpose = *vect_c_dim_before_transpose;
} else {
const auto& transpose_dims = transpose->dimensions();
feature_dim_after_transpose = std::distance(
transpose->dimensions().begin(),
absl::c_find(transpose_dims, dnums.output_feature_dimension()));
vect_c_dim_after_transpose = std::distance(
transpose->dimensions().begin(),
absl::c_find(transpose_dims, *vect_c_dim_before_transpose));
}
if (vect_c_dim_after_transpose != feature_dim_after_transpose + 1) {
VLOG(2) << "fail: after transpose (if present), vect_c dim must appear "
"immediately after output feature dim: Computed "
"vect_d_dim_after_transpose to be "
<< vect_c_dim_after_transpose;
return false;
}
absl::InlinedVector<int64_t, 5> expected_reshape_dim_sizes(
reshape->operand(0)->shape().dimensions().begin(),
reshape->operand(0)->shape().dimensions().end());
expected_reshape_dim_sizes[feature_dim_after_transpose] *=
expected_reshape_dim_sizes[vect_c_dim_after_transpose];
expected_reshape_dim_sizes.erase(expected_reshape_dim_sizes.begin() +
vect_c_dim_after_transpose);
if (reshape->shape().dimensions() != expected_reshape_dim_sizes) {
VLOG(2) << "fail: Reshape doesn't merge vect_c with feature dimension.";
return false;
}
output_feature_dim = feature_dim_after_transpose;
}
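  // The slice may only trim trailing elements from the feature dimension:
  // every dim must start at 0 with stride 1, and all non-feature dims must
  // be kept whole.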
if (!absl::c_all_of(slice->slice_starts(), [](auto v) { return v == 0; }) ||
!absl::c_all_of(slice->slice_strides(), [](auto v) { return v == 1; })) {
VLOG(2) << "fail: Slice doesn't start at the front or has stride != 1.";
return false;
}
for (int64_t dim = 0; dim < slice->slice_limits().size(); dim++) {
if (slice->slice_starts(dim) != 0 || slice->slice_strides(dim) != 1 ||
(dim != output_feature_dim &&
slice->slice_limits(dim) !=
slice->operand(0)->shape().dimensions(dim))) {
VLOG(2) << "fail: Slice removes something other than the features dim.";
return false;
}
}
int64_t num_sliced_from_feature_dim =
slice->operand(0)->shape().dimensions(output_feature_dim) -
slice->slice_limits(output_feature_dim);
if (num_sliced_from_feature_dim > *num_known_zero_output_features) {
VLOG(2) << "fail: Slice removes " << num_sliced_from_feature_dim
<< " features from the conv, but only "
<< *num_known_zero_output_features
<< " features in the conv are known to be zero.";
return false;
}
if (pad->padding_config().dimensions(output_feature_dim).interior_padding() !=
0) {
VLOG(2)
<< "fail: Can't merge slice into pad because pad adds interior padding "
"in feature dimension.";
return false;
}
VLOG(1) << "Eliminating " << num_sliced_from_feature_dim
<< " elements of padding from conv " << conv->name();
PaddingConfig new_padding_config = pad->padding_config();
PaddingConfig::PaddingConfigDimension* new_pad_feature_dim =
new_padding_config.mutable_dimensions(output_feature_dim);
new_pad_feature_dim->set_edge_padding_high(
new_pad_feature_dim->edge_padding_high() - num_sliced_from_feature_dim);
TF_ASSIGN_OR_RETURN(HloInstruction * new_pad,
MakePadHlo(slice->mutable_operand(0),
pad->mutable_operand(1), new_padding_config));
TF_RETURN_IF_ERROR(pad->parent()->ReplaceInstruction(pad, new_pad));
return true;
}
}
absl::StatusOr<bool> CudnnSimplifyPadding::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
TF_ASSIGN_OR_RETURN(bool c, TrySimplifyPadding(instr));
changed |= c;
}
}
return changed;
}
} | #include "xla/service/gpu/cudnn_simplify_padding.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/functional/function_ref.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/literal.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/call_inliner.h"
#include "xla/service/gpu/cudnn_pad_for_convolutions.h"
#include "xla/service/gpu/cudnn_vectorize_convolutions.h"
#include "xla/service/hlo_pass_fix.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/reshape_mover.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
namespace m = ::xla::match;
class CudnnSimplifyPaddingTest : public HloTestBase {
protected:
absl::StatusOr<bool> RunEndToEnd(std::pair<int, int> compute_capability,
HloModule* module) {
se::CudaComputeCapability cc{compute_capability.first,
compute_capability.second};
TF_RETURN_IF_ERROR(
RunHloPass(CudnnPadForConvolutions(cc), module).status());
TF_RETURN_IF_ERROR(
RunHloPass(CudnnVectorizeConvolutions(
cc, se::dnn::VersionInfo{8, 3, 0}),
module)
.status());
VLOG(1) << "after vectorizing convs:\n" << module->ToString();
TF_RETURN_IF_ERROR(RunHloPass(CallInliner(), module).status());
VLOG(1) << "after inliner:\n" << module->ToString();
TF_RETURN_IF_ERROR(RunHloPass(TupleSimplifier(), module).status());
VLOG(1) << "after tuple simplifier:\n" << module->ToString();
TF_ASSIGN_OR_RETURN(bool changed,
RunHloPass(CudnnSimplifyPadding(), module));
VLOG(1) << "after simplify_padding:\n" << module->ToString();
{
HloPassFix<HloPassPipeline> pipeline("reshape-mover and algsimp");
pipeline.AddPass<ReshapeMover>();
pipeline.AddPass<AlgebraicSimplifier>(AlgebraicSimplifierOptions());
TF_RETURN_IF_ERROR(RunHloPass(pipeline, module).status());
}
VLOG(1) << "after reshape mover + algsimp:\n" << module->ToString();
return changed;
}
absl::StatusOr<bool> RunJustThisPass(HloModule* module) {
TF_ASSIGN_OR_RETURN(bool changed,
RunHloPass(CudnnSimplifyPadding(), module));
VLOG(1) << "after simplify_padding:\n" << module->ToString();
TF_RETURN_IF_ERROR(RunHloPass(HloPassFix<AlgebraicSimplifier>(
AlgebraicSimplifierOptions()),
module)
.status());
return changed;
}
};
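// Asserts that `p` pads only dimension `dim`, by `padding_high` at the high
// edge, with zero edge padding everywhere else.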
void ExpectOnlyPadsOneDim(int64_t dim, int64_t padding_high,
const PaddingConfig& p) {
SCOPED_TRACE(p.DebugString());
for (int i = 0; i < p.dimensions_size(); ++i) {
SCOPED_TRACE(absl::StrCat("dimension ", i));
EXPECT_EQ(p.dimensions(i).edge_padding_low(), 0);
if (i == dim) {
EXPECT_EQ(p.dimensions(i).edge_padding_high(), padding_high);
} else {
EXPECT_EQ(p.dimensions(i).edge_padding_high(), 0);
}
}
}
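// Replaces the constant `instr` with a clone whose cells are rewritten by
// `value_fn`.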
template <typename NativeT>
void SetConstantValue(
HloInstruction* instr,
absl::FunctionRef<NativeT(absl::Span<const int64_t>, NativeT)> value_fn) {
Literal new_literal = instr->literal().Clone();
  new_literal.MutableEachCell<NativeT>(value_fn);
TF_EXPECT_OK(instr->parent()->ReplaceWithNewInstruction(
instr, HloInstruction::CreateConstant(std::move(new_literal))));
}
TEST_F(CudnnSimplifyPaddingTest, EndToEnd) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
conv1 = (s8[10,20,30,190], u8[0]) custom-call(
s8[10,20,30,63] parameter(0), s8[3,5,63,190] parameter(1),
f32[10] parameter(2), s8[10,20,30,190] parameter(3)),
window={size=3x5}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBiasActivationForward"
conv1_result = get-tuple-element(conv1), index=0
ROOT conv2 = (s8[10,20,30,29], u8[0]) custom-call(
conv1_result, s8[3,5,190,29] parameter(4),
f32[10] parameter(5), s8[10,20,30,29] parameter(6)),
window={size=3x5}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBiasActivationForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunEndToEnd({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
GmockMatch(m::Tuple(
m::Slice(m::Reshape(m::GetTupleElement(m::CustomCall(
{"__cudnn$convBiasActivationForward"},
m::GetTupleElement(
m::CustomCall({"__cudnn$convBiasActivationForward"}), 0),
m::Op(), m::Op(), m::Op())))),
m::Op())));
}
TEST_F(CudnnSimplifyPaddingTest, EndToEndNCHW) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
conv1 = (s8[1,64,480,400], u8[0]) custom-call(
s8[1,112,480,400] parameter(0), s8[3,3,112,64] parameter(1),
f32[64] parameter(2)),
window={size=3x3}, dim_labels=bf01_01io->bf01,
custom_call_target="__cudnn$convBiasActivationForward"
conv1_result = get-tuple-element(conv1), index=0
convert = f32[1,64,480,400] convert(conv1_result)
constant = f32[] constant(0.349002093)
broadcast = f32[1,64,480,400] broadcast(constant)
ROOT multiply = f32[1,64,480,400] multiply(convert, broadcast)
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunEndToEnd({7, 5}, module.get()));
EXPECT_FALSE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Reshape(m::Multiply())));
}
TEST_F(CudnnSimplifyPaddingTest, PaddedWeights) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* pad = nullptr;
ASSERT_THAT(root,
GmockMatch(m::Pad(&pad, m::GetTupleElement(m::CustomCall(), 0),
m::ConstantScalar(0))));
ExpectOnlyPadsOneDim(3, 1, pad->padding_config());
}
TEST_F(CudnnSimplifyPaddingTest, PaddedWeightsNotPaddedEnough) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_3
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, PaddedAndReshapedWeightsNCHW) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights_p = pad(s8[64,60,3,3] parameter(0), s8[] constant(0)), padding=0_0x0_4x0_0x0_0
weights = s8[2,32,64,3,3] reshape(weights_p)
conv = (s8[10,2,32,10,10], u8[0]) custom-call(
s8[10,2,32,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=bf?01_i?o01->bf?01,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,60,10,10] slice(s8[10,64,10,10] reshape(conv_result)), slice={[0:10], [0:60], [0:10], [0:10]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_5x0_0x0_0
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* pad = nullptr;
ASSERT_THAT(
root, GmockMatch(
m::Pad(&pad, m::Reshape(m::GetTupleElement(m::CustomCall(), 0)),
m::ConstantScalar(0))));
ExpectOnlyPadsOneDim(1, 1, pad->padding_config());
}
TEST_F(CudnnSimplifyPaddingTest, PaddedAndReshapedWeightsNHWC) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights_p = pad(s8[3,3,64,60] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4
weights = s8[3,3,2,32,64] reshape(weights_p)
conv = (s8[10,10,10,2,32], u8[0]) custom-call(
s8[10,10,10,2,32] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f?_01i?o->b01f?,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,60] slice(s8[10,10,10,64] reshape(conv_result)), slice={[0:10], [0:10], [0:10], [0:60]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* pad = nullptr;
ASSERT_THAT(
root, GmockMatch(
m::Pad(&pad, m::Reshape(m::GetTupleElement(m::CustomCall(), 0)),
m::ConstantScalar(0))));
ExpectOnlyPadsOneDim(3, 1, pad->padding_config());
}
TEST_F(CudnnSimplifyPaddingTest, PaddedTransposedAndReshapedOutput) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights_p = pad(s8[64,60,3,3] parameter(0), s8[] constant(0)), padding=0_0x0_4x0_0x0_0
weights = s8[2,32,64,3,3] reshape(weights_p)
conv = (s8[10,2,10,10,32], u8[0]) custom-call(
s8[10,2,10,10,32] parameter(1),
weights
), window={size=3x3}, dim_labels=bf01?_i?o01->bf01?,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
conv_transposed = s8[10,2,32,10,10] transpose(conv_result), dimensions={0,1,4,2,3}
slice = s8[10,60,10,10] slice(s8[10,64,10,10] reshape(conv_transposed)), slice={[0:10], [0:60], [0:10], [0:10]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_6x0_0x0_0
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* pad = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Pad(
&pad,
m::Reshape(m::Transpose(m::GetTupleElement(m::CustomCall(), 0))),
m::ConstantScalar(0))));
ExpectOnlyPadsOneDim(1, 2, pad->padding_config());
}
TEST_F(CudnnSimplifyPaddingTest, PaddedConstantWeight) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(0),
s8[3,3,10,10] constant({...})
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
{
HloInstruction* weights = nullptr;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Pad(m::Slice(m::GetTupleElement(m::CustomCall(
m::Op(), m::Constant(&weights)))),
m::Op())));
SetConstantValue<int8_t>(
weights, [](absl::Span<const int64_t> dims, int8_t old_val) -> int8_t {
if (dims[3] < 6) return 1;
return 0;
});
}
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* pad = nullptr;
ASSERT_THAT(root,
GmockMatch(m::Pad(&pad, m::GetTupleElement(m::CustomCall(), 0),
m::ConstantScalar(0))));
ExpectOnlyPadsOneDim(3, 1, pad->padding_config());
}
TEST_F(CudnnSimplifyPaddingTest, PaddedConstantWeightIsNotLargeEnough) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(0),
s8[3,3,10,10] constant({...})
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
{
HloInstruction* weights = nullptr;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Pad(m::Slice(m::GetTupleElement(m::CustomCall(
m::Op(), m::Constant(&weights)))),
m::Op())));
SetConstantValue<int8_t>(
weights, [](absl::Span<const int64_t> dims, int8_t old_val) -> int8_t {
          if (dims[3] < 5) return 0;
return 1;
});
}
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, ReshapeDoesntMergeVectCDim) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights_p = pad(s8[64,60,3,3] parameter(0), s8[] constant(0)), padding=0_0x0_4x0_0x0_0
weights = s8[2,64,3,3,32] reshape(weights_p)
conv = (s8[10,2,10,10,32], u8[0]) custom-call(
s8[10,2,10,10,32] parameter(1),
weights_p
), window={size=3x3}, dim_labels=bf01?_io01?->bf01?,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,60,10,10] slice(s8[10,64,10,10] reshape(conv_result)), slice={[0:10], [0:60], [0:10], [0:10]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_6x0_0x0_0
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, TwoVectCDimsInOutput) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights_p = pad(s8[64,60,3,3] parameter(0), s8[] constant(0)), padding=0_0x0_4x0_0x0_0
weights = s8[2,64,3,3,32] reshape(weights_p)
conv = (s8[10,2,10,10,4,8], u8[0]) custom-call(
s8[10,2,10,10,32] parameter(1),
weights
), window={size=3x3}, dim_labels=bf01?_io01?->bf01??,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
conv_transposed = s8[10,2,4,8,10,10] transpose(conv_result), dimensions={0,1,4,5,2,3}
slice = s8[10,60,10,10] slice(s8[10,64,10,10] reshape(conv_transposed)), slice={[0:10], [0:60], [0:10], [0:10]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_6x0_0x0_0
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, TwoVectCDimsInKernel) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights_p = pad(s8[64,60,3,3] parameter(0), s8[] constant(0)), padding=0_0x0_4x0_0x0_0
weights = s8[2,64,3,3,4,8] reshape(weights_p)
conv = (s8[10,2,10,10,32], u8[0]) custom-call(
s8[10,2,10,10,32] parameter(1),
weights
), window={size=3x3}, dim_labels=bf01?_io01??->bf01?,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
conv_transposed = s8[10,2,32,10,10] transpose(conv_result), dimensions={0,1,4,2,3}
slice = s8[10,60,10,10] slice(s8[10,64,10,10] reshape(conv_transposed)), slice={[0:10], [0:60], [0:10], [0:10]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_6x0_0x0_0
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, SliceDoesntStartAtBeginning) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,9,10,6] slice(conv_result), slice={[0:10], [1:10], [0:10], [0:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, SliceDoesntStartAtBeginningOfFeatureDim) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,5] slice(conv_result), slice={[0:10], [0:10], [0:10], [1:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, SliceHasStride) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,3] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6:2]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, PadAddsInteriorPadding) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5_1
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, SliceMoreElementsThanPad) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_2
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* slice = nullptr;
ASSERT_THAT(root, GmockMatch(m::Slice(
&slice, m::GetTupleElement(m::CustomCall(), 0))));
for (int64_t i = 0; i < slice->shape().dimensions_size(); ++i) {
SCOPED_TRACE(i);
EXPECT_EQ(slice->slice_starts(i), 0);
EXPECT_EQ(slice->slice_strides(i), 1);
if (i != 3) {
EXPECT_EQ(slice->slice_limits(i), 10);
} else {
EXPECT_EQ(slice->slice_limits(i), 8);
}
}
}
TEST_F(CudnnSimplifyPaddingTest, NoChangeOnNonTrivialConstants) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule jit_outer
ENTRY main.26 {
reshape.2 = f32[1,3,3,12]{3,2,1,0} parameter(0)
constant.1 = f32[3,3,1,12]{3,2,1,0} constant({ {
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
}, {
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
      { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
}, {
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } } })
cudnn-conv = (f32[1,5,5,12]{3,2,1,0}, u8[0]{0}) custom-call(reshape.2, constant.1), window={size=3x3 pad=2_2x2_2}, dim_labels=b01f_01io->b01f, feature_group_count=12, custom_call_target="__cudnn$convForward"
get-tuple-element = f32[1,5,5,12]{3,2,1,0} get-tuple-element(cudnn-conv), index=0
slice.2 = f32[1,5,1,12]{3,2,1,0} slice(get-tuple-element), slice={[0:1], [0:5], [0:1], [0:12]}
constant.0 = f32[] constant(0)
ROOT pad.1 = f32[1,5,3,12]{3,2,1,0} pad(slice.2, constant.0), padding=0_0x0_0x2_0x0_0
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, NoChangeOnComplexSlices) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule jit_outer
ENTRY main.26 {
reshape.2 = f32[1,3,3,12]{3,2,1,0} parameter(0)
constant.1 = f32[3,3,1,12]{3,2,1,0} constant({ {
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
}, {
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
      { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
}, {
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } } })
cudnn-conv = (f32[1,5,5,12]{3,2,1,0}, u8[0]{0}) custom-call(reshape.2, constant.1), window={size=3x3 pad=2_2x2_2}, dim_labels=b01f_01io->b01f, feature_group_count=12, custom_call_target="__cudnn$convForward"
get-tuple-element = f32[1,5,5,12]{3,2,1,0} get-tuple-element(cudnn-conv), index=0
slice.2 = f32[1,5,5,4]{3,2,1,0} slice(get-tuple-element), slice={[0:1], [0:5], [0:5], [2:6]}
constant.0 = f32[] constant(0)
ROOT pad.1 = f32[1,5,5,12]{3,2,1,0} pad(slice.2, constant.0), padding=0_0x0_0x0_0x0_8
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, ScanOrderFeatureDimLast) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule jit_outer
ENTRY main.26 {
reshape.2 = f32[1,3,3,12]{3,2,1,0} parameter(0)
constant.1 = f32[3,3,1,12]{3,2,1,0} constant({ {
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
}, {
{ { 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0 } },
      { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
}, {
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } } })
cudnn-conv = (f32[1,5,5,12]{3,2,1,0}, u8[0]{0}) custom-call(reshape.2, constant.1), window={size=3x3 pad=2_2x2_2}, dim_labels=b01f_01io->b01f, feature_group_count=12, custom_call_target="__cudnn$convForward"
get-tuple-element = f32[1,5,5,12]{3,2,1,0} get-tuple-element(cudnn-conv), index=0
slice.2 = f32[1,5,5,6]{3,2,1,0} slice(get-tuple-element), slice={[0:1], [0:5], [0:5], [0:6]}
constant.0 = f32[] constant(0)
ROOT pad.1 = f32[1,5,5,12]{3,2,1,0} pad(slice.2, constant.0), padding=0_0x0_0x0_0x0_6
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, Int8FilterReorderedOutputFirst) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
conv.1 = (s8[1,63,80,80], u8[0]) custom-call(
s8[1,112,80,80] parameter(0), s8[63,112,3,3] parameter(1)),
window={size=3x3}, dim_labels=bf01_oi01->bf01,
custom_call_target="__cudnn$convForward"
gte.1 = s8[1,63,80,80] get-tuple-element(conv.1), index=0
const.0 = s8[] constant(0)
ROOT pad.1 = s8[1,64,80,80] pad(gte.1, const.0), padding=0_0x0_1x0_0x0_0
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunEndToEnd({7, 5}, module.get()));
EXPECT_TRUE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, Int8FilterReorderedOutputLast) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
conv.1 = (s8[1,63,80,80], u8[0]) custom-call(
s8[1,112,80,80] parameter(0), s8[3,3,112,63] parameter(1)),
window={size=3x3}, dim_labels=bf01_01io->bf01,
custom_call_target="__cudnn$convForward"
gte.1 = s8[1,63,80,80] get-tuple-element(conv.1), index=0
const.0 = s8[] constant(0)
ROOT pad.1 = s8[1,64,80,80] pad(gte.1, const.0), padding=0_0x0_1x0_0x0_0
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunEndToEnd({7, 5}, module.get()));
EXPECT_TRUE(changed);
}
}
} |
2,068 | cpp | tensorflow/tensorflow | gpu_conv_padding_legalization | null | null | #ifndef XLA_SERVICE_GPU_GPU_CONV_PADDING_LEGALIZATION_H_
#define XLA_SERVICE_GPU_GPU_CONV_PADDING_LEGALIZATION_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
class GpuConvPaddingLegalization : public HloModulePass {
public:
absl::string_view name() const override {
return "gpu-conv-padding-legalization";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
absl::StatusOr<bool> RunOnComputation(HloComputation* computation);
bool CanonicalizeForwardConvolution(HloInstruction* conv);
bool CanonicalizeBackwardFilterConvolution(HloInstruction* backward_conv);
bool CanonicalizeBackwardInputConvolution(HloInstruction* backward_conv);
};
}
}
#endif
#include "xla/service/gpu/gpu_conv_padding_legalization.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
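// A cuDNN forward convolution is canonical when its padding is symmetric and
// non-negative and its window has no base or window dilation.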
bool IsForwardConvolutionCanonical(const HloInstruction& conv) {
CHECK(conv.custom_call_target() == kCudnnConvForwardCallTarget ||
conv.custom_call_target() ==
kCudnnConvBiasActivationForwardCallTarget ||
conv.custom_call_target() == kCudnnConvForwardGraphCallTarget);
return window_util::HasSymmetricPadding(conv.window()) &&
!window_util::HasNegativePadding(conv.window()) &&
!window_util::HasDilation(conv.window());
}
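// Moves asymmetric padding and base dilation out of `conv_window` into an
// explicit kPad on the input, and negative padding into an explicit kSlice,
// returning the (possibly rewritten) input.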
HloInstruction* MaybePaddedAndSlicedInput(
Window* conv_window, const ConvolutionDimensionNumbers& conv_dnums,
HloInstruction* input) {
HloComputation* computation = input->parent();
if (!window_util::HasSymmetricPadding(*conv_window) ||
window_util::HasBaseDilation(*conv_window)) {
PaddingConfig padding_config =
MakeNoPaddingConfig(input->shape().dimensions_size());
for (size_t i = 0; i < conv_dnums.input_spatial_dimensions().size(); ++i) {
int64_t dim = conv_dnums.input_spatial_dimensions(i);
if (conv_window->dimensions(i).padding_low() > 0) {
padding_config.mutable_dimensions(dim)->set_edge_padding_low(
conv_window->dimensions(i).padding_low());
conv_window->mutable_dimensions(i)->set_padding_low(0);
}
if (conv_window->dimensions(i).padding_high() > 0) {
padding_config.mutable_dimensions(dim)->set_edge_padding_high(
conv_window->dimensions(i).padding_high());
conv_window->mutable_dimensions(i)->set_padding_high(0);
}
if (conv_window->dimensions(i).base_dilation() != 1) {
padding_config.mutable_dimensions(dim)->set_interior_padding(
conv_window->dimensions(i).base_dilation() - 1);
conv_window->mutable_dimensions(i)->set_base_dilation(1);
}
}
PrimitiveType element_type = input->shape().element_type();
HloInstruction* padding = computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(element_type)));
input =
MakePadHlo(input, padding, padding_config, &input->metadata()).value();
}
if (window_util::HasNegativePadding(*conv_window)) {
std::vector<int64_t> start_indices(input->shape().dimensions_size(), 0);
std::vector<int64_t> limit_indices(input->shape().dimensions().begin(),
input->shape().dimensions().end());
std::vector<int64_t> strides(input->shape().dimensions_size(), 1);
for (size_t i = 0; i < conv_dnums.input_spatial_dimensions().size(); ++i) {
int64_t dim = conv_dnums.input_spatial_dimensions(i);
if (conv_window->dimensions(i).padding_low() < 0) {
start_indices[dim] += -conv_window->dimensions(i).padding_low();
conv_window->mutable_dimensions(i)->set_padding_low(0);
}
if (conv_window->dimensions(i).padding_high() < 0) {
limit_indices[dim] -= -conv_window->dimensions(i).padding_high();
conv_window->mutable_dimensions(i)->set_padding_high(0);
}
}
input = MakeSliceHlo(input, start_indices, limit_indices, strides).value();
}
return input;
}
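// Materializes window dilation as interior zero-padding of the kernel so the
// convolution itself no longer needs a dilated window.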
HloInstruction* MaybePaddedKernel(const Window& conv_window,
const ConvolutionDimensionNumbers& conv_dnums,
HloInstruction* kernel) {
if (!window_util::HasWindowDilation(conv_window)) {
return kernel;
}
PaddingConfig padding_config;
for (size_t i = 0; i < kernel->shape().dimensions_size(); ++i) {
padding_config.add_dimensions();
}
for (size_t i = 0; i < conv_dnums.kernel_spatial_dimensions().size(); ++i) {
int64_t dim = conv_dnums.kernel_spatial_dimensions(i);
padding_config.mutable_dimensions(dim)->set_interior_padding(
conv_window.dimensions(i).window_dilation() - 1);
}
HloComputation* computation = kernel->parent();
PrimitiveType element_type = kernel->shape().element_type();
HloInstruction* padding = computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(element_type)));
return MakePadHlo(kernel, padding, padding_config, &kernel->metadata())
.value();
}
}
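// Rewrites a non-canonical forward convolution by folding padding and
// dilation into explicit pad/slice ops on the input and kernel, then clones
// the custom call with the cleaned-up window. Returns true if the
// instruction was replaced.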
bool GpuConvPaddingLegalization::CanonicalizeForwardConvolution(
HloInstruction* conv) {
if (IsForwardConvolutionCanonical(*conv)) {
return false;
}
Window new_conv_window = conv->window();
HloInstruction* new_input = MaybePaddedAndSlicedInput(
&new_conv_window, conv->convolution_dimension_numbers(),
conv->mutable_operand(0));
HloInstruction* new_kernel =
MaybePaddedKernel(new_conv_window, conv->convolution_dimension_numbers(),
conv->mutable_operand(1));
for (size_t i = 0; i < new_conv_window.dimensions_size(); ++i) {
WindowDimension* dim = new_conv_window.mutable_dimensions(i);
dim->set_size(new_kernel->shape().dimensions(
conv->convolution_dimension_numbers().kernel_spatial_dimensions(i)));
dim->set_window_dilation(1);
}
VLOG(1) << "Canonicalizing forward conv";
std::vector<HloInstruction*> operands(conv->operands().begin(),
conv->operands().end());
operands[0] = new_input;
operands[1] = new_kernel;
auto new_conv = conv->parent()->AddInstruction(
conv->CloneWithNewOperands(conv->shape(), operands));
new_conv->set_window(new_conv_window);
VLOG(1) << "Replacing:\n " << conv->ToString() << "\nwith:\n "
<< new_conv->ToString();
TF_CHECK_OK(conv->parent()->ReplaceInstruction(conv, new_conv));
return true;
}
namespace {
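// Adjusts one side of a window dimension's padding by delta (which may be
// negative).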
void IncreasePaddingLowBy(int64_t delta, WindowDimension* window_dim) {
window_dim->set_padding_low(window_dim->padding_low() + delta);
}
void IncreasePaddingHighBy(int64_t delta, WindowDimension* window_dim) {
window_dim->set_padding_high(window_dim->padding_high() + delta);
}
}
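// Makes the window padding of a backward-filter convolution symmetric by
// moving the excess low/high padding onto an explicit zero-pad of the input
// operand. Bails out (returns false) if any padding is negative.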
bool GpuConvPaddingLegalization::CanonicalizeBackwardFilterConvolution(
HloInstruction* backward_conv) {
CHECK_EQ(backward_conv->custom_call_target(),
kCudnnConvBackwardFilterCallTarget);
if (window_util::HasSymmetricPadding(backward_conv->window())) {
return false;
}
HloInstruction* input = backward_conv->mutable_operand(0);
Window new_backward_conv_window = backward_conv->window();
PaddingConfig input_padding_config =
MakeNoPaddingConfig(input->shape().rank());
ConvolutionDimensionNumbers backward_conv_dnums =
backward_conv->convolution_dimension_numbers();
for (size_t i = 0; i < backward_conv->window().dimensions_size(); ++i) {
int64_t padding_low = backward_conv->window().dimensions(i).padding_low();
int64_t padding_high = backward_conv->window().dimensions(i).padding_high();
if (padding_low < 0 || padding_high < 0) {
return false;
}
int64_t new_conv_padding = std::min(padding_low, padding_high);
int64_t dim = backward_conv_dnums.input_spatial_dimensions(i);
input_padding_config.mutable_dimensions(dim)->set_edge_padding_low(
padding_low - new_conv_padding);
input_padding_config.mutable_dimensions(dim)->set_edge_padding_high(
padding_high - new_conv_padding);
auto* new_dim = new_backward_conv_window.mutable_dimensions(i);
new_dim->set_padding_low(new_conv_padding);
new_dim->set_padding_high(new_conv_padding);
}
HloComputation* computation = backward_conv->parent();
HloInstruction* output = backward_conv->mutable_operand(1);
HloInstruction* padding =
computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(input->shape().element_type())));
HloInstruction* padded_input =
MakePadHlo(input, padding, input_padding_config).value();
HloInstruction* new_backward_conv =
computation->AddInstruction(backward_conv->CloneWithNewOperands(
backward_conv->shape(), {padded_input, output}));
new_backward_conv->set_window(new_backward_conv_window);
VLOG(1) << "Canonicalizing backward filter conv";
VLOG(1) << "Replacing:\n " << backward_conv->ToString() << "\nwith:\n "
<< new_backward_conv->ToString();
TF_CHECK_OK(
computation->ReplaceInstruction(backward_conv, new_backward_conv));
return true;
}
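// Equalizes the low/high window padding of a backward-input convolution by
// shrinking the larger side, which enlarges the convolution result along the
// affected spatial dimensions; the enlarged result is then sliced back to
// the original output shape.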
bool GpuConvPaddingLegalization::CanonicalizeBackwardInputConvolution(
HloInstruction* backward_conv) {
if (window_util::HasSymmetricPadding(backward_conv->window())) {
return false;
}
Window new_backward_conv_window = backward_conv->window();
ConvolutionDimensionNumbers backward_conv_dnums =
backward_conv->convolution_dimension_numbers();
Shape backward_conv_shape = backward_conv->shape().tuple_shapes(0);
Shape new_backward_conv_shape = backward_conv_shape;
for (size_t i = 0; i < backward_conv->window().dimensions_size(); ++i) {
int64_t padding_low = backward_conv->window().dimensions(i).padding_low();
int64_t padding_high = backward_conv->window().dimensions(i).padding_high();
if (padding_low < 0 || padding_high < 0) {
return false;
}
if (padding_low > padding_high) {
IncreasePaddingLowBy(padding_high - padding_low,
new_backward_conv_window.mutable_dimensions(i));
} else if (padding_low < padding_high) {
IncreasePaddingHighBy(padding_low - padding_high,
new_backward_conv_window.mutable_dimensions(i));
}
int64_t dim = backward_conv_dnums.input_spatial_dimensions(i);
new_backward_conv_shape.set_dimensions(
dim, new_backward_conv_shape.dimensions(dim) +
std::abs(padding_low - padding_high));
}
HloComputation* computation = backward_conv->parent();
HloInstruction* output = backward_conv->mutable_operand(0);
HloInstruction* filter = backward_conv->mutable_operand(1);
HloInstruction* new_backward_conv_call =
computation->AddInstruction(backward_conv->CloneWithNewOperands(
ShapeUtil::MakeTupleShape(
{new_backward_conv_shape, ShapeUtil::MakeShape(U8, {0})}),
{output, filter}));
new_backward_conv_call->set_window(new_backward_conv_window);
HloInstruction* new_backward_conv =
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
new_backward_conv_shape, new_backward_conv_call, 0));
HloInstruction* new_backward_conv_scratch =
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
new_backward_conv_call->shape().tuple_shapes(1),
new_backward_conv_call, 1));
std::vector<int64_t> start_indices(
new_backward_conv->shape().dimensions_size(), 0LL);
std::vector<int64_t> limit_indices(
new_backward_conv->shape().dimensions().begin(),
new_backward_conv->shape().dimensions().end());
std::vector<int64_t> strides(new_backward_conv->shape().dimensions_size(),
1LL);
for (size_t i = 0; i < backward_conv->window().dimensions_size(); ++i) {
int64_t padding_low = backward_conv->window().dimensions(i).padding_low();
int64_t padding_high = backward_conv->window().dimensions(i).padding_high();
int64_t dim = backward_conv_dnums.input_spatial_dimensions(i);
if (padding_low > padding_high) {
start_indices[dim] += padding_low - padding_high;
} else if (padding_low < padding_high) {
limit_indices[dim] -= padding_high - padding_low;
}
}
Shape slice_shape =
ShapeInference::InferSliceShape(new_backward_conv->shape(), start_indices,
limit_indices, strides)
.value();
CHECK(ShapeUtil::Compatible(slice_shape, backward_conv_shape))
<< ShapeUtil::HumanString(slice_shape) << " vs "
<< ShapeUtil::HumanString(backward_conv_shape);
HloInstruction* slice = computation->AddInstruction(
HloInstruction::CreateSlice(backward_conv_shape, new_backward_conv,
start_indices, limit_indices, strides));
HloInstruction* new_tuple = computation->AddInstruction(
HloInstruction::CreateTuple({slice, new_backward_conv_scratch}));
VLOG(1) << "Canonicalizing backward input conv";
VLOG(1) << "Replacing:\n " << backward_conv->ToString() << "\nwith:\n "
<< new_tuple->ToString();
TF_CHECK_OK(computation->ReplaceInstruction(backward_conv, new_tuple));
return true;
}
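// Canonicalizes every cuDNN convolution custom call in the computation
// according to its kind. Returns true if any instruction was rewritten.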
absl::StatusOr<bool> GpuConvPaddingLegalization::RunOnComputation(
HloComputation* computation) {
bool changed = false;
std::vector<HloCustomCallInstruction*> convs;
for (auto* instr : computation->instructions()) {
if (IsCustomCallToDnnConvolution(*instr)) {
convs.push_back(Cast<HloCustomCallInstruction>(instr));
}
}
for (HloCustomCallInstruction* instruction : convs) {
TF_ASSIGN_OR_RETURN(auto kind, GetCudnnConvKind(instruction));
changed |= [&] {
switch (kind) {
case CudnnConvKind::kForward:
case CudnnConvKind::kForwardActivation:
case CudnnConvKind::kForwardGraph:
return CanonicalizeForwardConvolution(instruction);
case CudnnConvKind::kBackwardInput:
return CanonicalizeBackwardInputConvolution(instruction);
case CudnnConvKind::kBackwardFilter:
return CanonicalizeBackwardFilterConvolution(instruction);
}
}();
}
return changed;
}
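// Pass entry point: canonicalizes convolutions in all non-fusion
// computations of the module.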
absl::StatusOr<bool> GpuConvPaddingLegalization::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool result, RunOnComputation(computation));
changed |= result;
}
return changed;
}
}
} | #include "xla/service/gpu/gpu_conv_padding_legalization.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
using GpuConvPaddingLegalizationTest = HloTestBase;
TEST_F(GpuConvPaddingLegalizationTest, BackwardInputConvolve) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule convolution_module
ENTRY %convolution (operand f64[2,2,2,3]{3,2,1,0}) -> (f64[2,2,4,4]{3,2,1,0}, u8[0]) {
%operand = f64[2,2,2,3]{3,2,1,0} parameter(0)
%kernel = f64[2,3,2,3]{3,2,1,0} constant(
{
{
{
{ 0.29629629629629628, 0.30246913580246915, 0.30864197530864196 },
{ 0.31481481481481483, 0.32098765432098764, 0.3271604938271605 }
},
{
{ 0.25925925925925924, 0.26543209876543211, 0.27160493827160492 },
{ 0.27777777777777779, 0.2839506172839506, 0.29012345679012347 }
},
{
{ 0.22222222222222221, 0.22839506172839505, 0.23456790123456789 },
{ 0.24074074074074073, 0.24691358024691357, 0.25308641975308643 }
}
},
{
{
{ 0.18518518518518517, 0.19135802469135801, 0.19753086419753085 },
{ 0.20370370370370369, 0.20987654320987653, 0.21604938271604937 }
},
{
{ 0.14814814814814814, 0.15432098765432098, 0.16049382716049382 },
{ 0.16666666666666666, 0.1728395061728395, 0.17901234567901234 }
},
{
{ 0.1111111111111111, 0.11728395061728394, 0.12345679012345678 },
{ 0.12962962962962962, 0.13580246913580246, 0.1419753086419753 }
}
}
})
%reverse = f64[2,3,2,3]{3,2,1,0} reverse(%kernel), dimensions={0,1}
ROOT %custom-call = (f64[2,2,4,4]{3,2,1,0}, u8[0]{0}) custom-call(f64[2,2,2,3]{3,2,1,0} %operand, f64[2,3,2,3]{3,2,1,0} %reverse), window={size=2x3 stride=2x2 pad=0_0x0_1}, dim_labels=bf01_01io->b01f, custom_call_target="__cudnn$convBackwardInput", backend_config="{\"algorithm\":\"0\",\"tensor_ops_enabled\":false,\"conv_result_scale\":1,\"activation_mode\":\"0\",\"side_input_scale\":0}"
}
)")
.value();
ASSERT_TRUE(GpuConvPaddingLegalization().Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Tuple(
m::Slice(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardInputCallTarget},
m::Op(), m::Reverse(m::Constant())),
0)),
m::GetTupleElement())));
auto slice = root->operand(0);
Shape expected_slice_shape = ShapeUtil::MakeShape(F64, {2, 2, 4, 4});
EXPECT_TRUE(ShapeUtil::Equal(slice->shape(), expected_slice_shape));
auto conv = slice->operand(0);
Shape expected_conv_shape = ShapeUtil::MakeShape(F64, {2, 2, 4, 5});
EXPECT_TRUE(ShapeUtil::Equal(conv->shape(), expected_conv_shape));
}
}
}
} |
2,069 | cpp | tensorflow/tensorflow | horizontal_loop_fusion | third_party/xla/xla/service/gpu/transforms/horizontal_loop_fusion.cc | third_party/xla/xla/service/gpu/transforms/horizontal_loop_fusion_test.cc | #ifndef XLA_SERVICE_GPU_HORIZONTAL_LOOP_FUSION_H_
#define XLA_SERVICE_GPU_HORIZONTAL_LOOP_FUSION_H_
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
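// Horizontally fuses sibling loop fusions (and elementwise instructions)
// that feed a common consumer into a single fusion, reducing kernel launch
// overhead on GPU.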
class GpuHorizontalLoopFusion : public HloModulePass {
public:
GpuHorizontalLoopFusion() = default;
explicit GpuHorizontalLoopFusion(absl::string_view prefix)
: prefix_(prefix) {}
absl::string_view name() const override {
return "gpu_horizontal_loop_fusion";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
absl::StatusOr<bool> RunOnComputation(HloComputation*);
std::string prefix_;
};
}
}
#endif
#include "xla/service/gpu/horizontal_loop_fusion.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/sub_byte_normalization.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
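// Returns the element type shared by all outputs of the fusible;
// CHECK-fails if the outputs disagree.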
PrimitiveType GetUniqueOutputTypeOfFusible(const HloInstruction& fusible) {
auto outputs = GetOutputsOfFusible(fusible);
CHECK(!outputs.empty());
PrimitiveType first_output_type = outputs[0]->shape().element_type();
for (size_t i = 1; i < outputs.size(); ++i) {
PrimitiveType cur_output_type = outputs[i]->shape().element_type();
CHECK(first_output_type == cur_output_type)
<< "Output types are expected to be unique, but see "
<< PrimitiveType_Name(first_output_type) << " and "
<< PrimitiveType_Name(cur_output_type);
}
return first_output_type;
}
class HorizontalLoopFusionImpl {
public:
explicit HorizontalLoopFusionImpl(HloComputation* computation,
absl::string_view prefix)
: computation_(computation), prefix_(prefix) {}
~HorizontalLoopFusionImpl() = default;
absl::StatusOr<bool> Run();
private:
absl::Status Fuse(absl::Span<HloInstruction*> fused_fusion_instrs,
bool sliced_input_fusion,
std::vector<HloInstruction*>& to_fuse_candidates);
absl::Status CreateFusedComputation(
absl::Span<HloInstruction*> fused_fusion_instrs,
std::unique_ptr<HloComputation>* uniq_computation,
std::vector<HloInstruction*>* bound_operands, bool sliced_input_fusion);
absl::StatusOr<bool> FuseConsumerOperands(
HloInstruction* consumer, bool sliced_input_fusion,
std::vector<HloInstruction*>& to_fuse_candidates);
class FusionCandidates {
public:
explicit FusionCandidates(HloInstruction* consumer,
bool sliced_input_fusion)
: fusible_instrs_(),
pos_(0),
sliced_input_fusion_(sliced_input_fusion) {
Initialize(consumer);
}
absl::Span<HloInstruction*> GetNextSpanOfFusions();
private:
void Initialize(HloInstruction*);
std::vector<HloInstruction*> fusible_instrs_;
size_t pos_;
bool sliced_input_fusion_;
};
HloComputation* computation_;
std::string prefix_;
};
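// An instruction qualifies as a fusion candidate if it has no control
// dependencies, is not a nestable variadic reduction, and is either a
// non-nullary elementwise op or a loop fusion whose outputs all share one
// element type.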
bool IsFusibleCandidate(const HloInstruction& instr) {
if (!instr.control_successors().empty() ||
!instr.control_predecessors().empty()) {
return false;
}
if (IsNestableVariadicReduction(instr)) {
return false;
}
if (instr.IsElementwise() && instr.operand_count() > 0) {
return true;
}
if (!instr.IsLoopFusion()) {
return false;
}
auto outputs = GetOutputsOfFusible(instr);
CHECK(!outputs.empty());
const HloInstruction* first_output = outputs[0];
for (size_t i = 1; i < outputs.size(); ++i) {
if (first_output->shape().element_type() !=
outputs[i]->shape().element_type()) {
return false;
}
}
return true;
}
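// Rejects candidates whose output element count or fused instruction count
// exceeds mode-dependent thresholds, since horizontally fusing very large
// fusions tends not to pay off.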
bool IsProfitableFusionCandidate(const HloInstruction& instr,
bool sliced_input_fusion) {
const int64_t kShapeThreshold =
sliced_input_fusion ? 128 * 2048 : 8192 * 8192;
const int64_t kInstrCountThreshold = sliced_input_fusion ? 30 : 128;
const HloInstruction* root = (instr.opcode() == HloOpcode::kFusion)
? instr.fused_expression_root()
: &instr;
if (root->opcode() == HloOpcode::kTuple) {
Shape shape = root->operand(0)->shape();
if (ShapeUtil::ElementsIn(shape) > kShapeThreshold) {
VLOG(2) << "Profitable check failed due to element count with "
"sliced_input_fusion="
<< sliced_input_fusion;
return false;
}
} else {
Shape shape = root->shape();
if (ShapeUtil::ElementsIn(shape) > kShapeThreshold) {
VLOG(2) << "Profiltable check failed due to element size with "
"sliced_input_fusion="
<< sliced_input_fusion;
return false;
}
}
if (instr.opcode() == HloOpcode::kFusion &&
instr.fused_instruction_count() > kInstrCountThreshold) {
return false;
}
return true;
}
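// Returns true if the instruction (or, for a fusion, every dense array
// inside its fused computation) uses a row-major layout.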
bool HasOnlyRowMajorLayout(const HloInstruction& instr) {
if (instr.opcode() != HloOpcode::kFusion) {
return LayoutUtil::IsMonotonicWithDim0Major(instr.shape().layout());
}
auto fused_instrs = instr.fused_instructions_computation()->instructions();
for (HloInstruction* i : fused_instrs) {
if (!LayoutUtil::IsDenseArray(i->shape())) {
continue;
}
if (!LayoutUtil::IsMonotonicWithDim0Major(i->shape().layout())) {
return false;
}
}
return true;
}
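// Returns true if any operand of `instr` is a parameter that another
// candidate in `fusion_instrs` also consumes; such candidates are skipped.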
bool AnyOpndIsParamSharedAmongFusions(
const HloInstruction* instr,
const absl::flat_hash_set<HloInstruction*>& fusion_instrs) {
return absl::c_any_of(instr->operands(), [&](const HloInstruction* opnd) {
return opnd->opcode() == HloOpcode::kParameter &&
absl::c_any_of(opnd->users(), [&](const HloInstruction* user) {
return user != instr && fusion_instrs.contains(user);
});
});
}
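// Collects the fusible producers of the consumer's operands, filters out
// candidates that could create cycles, are unprofitable, are not row-major,
// or share parameters with other candidates, and stable-sorts the remainder
// so that compatible instructions end up adjacent.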
void HorizontalLoopFusionImpl::FusionCandidates::Initialize(
HloInstruction* consumer) {
absl::flat_hash_set<HloInstruction*> fusible_candidates;
std::vector<HloInstruction*> ordered_fusible_candidates;
for (HloInstruction* opnd : consumer->operands()) {
HloInstruction* predecessor = opnd->LatestNonGteAncestor();
if (IsFusibleCandidate(*predecessor)) {
if (fusible_candidates.insert(predecessor).second) {
ordered_fusible_candidates.push_back(predecessor);
}
}
}
for (HloInstruction* instr : ordered_fusible_candidates) {
if (!IsConsumerTheOnlyNonRootUser(*instr, *consumer)) {
VLOG(2) << "sliced_input_fusion=" << sliced_input_fusion_
<< " rejects maybe illegal instr " << instr->ToString()
<< "; including it may create cycles in HLO.";
continue;
} else if (!IsProfitableFusionCandidate(*instr, sliced_input_fusion_)) {
VLOG(2) << "sliced_input_fusion=" << sliced_input_fusion_
<< " rejects may-not-be profitable fusion instr"
<< instr->ToString();
continue;
} else if (!HasOnlyRowMajorLayout(*instr)) {
VLOG(2) << "sliced_input_fusion=" << sliced_input_fusion_
<< " rejects non-row-major fusion instr " << instr->ToString();
continue;
} else if (AnyOpndIsParamSharedAmongFusions(instr, fusible_candidates)) {
VLOG(2) << "sliced_input_fusion=" << sliced_input_fusion_
<< " rejects the fusion instr because it shares parameter with"
<< " other fusion candidates, instr: " << instr->ToString();
continue;
} else {
VLOG(2) << "Find a fusion candidate " << instr->ToString();
fusible_instrs_.push_back(instr);
}
}
std::stable_sort(
fusible_instrs_.begin(), fusible_instrs_.end(),
[&](const HloInstruction* a, const HloInstruction* b) {
if (GetUniqueOutputTypeOfFusible(*a) !=
GetUniqueOutputTypeOfFusible(*b)) {
return GetUniqueOutputTypeOfFusible(*a) <
GetUniqueOutputTypeOfFusible(*b);
} else if (GetOutputSizeOfFusible(*a) != GetOutputSizeOfFusible(*b)) {
return GetOutputSizeOfFusible(*a) < GetOutputSizeOfFusible(*b);
} else if (GetInstrCountOfFusible(*a) != GetInstrCountOfFusible(*b)) {
return GetInstrCountOfFusible(*a) < GetInstrCountOfFusible(*b);
} else {
return ShapeUtil::ElementsIn(GetOutputsOfFusible(*a)[0]->shape()) <
ShapeUtil::ElementsIn(GetOutputsOfFusible(*b)[0]->shape());
}
});
}
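// Returns the next maximal run of adjacent candidates with matching output
// type, output count, and instruction count, capped by a batch-size limit
// and a rough CUDA kernel parameter-size budget.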
absl::Span<HloInstruction*>
HorizontalLoopFusionImpl::FusionCandidates::GetNextSpanOfFusions() {
if (pos_ >= fusible_instrs_.size()) {
return absl::Span<HloInstruction*>();
}
const auto kMaxFusionBatchSize = [&]() -> int64_t {
if (sliced_input_fusion_) {
return 32;
} else {
if (fusible_instrs_[pos_]->opcode() == HloOpcode::kFusion) {
return 32;
} else {
return 64;
}
}
}();
size_t left = pos_;
size_t right = pos_ + 1;
size_t first_output_size = GetOutputSizeOfFusible(*fusible_instrs_[left]);
PrimitiveType first_output_type =
GetUniqueOutputTypeOfFusible(*fusible_instrs_[left]);
constexpr int64_t kMaxCudaParamSize = 4000;
size_t accum_io_size = 0;
size_t accum_num_outputs = 0;
for (; right < fusible_instrs_.size(); ++right) {
PrimitiveType cur_output_type =
GetUniqueOutputTypeOfFusible(*fusible_instrs_[right]);
if (first_output_type != cur_output_type) {
break;
}
if (first_output_size != GetOutputSizeOfFusible(*fusible_instrs_[right])) {
break;
}
if (GetInstrCountOfFusible(*fusible_instrs_[left]) !=
GetInstrCountOfFusible(*fusible_instrs_[right])) {
break;
}
if (!sliced_input_fusion_ &&
!ShapeUtil::EqualIgnoringElementType(
GetOutputsOfFusible(*fusible_instrs_[left])[0]->shape(),
GetOutputsOfFusible(*fusible_instrs_[right])[0]->shape())) {
break;
}
size_t num_outputs = GetOutputSizeOfFusible(*fusible_instrs_[right]);
accum_num_outputs += num_outputs;
if (accum_num_outputs >= kMaxFusionBatchSize) {
break;
}
accum_io_size += fusible_instrs_.at(right)->operand_count() + num_outputs;
if (accum_io_size * 8 >= kMaxCudaParamSize) {
break;
}
}
VLOG(2) << "horizontal fuse get instruction span with " << (right - left)
<< " instructions for sliced_input_fusion=" << sliced_input_fusion_
<< " fusion";
pos_ = right;
return absl::MakeSpan(fusible_instrs_).subspan(left, right - left);
}
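// Repeatedly takes spans of fusible producers feeding `consumer`, wraps bare
// elementwise ops into loop fusions, and horizontally fuses each span.
// Returns true if anything was fused.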
absl::StatusOr<bool> HorizontalLoopFusionImpl::FuseConsumerOperands(
HloInstruction* consumer, bool sliced_input_fusion,
std::vector<HloInstruction*>& to_fuse_candidates) {
bool changed = false;
FusionCandidates loop_fusion_candidates(consumer, sliced_input_fusion);
while (true) {
auto fusibles = loop_fusion_candidates.GetNextSpanOfFusions();
if (fusibles.empty()) {
break;
} else if (fusibles.size() == 1) {
continue;
}
changed = true;
std::vector<HloInstruction*> fusion_instrs;
for (HloInstruction* instr : fusibles) {
if (instr->opcode() == HloOpcode::kFusion) {
fusion_instrs.push_back(instr);
} else {
TF_ASSIGN_OR_RETURN(
HloInstruction * fusion_instr,
MakeFusionInstruction(instr, HloInstruction::FusionKind::kLoop));
fusion_instrs.push_back(fusion_instr);
}
}
TF_RETURN_IF_ERROR(Fuse(absl::MakeSpan(fusion_instrs), sliced_input_fusion,
to_fuse_candidates));
}
return changed;
}
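// Builds the horizontally fused computation: every original fusion body is
// cloned with fresh parameters; in sliced-input mode the per-position
// outputs are reshaped to 1-D, concatenated, and sliced back apart, while in
// loop mode the outputs are returned directly in a flat tuple.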
absl::Status HorizontalLoopFusionImpl::CreateFusedComputation(
absl::Span<HloInstruction*> fused_fusion_instrs,
std::unique_ptr<HloComputation>* uniq_computation,
std::vector<HloInstruction*>* bound_operands, bool sliced_input_fusion) {
HloComputation::Builder b(prefix_ + "horizontally_fused_computation");
size_t fused_comp_param_id = 0;
for (size_t i = 0; i < fused_fusion_instrs.size(); ++i) {
auto old_params = fused_fusion_instrs[i]->fused_parameters();
for (size_t j = 0; j < old_params.size(); ++j) {
HloInstruction* bound_opnd = fused_fusion_instrs[i]->mutable_operand(j);
b.AddInstruction(HloInstruction::CreateParameter(
fused_comp_param_id++, bound_opnd->shape(),
absl::StrCat("param_", i, "_", j)));
bound_operands->push_back(bound_opnd);
}
}
HloInstruction* dummy_root = b.AddInstruction(
HloInstruction::CreateTuple(std::vector<HloInstruction*>{}));
*uniq_computation = b.Build(dummy_root);
HloComputation* comp = uniq_computation->get();
absl::flat_hash_map<const HloInstruction*, HloInstruction*> clone_map;
size_t new_param_id = 0;
for (size_t i = 0; i < fused_fusion_instrs.size(); ++i) {
auto old_params = fused_fusion_instrs[i]->fused_parameters();
for (size_t j = 0; j < old_params.size(); ++j) {
HloInstruction* old_param = old_params[j];
HloInstruction* new_param = comp->parameter_instruction(new_param_id++);
clone_map.insert({old_param, new_param});
}
}
const OpMetadata* metadata = nullptr;
for (size_t i = 0; i < fused_fusion_instrs.size(); ++i) {
auto def_to_use_order = fused_fusion_instrs[i]
->fused_instructions_computation()
->MakeInstructionPostOrder();
for (HloInstruction* old_instr : def_to_use_order) {
if (old_instr->opcode() == HloOpcode::kParameter ||
(sliced_input_fusion && old_instr->opcode() == HloOpcode::kTuple &&
old_instr == fused_fusion_instrs[i]->fused_expression_root())) {
continue;
}
std::vector<HloInstruction*> new_opnds;
const auto& old_opnds = old_instr->operands();
new_opnds.reserve(old_opnds.size());
for (HloInstruction* old_opnd : old_opnds) {
CHECK(clone_map.find(old_opnd) != clone_map.end());
new_opnds.push_back(clone_map[old_opnd]);
}
HloInstruction* new_instr = comp->AddInstruction(
old_instr->CloneWithNewOperands(old_instr->shape(), new_opnds));
clone_map.insert({old_instr, new_instr});
metadata = &old_instr->metadata();
}
}
size_t fused_instr_output_size =
GetOutputSizeOfFusible(*fused_fusion_instrs[0]);
if (sliced_input_fusion) {
std::vector<HloInstruction*> concated_outputs;
for (size_t i = 0; i < fused_instr_output_size; ++i) {
std::vector<HloInstruction*> instr_outputs(fused_fusion_instrs.size());
for (size_t j = 0; j < fused_fusion_instrs.size(); ++j) {
const HloInstruction* old_output =
GetOutputsOfFusible(*fused_fusion_instrs[j])[i];
HloInstruction* new_output = clone_map[old_output];
if (new_output->shape().dimensions_size() == 1) {
instr_outputs[j] = new_output;
} else {
Shape new_shape = ShapeUtil::MakeShapeWithDenseLayout(
new_output->shape().element_type(),
{ShapeUtil::ElementsIn(new_output->shape())},
std::vector<int64_t>(1, 0));
TF_ASSIGN_OR_RETURN(instr_outputs[j],
MakeReshapeHlo(new_shape, new_output));
}
}
TF_ASSIGN_OR_RETURN(HloInstruction * concated_output,
MakeConcatHlo(instr_outputs, 0));
concated_outputs.push_back(concated_output);
}
std::vector<HloInstruction*> output_slices(concated_outputs.size() *
fused_fusion_instrs.size());
for (size_t i = 0; i < concated_outputs.size(); ++i) {
HloInstruction* concated_output = concated_outputs[i];
int64_t slice_start = 0;
for (size_t j = 0; j < fused_fusion_instrs.size(); ++j) {
const HloInstruction* old_output =
GetOutputsOfFusible(*fused_fusion_instrs[j])[i];
Shape shape = old_output->shape();
int64_t slice_limit = slice_start + ShapeUtil::ElementsIn(shape);
TF_ASSIGN_OR_RETURN(
output_slices[concated_outputs.size() * j + i],
MakeSliceHlo(concated_output, {slice_start}, {slice_limit},
{1}));
slice_start = slice_limit;
}
}
HloInstruction* tuple = comp->AddInstruction(
HloInstruction::CreateTuple(output_slices), metadata);
comp->set_root_instruction(tuple, true);
TF_RETURN_IF_ERROR(comp->RemoveInstruction(dummy_root));
} else {
std::vector<HloInstruction*> tuple_operands(fused_instr_output_size *
fused_fusion_instrs.size());
for (size_t i = 0; i < fused_instr_output_size; ++i) {
for (size_t j = 0; j < fused_fusion_instrs.size(); ++j) {
const HloInstruction* old_output =
GetOutputsOfFusible(*fused_fusion_instrs[j])[i];
HloInstruction* new_output = clone_map[old_output];
tuple_operands[fused_instr_output_size * j + i] = new_output;
}
}
HloInstruction* tuple =
comp->AddInstruction(HloInstruction::CreateTuple(tuple_operands));
comp->set_root_instruction(tuple, true);
TF_RETURN_IF_ERROR(comp->RemoveInstruction(dummy_root));
}
return absl::OkStatus();
}
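// Creates the fused computation and a new input- or loop-kind fusion over
// the bound operands, records it as a future horizontal-fusion candidate,
// and rewires the users of the original fusions to the new outputs.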
absl::Status HorizontalLoopFusionImpl::Fuse(
absl::Span<HloInstruction*> fused_fusion_instrs, bool sliced_input_fusion,
std::vector<HloInstruction*>& to_fuse_candidates) {
std::unique_ptr<HloComputation> uniq_computation;
std::vector<HloInstruction*> bound_operands;
TF_RETURN_IF_ERROR(CreateFusedComputation(fused_fusion_instrs,
&uniq_computation, &bound_operands,
sliced_input_fusion));
HloComputation* fused_comp = computation_->parent()->AddEmbeddedComputation(
std::move(uniq_computation));
HloInstruction* hori_fusion_instr = computation_->AddInstruction(
HloInstruction::CreateFusion(fused_comp->root_instruction()->shape(),
sliced_input_fusion
? HloInstruction::FusionKind::kInput
: HloInstruction::FusionKind::kLoop,
bound_operands, fused_comp, prefix_),
&fused_comp->root_instruction()->metadata());
fused_comp->SetFusionInstruction(hori_fusion_instr);
to_fuse_candidates.push_back(hori_fusion_instr);
size_t total_output_id = 0;
for (size_t i = 0; i < fused_fusion_instrs.size(); ++i) {
std::vector<HloInstruction*> bitcasts_or_gte;
HloInstruction* fused_instr = fused_fusion_instrs[i];
size_t num_out | #include "xla/service/gpu/horizontal_loop_fusion.h"
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/instruction_fusion.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_pass_fix.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class HorizontalLoopFusionTest : public HloTestBase {
public:
static bool IsFusion(const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kFusion;
}
};
TEST_F(HorizontalLoopFusionTest, BasicTest) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule BasicTest
fused_computation.1 {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
ROOT mul.1 = f16[1024]{0} multiply(arg.1, arg.2)
}
fused_computation.2 {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
ROOT add.1 = f16[123]{0} add(arg.1, arg.2)
}
ENTRY entry_computation {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
arg.3 = f16[123]{0} parameter(2)
arg.4 = f16[123]{0} parameter(3)
fusion.1 = f16[1024]{0}
fusion(arg.1, arg.2), kind=kLoop, calls=fused_computation.1
fusion.2 = f16[123]{0}
fusion(arg.3, arg.4), kind=kLoop, calls=fused_computation.2
ROOT tuple.1 = (f16[1024]{0}, f16[123]{0})
tuple(fusion.1, fusion.2)
}
)")
.value();
EXPECT_TRUE(GpuHorizontalLoopFusion().Run(module.get()).value());
TF_ASSERT_OK(verifier().Run(module.get()).status());
EXPECT_FALSE(HloDCE().Run(module.get()).value());
const HloInstruction* entry_root =
module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(entry_root,
GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement(m::Fusion()))));
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Slice(m::Concatenate()),
m::Slice(m::Concatenate()))));
}
TEST_F(HorizontalLoopFusionTest, NegativeTestForCycle) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule NegativeTestForCycle
fused_computation.1 {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
ROOT mul.1 = f16[123]{0} multiply(arg.1, arg.2)
}
fused_computation.2 {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
ROOT add.1 = f16[123]{0} add(arg.1, arg.2)
}
ENTRY entry_computation {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
arg.3 = f16[123]{0} parameter(2)
arg.4 = f16[123]{0} parameter(3)
fusion.1 = f16[123]{0}
fusion(arg.1, arg.2), kind=kLoop, calls=fused_computation.1
add.2 = f16[123]{0} add(fusion.1, arg.4)
fusion.2 = f16[123]{0}
fusion(add.2, arg.3), kind=kLoop, calls=fused_computation.2
ROOT tuple.1 = (f16[123]{0}, f16[123]{0}, f16[123]{0})
tuple(fusion.1, fusion.2, add.2)
}
)")
.value();
EXPECT_FALSE(GpuHorizontalLoopFusion().Run(module.get()).value());
}
TEST_F(HorizontalLoopFusionTest, NegativeTestForIncompatibleTypes) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule NegativeTestForIncompatibleTypes
fused_computation.1 {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
ROOT mul.1 = f16[1024]{0} multiply(arg.1, arg.2)
}
fused_computation.2 {
arg.1 = s32[123]{0} parameter(0)
arg.2 = s32[123]{0} parameter(1)
ROOT add.1 = s32[123]{0} add(arg.1, arg.2)
}
ENTRY entry_computation {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
arg.3 = s32[123]{0} parameter(2)
arg.4 = s32[123]{0} parameter(3)
fusion.1 = f16[1024]{0}
fusion(arg.1, arg.2), kind=kLoop, calls=fused_computation.1
fusion.2 = s32[123]{0}
fusion(arg.3, arg.4), kind=kLoop, calls=fused_computation.2
ROOT tuple.1 = (f16[1024]{0}, s32[123]{0})
tuple(fusion.1, fusion.2)
}
)")
.value();
EXPECT_FALSE(GpuHorizontalLoopFusion().Run(module.get()).value());
}
TEST_F(HorizontalLoopFusionTest, FusingIntoKLoopAndKInputTogether) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule FusingIntoKLoopAndKInputTogether
fused_computation.1 {
arg.1 = f16[129, 2048]{1, 0} parameter(0)
arg.2 = f16[129, 2048]{1, 0} parameter(1)
ROOT mul.1 = f16[129,2048]{1, 0} multiply(arg.1, arg.2)
}
fused_computation.2 {
arg.1 = f16[129, 2048]{1, 0} parameter(0)
arg.2 = f16[129, 2048]{1, 0} parameter(1)
ROOT mul.1 = f16[129,2048]{1, 0} multiply(arg.1, arg.2)
}
fused_computation.3 {
arg.1 = f16[130, 2048]{1, 0} parameter(0)
arg.2 = f16[130, 2048]{1, 0} parameter(1)
ROOT mul.1 = f16[130,2048]{1, 0} multiply(arg.1, arg.2)
}
fused_computation.4 {
arg.1 = f16[130, 2048]{1, 0} parameter(0)
arg.2 = f16[130, 2048]{1, 0} parameter(1)
ROOT mul.1 = f16[130,2048]{1, 0} multiply(arg.1, arg.2)
}
fused_computation.5 {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
ROOT add.1 = f16[123]{0} add(arg.1, arg.2)
}
fused_computation.6 {
arg.1 = f16[128]{0} parameter(0)
arg.2 = f16[128]{0} parameter(1)
ROOT add.1 = f16[128]{0} add(arg.1, arg.2)
}
ENTRY entry_computation {
arg.1 = f16[129, 2048]{1, 0} parameter(0)
arg.2 = f16[129, 2048]{1, 0} parameter(1)
arg.3 = f16[129, 2048]{1, 0} parameter(2)
arg.4 = f16[129, 2048]{1, 0} parameter(3)
arg.5 = f16[130, 2048]{1, 0} parameter(4)
arg.6 = f16[130, 2048]{1, 0} parameter(5)
arg.7 = f16[130, 2048]{1, 0} parameter(6)
arg.8 = f16[130, 2048]{1, 0} parameter(7)
arg.9 = f16[123]{0} parameter(8)
arg.10 = f16[123]{0} parameter(9)
arg.11 = f16[128]{0} parameter(10)
arg.12 = f16[128]{0} parameter(11)
fusion.1 = f16[129,2048]{1, 0}
fusion(arg.1, arg.2), kind=kLoop, calls=fused_computation.1
fusion.2 = f16[129,2048]{1, 0}
fusion(arg.3, arg.4), kind=kLoop, calls=fused_computation.2
fusion.3 = f16[130,2048]{1, 0}
fusion(arg.5, arg.6), kind=kLoop, calls=fused_computation.3
fusion.4 = f16[130,2048]{1, 0}
fusion(arg.7, arg.8), kind=kLoop, calls=fused_computation.4
fusion.5 = f16[123]{0}
fusion(arg.9, arg.10), kind=kLoop, calls=fused_computation.5
fusion.6 = f16[128]{0}
fusion(arg.11, arg.12), kind=kLoop, calls=fused_computation.6
ROOT tuple.1 = (f16[129,2048]{1, 0}, f16[129,2048]{1, 0},
f16[130,2048]{1, 0}, f16[130,2048]{1, 0},
f16[123]{0}, f16[128]{0})
tuple(fusion.1, fusion.2, fusion.3, fusion.4, fusion.5, fusion.6)
}
)")
.value();
EXPECT_TRUE(GpuHorizontalLoopFusion().Run(module.get()).value());
int input_fusion_count = 0;
int loop_fusion_count = 0;
for (auto inst : module->entry_computation()->MakeInstructionPostOrder()) {
if (inst->opcode() == HloOpcode::kFusion) {
input_fusion_count +=
(inst->fusion_kind() == HloInstruction::FusionKind::kInput) ? 1 : 0;
loop_fusion_count +=
(inst->fusion_kind() == HloInstruction::FusionKind::kLoop) ? 1 : 0;
}
}
EXPECT_EQ(input_fusion_count, 1);
EXPECT_EQ(loop_fusion_count, 2);
}
TEST_F(HorizontalLoopFusionTest, HorizontalLoopFusionAfterVerticalFusion) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule MergeSharedFusionInstruction
ENTRY MergeSharedFusionInstruction.Computation0 {
param.1.1 = f32[4,1024]{1,0} parameter(0)
param.1.2 = f32[4,1024]{1,0} parameter(1)
param.1.3 = f32[4,1024]{1,0} parameter(2)
param.2.1 = f32[321,5]{1,0} parameter(3)
param.2.2 = f32[321,5]{1,0} parameter(4)
param.2.3 = f32[321,5]{1,0} parameter(5)
const.1 = f32[] constant(3)
const.2 = f32[] constant(3)
broadcast.1 = f32[4,1024]{1,0} broadcast(const.1), dimensions={}
broadcast.2 = f32[321,5]{1,0} broadcast(const.2), dimensions={}
mul.1.1 = f32[4,1024]{1,0} multiply(param.1.1, param.1.2)
mul.1.2 = f32[4,1024]{1,0} multiply(param.1.3, broadcast.1)
add.1 = f32[4,1024]{1,0} add(mul.1.1, mul.1.2)
mul.2.1 = f32[321,5]{1,0} multiply(param.2.1, param.2.2)
mul.2.2 = f32[321,5]{1,0} multiply(param.2.3, broadcast.2)
add.2 = f32[321,5]{1,0} add(mul.2.1, mul.2.2)
ROOT tuple = (f32[4,1024]{1,0}, f32[321,5]{1,0}) tuple(add.1, add.2)
})")
.value();
HloPassPipeline fusion("fusion");
const se::DeviceDescription device_info =
TestGpuDeviceInfo::RTXA6000DeviceInfo();
fusion.AddPass<xla::gpu::GpuInstructionFusion>(false,
device_info);
fusion.AddPass<xla::gpu::GpuInstructionFusion>(true,
device_info);
EXPECT_TRUE(fusion.Run(module.get()).value());
EXPECT_TRUE(GpuHorizontalLoopFusion().Run(module.get()).value());
TF_ASSERT_OK(verifier().Run(module.get()).status());
VLOG(2) << "Dump after horizontal fusion:";
VLOG(2) << module->ToString();
const HloInstruction* entry_root =
module->entry_computation()->root_instruction();
const HloInstruction* fusion_instr = nullptr;
ASSERT_THAT(entry_root,
GmockMatch(m::Tuple(
m::Bitcast(m::GetTupleElement(m::Fusion(&fusion_instr))),
m::Bitcast(m::GetTupleElement(m::Fusion())))));
ASSERT_TRUE(fusion_instr->IsMultiOutputFusion());
EXPECT_THAT(fusion_instr->fused_expression_root(),
GmockMatch(m::Tuple(
m::Slice(m::Concatenate(m::Reshape(), m::Reshape())),
m::Slice(m::Concatenate(m::Reshape(), m::Reshape())))));
EXPECT_TRUE(RunAndCompareNoHloPasses(std::move(module), ErrorSpec{0, 0}));
}
TEST_F(HorizontalLoopFusionTest, GradientDescentOptimizerLike) {
HloComputation::Builder builder(TestName());
std::vector<HloInstruction*> var_outs;
for (int64_t i = 0; i < 128; ++i) {
Shape shape = ShapeUtil::MakeShape(F32, {i + 1, 1024});
HloInstruction* param_var_in = builder.AddInstruction(
HloInstruction::CreateParameter(i * 3 + 0, shape, "var.in"));
HloInstruction* param_alpha =
builder.AddInstruction(HloInstruction::CreateParameter(
i * 3 + 1, ShapeUtil::MakeShape(F32, {}), "alpha"));
HloInstruction* param_delta = builder.AddInstruction(
HloInstruction::CreateParameter(i * 3 + 2, shape, "delta"));
HloInstruction* alpha_broadcasted = builder.AddInstruction(
HloInstruction::CreateBroadcast(shape, param_alpha, {}));
HloInstruction* alpha_delta =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, alpha_broadcasted, param_delta));
HloInstruction* var_out =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kSubtract, param_var_in, alpha_delta));
var_outs.push_back(var_out);
}
builder.AddInstruction(HloInstruction::CreateTuple(var_outs));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{0, 0}));
}
TEST_F(HorizontalLoopFusionTest, FusingDifferentOutputs) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule HeterogeneousMultiOutputFusions
fused_computation.1 {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
arg.3 = f16[1024]{0} parameter(2)
arg.4 = f16[1024]{0} parameter(3)
mul.1 = f16[1024]{0} multiply(arg.1, arg.2)
mul.2 = f16[1024]{0} multiply(arg.3, arg.4)
add.1 = f16[1024]{0} add(mul.1, mul.2)
ROOT tuple.1 = (f16[1024]{0}, f16[1024]{0}) tuple(add.1, mul.1)
}
fused_computation.2 {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
arg.3 = f16[123]{0} parameter(2)
arg.4 = f16[123]{0} parameter(3)
add.1 = f16[123]{0} add(arg.1, arg.2)
add.2 = f16[123]{0} add(arg.3, arg.4)
mul.1 = f16[123]{0} multiply(add.1, add.2)
ROOT tuple.1 = (f16[123]{0}, f16[123]{0}) tuple(mul.1, add.1)
}
ENTRY entry_computation {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
arg.3 = f16[1024]{0} parameter(2)
arg.4 = f16[1024]{0} parameter(3)
arg.5 = f16[123]{0} parameter(4)
arg.6 = f16[123]{0} parameter(5)
arg.7 = f16[123]{0} parameter(6)
arg.8 = f16[123]{0} parameter(7)
fusion.1 = (f16[1024]{0}, f16[1024]{0})
fusion(arg.1, arg.2, arg.3, arg.4),
kind=kLoop, calls=fused_computation.1
fusion.2 = (f16[123]{0}, f16[123]{0})
fusion(arg.5, arg.6, arg.7, arg.8),
kind=kLoop, calls=fused_computation.2
gte.1 = f16[1024]{0} get-tuple-element(fusion.1), index=0
gte.2 = f16[1024]{0} get-tuple-element(fusion.1), index=1
gte.3 = f16[123]{0} get-tuple-element(fusion.2), index=0
gte.4 = f16[123]{0} get-tuple-element(fusion.2), index=1
ROOT tuple.1 = (f16[1024]{0}, f16[1024]{0}, f16[123]{0}, f16[123]{0})
tuple(gte.1, gte.2, gte.3, gte.4)
}
)")
.value();
EXPECT_TRUE(GpuHorizontalLoopFusion().Run(module.get()).value());
TF_ASSERT_OK(verifier().Run(module.get()).status());
EXPECT_FALSE(HloDCE().Run(module.get()).value());
VLOG(2) << "Dump after horizontal fusion:";
VLOG(2) << module->ToString();
EXPECT_TRUE(RunAndCompareNoHloPasses(std::move(module), ErrorSpec{0, 0}));
}
TEST_F(HorizontalLoopFusionTest, RMSPropLike) {
HloComputation::Builder builder(TestName());
std::vector<HloInstruction*> all_outputs;
for (int64_t i = 0; i < 48; ++i) {
Shape shape = ShapeUtil::MakeShape(F32, {2, 1024 + i});
HloInstruction* grad = builder.AddInstruction(
HloInstruction::CreateParameter(i * 9 + 0, shape, "grad"));
HloInstruction* ms = builder.AddInstruction(
HloInstruction::CreateParameter(i * 9 + 1, shape, "ms"));
HloInstruction* rho =
builder.AddInstruction(HloInstruction::CreateParameter(
i * 9 + 2, ShapeUtil::MakeShape(F32, {}), "rho"));
HloInstruction* one_minus_rho =
builder.AddInstruction(HloInstruction::CreateParameter(
i * 9 + 3, ShapeUtil::MakeShape(F32, {}), "one_minus_rho"));
HloInstruction* rho_broadcasted =
builder.AddInstruction(HloInstruction::CreateBroadcast(shape, rho, {}));
HloInstruction* one_mins_rho_broadcasted = builder.AddInstruction(
HloInstruction::CreateBroadcast(shape, one_minus_rho, {}));
HloInstruction* grad_squared = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, grad, grad));
HloInstruction* ms_1st_term = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, grad_squared,
one_mins_rho_broadcasted));
HloInstruction* ms_2nd_term =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, ms, rho_broadcasted));
HloInstruction* ms_out =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, ms_1st_term, ms_2nd_term));
HloInstruction* momentum = builder.AddInstruction(
        HloInstruction::CreateParameter(i * 9 + 4, shape, "momentum"));
HloInstruction* mom = builder.AddInstruction(
HloInstruction::CreateParameter(i * 9 + 5, shape, "mom"));
HloInstruction* lr = builder.AddInstruction(HloInstruction::CreateParameter(
i * 9 + 6, ShapeUtil::MakeShape(F32, {}), "lr"));
HloInstruction* epsilon =
builder.AddInstruction(HloInstruction::CreateParameter(
i * 9 + 7, ShapeUtil::MakeShape(F32, {}), "epsilon"));
HloInstruction* lr_broadcasted =
builder.AddInstruction(HloInstruction::CreateBroadcast(shape, lr, {}));
HloInstruction* epsilon_broadcasted = builder.AddInstruction(
HloInstruction::CreateBroadcast(shape, epsilon, {}));
HloInstruction* mom_1st_term =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, momentum, mom));
HloInstruction* ms_eps =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, ms_out, epsilon_broadcasted));
HloInstruction* ms_eps_rsq = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kRsqrt, ms_eps));
HloInstruction* grad_ms_eps_rsq =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, grad, ms_eps_rsq));
HloInstruction* mom_2nd_term =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, lr_broadcasted, grad_ms_eps_rsq));
HloInstruction* mom_out =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, mom_1st_term, mom_2nd_term));
HloInstruction* var = builder.AddInstruction(
HloInstruction::CreateParameter(i * 9 + 8, shape, "var"));
HloInstruction* var_out =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kSubtract, var, mom_out));
all_outputs.push_back(ms_out);
all_outputs.push_back(mom_out);
all_outputs.push_back(var_out);
}
builder.AddInstruction(HloInstruction::CreateTuple(all_outputs));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{1.0e-5, 1.0e-5}));
}
TEST_F(HorizontalLoopFusionTest, DynamicUpdateSlice) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule NegativeTestForDynamicUpdateSlice
fusion.1 {
p.0 = f16[5,9,10]{2,1,0} parameter(0)
p.1 = s32[] parameter(1)
p.2 = f16[1,9,10]{2,1,0} parameter(2)
c.0 = s32[] constant(0)
ROOT %dynamic-update-slice =
f16[5,9,10]{2,1,0} dynamic-update-slice(p.0, p.2, p.1, c.0, c.0)
}
fusion.2 {
p.0 = f16[5,9,10]{2,1,0} parameter(0)
p.1 = s32[] parameter(1)
p.2 = f16[1,9,10]{2,1,0} parameter(2)
c.0 = s32[] constant(0)
ROOT %dynamic-update-slice =
f16[5,9,10]{2,1,0} dynamic-update-slice(p.0, p.2, p.1, c.0, c.0)
}
ENTRY entry {
p.00 = f16[5,9,10]{2,1,0} parameter(0)
p.01 = f16[5,9,10]{2,1,0} parameter(1)
p.10 = s32[] parameter(2)
p.11 = s32[] parameter(3)
p.20 = f16[1,9,10]{2,1,0} parameter(4)
p.21 = f16[1,9,10]{2,1,0} parameter(5)
f1 = f16[5,9,10] fusion(p.00, p.10, p.20), kind=kLoop, calls=fusion.1
f2 = f16[5,9,10] fusion(p.01, p.11, p.21), kind=kLoop, calls=fusion.2
ROOT tuple = (f16[5,9,10],f16[5,9,10]) tuple(f1, f2)
})")
.value();
EXPECT_TRUE(GpuHorizontalLoopFusion().Run(module.get()).value());
TF_ASSERT_OK(verifier().Run(module.get()).status());
EXPECT_FALSE(HloDCE().Run(module.get()).value());
VLOG(2) << "Dump after horizontal fusion:";
VLOG(2) << module->ToString();
EXPECT_TRUE(RunAndCompareNoHloPasses(std::move(module), ErrorSpec{0, 0}));
}
TEST_F(HorizontalLoopFusionTest, NegativeTestForSharedParam) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule BasicTest
fused_computation.1 {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
ROOT mul.1 = f16[123]{0} multiply(arg.1, arg.2)
}
fused_computation.2 {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
ROOT add.1 = f16[123]{0} add(arg.1, arg.2)
}
ENTRY entry_computation {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
arg.3 = f16[123]{0} parameter(2)
fusion.1 = f16[123]{0}
fusion(arg.1, arg.2), kind=kLoop, calls=fused_computation.1
fusion.2 = f16[123]{0}
fusion(arg.3, arg.2), kind=kLoop, calls=fused_computation.2
ROOT tuple.1 = (f16[123]{0}, f16[123]{0})
tuple(fusion.1, fusion.2)
}
)")
.value();
EXPECT_FALSE(GpuHorizontalLoopFusion().Run(module.get()).value());
}
TEST_F(HorizontalLoopFusionTest, IterativeHorizontalFusion) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule NonfusionInstrs
fused_computation.0 {
arg.0 = f16[] parameter(0)
arg.1 = f16[123]{0} parameter(1)
broadcast.0 = f16[123]{0} broadcast(arg.0), dimensions={}
ROOT mul.1 = f16[123]{0} multiply(broadcast.0, arg.1)
}
fused_computation.1 {
arg.0 = f16[] parameter(0)
arg.1 = f16[456]{0} parameter(1)
broadcast.0 = f16[456]{0} broadcast(arg.0), dimensions={}
ROOT add.1 = f16[456]{0} add(broadcast.0, arg.1)
}
ENTRY entry_computation {
arg.0 = f16[] parameter(0)
arg.1 = f16[] parameter(1)
arg.2 = f16[123]{0} parameter(2)
arg.3 = f16[456]{0} parameter(3)
sqrt.0 = f16[] sqrt(arg.0)
sqrt.1 = f16[] sqrt(arg.1)
fusion.0 = f16[123]{0}
fusion(sqrt.0, arg.2), kind=kLoop, calls=fused_computation.0
fusion.1 = f16[456]{0}
fusion(sqrt.1, arg.3), kind=kLoop, calls=fused_computation.1
ROOT tuple.1 = (f16[123]{0}, f16[456]{0}) tuple(fusion.0, fusion.1)
}
)")
.value();
HloPassFix<HloPassPipeline> iterative_h_fusion("iterative_h_fusion");
iterative_h_fusion.AddPass<GpuHorizontalLoopFusion>();
iterative_h_fusion.AddPass<HloDCE>();
EXPECT_TRUE(iterative_h_fusion.Run(module.get()).value());
const HloInstruction* entry_root =
module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(entry_root,
GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement(m::Fusion()))));
EXPECT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_EQ(
absl::c_count_if(module->entry_computation()->instructions(), IsFusion),
2);
}
TEST_F(HorizontalLoopFusionTest, TraversalOrder) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule cluster
%fused_computation (param_0: f32[256,256], param_1: f32[], param_2: f32[])
-> f32[256,256] {
%param_0 = f32[256,256]{1,0} parameter(0)
%param_1 = f32[] parameter(1)
%param_2 = f32[] parameter(2)
%multiply.0 = f32[] multiply(f32[] %param_1, f32[] %param_2)
%broadcast.0 = f32[256,256]{1,0} broadcast(f32[] %multiply.0), dimensions={}
ROOT %multiply.1 = f32[256,256]{1,0}
multiply(f32[256,256]{1,0} %param_0, f32[256,256]{1,0} %broadcast.0)
}
%fused_computation.1 (param_0: f32[256,256], param_1: f32[], param_2: f32[])
-> f32[256,256] {
%param_0 = f32[256,256]{1,0} parameter(0)
%param_1 = f32[] parameter(1)
%param_2 = f32[] parameter(2)
%multiply.0 = f32[] multiply(f32[] %param_1, f32[] %param_2)
%broadcast.0 = f32[256,256]{1,0} broadcast(f32[] %multiply.0), dimensions={}
ROOT %multiply.1 = f32[256,256]{1,0}
multiply(f32[256,256]{1,0} %param_0, f32[256,256]{1,0} %broadcast.0)
}
ENTRY %entry_computation (arg0: f32[256,256], arg1: f32[256,256], arg2: f32[],
arg3: f32[], arg4: f32[], arg5: f32[])
-> (f32[256,256], f32[256,256]) {
%arg0 = f32[256,256]{1,0} parameter(0), parameter_replication={false}
%arg1 = f32[256,256]{1,0} parameter(1), parameter_replication={false}
%arg2 = f32[] parameter(2), parameter_replication={false}
%arg3 = f32[] parameter(3), parameter_replication={false}
%arg4 = f32[] parameter(4), parameter_replication={false}
%arg5 = f32[] parameter(5), parameter_replication={false}
%sqrt = f32[] sqrt(f32[] %arg2)
%sqrt.1 = f32[] sqrt(f32[] %arg3)
%fusion = f32[256,256]{1,0}
fusion(f32[256,256]{1,0} %arg0, f32[] %sqrt, f32[] %sqrt.1),
kind=kLoop, calls=%fused_computation
%sqrt.2 = f32[] sqrt(f32[] %arg4)
%sqrt.3 = f32[] sqrt(f32[] %arg5)
%fusion.1 = f32[256,256]{1,0}
fusion(f32[256,256]{1,0} %arg1, f32[] %sqrt.2, f32[] %sqrt.3),
kind=kLoop, calls=%fused_computation.1
ROOT %tuple.163 = (f32[256,256]{1,0}, f32[256,256]{1,0})
tuple(f32[256,256]{1,0} %fusion.1, f32[256,256]{1,0} %fusion)
}
)")
.value();
HloPassFix<HloPassPipeline> iterative_h_fusion("iterative_h_fusion");
iterative_h_fusion.AddPass<GpuHorizontalLoopFusion>();
EXPECT_TRUE(iterative_h_fusion.Run(module.get()).value());
EXPECT_EQ(
absl::c_count_if(module->entry_computation()->instructions(), IsFusion),
2);
}
TEST_F(HorizontalLoopFusionTest, NoBufferAliasingOfDuplicateParameter) {
const char* hlo_text = R"(
HloModule m
branch_a {
p0 = s32[] parameter(0)
c0 = s32[] constant(1)
c1 = s32[] constant(2)
b0 = s32[4096] broadcast(c0), dimensions={}
b1 = s32[4096] broadcast(c1), dimensions={}
ROOT r = (s32[4096], s32[4096]) tuple(b0, b1)
}
branch_b {
p0 = s32[] parameter(0)
c0 = s32[] constant(1)
c1 = s32[] constant(2)
b0 = s32[4096] broadcast(c0), dimensions={}
b1 = s32[4096] broadcast(c1), dimensions={}
ROOT r = (s32[4096], s32[4096]) tuple(b0, b1)
}
ENTRY e {
p0 = s32[] parameter(0)
c0 = s32[] constant(0)
cond = (s32[4096], s32[4096]) conditional(p0, c0, c0), branch_computations={branch_a, branch_b}
p1 = s32[4096] parameter(1)
gte0 = s32[4096] get-tuple-element(cond), index=0
gte1 = s32[4096] get-tuple-element(cond), index=1
a0 = s32[4096] add(gte1, gte0)
m0 = s32[4096] multiply(gte1, gte0)
ROOT r = (s32[4096], s32[4096]) tuple(m0, a0)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, std::nullopt));
}
TEST_F(HorizontalLoopFusionTest, CopyInsertionFusionControlFlow) {
const char* hlo_text = R"(
HloModule cluster
ENTRY main {
cst = f32[1]{0} constant({0})
cp1 = f32[1]{0} copy(cst)
cp2 = f32[1]{0} copy(cst)
cp3 = f32[1]{0} copy(cst)
cp4 = f32[1]{0} copy(cst), control-predecessors={cp1}
ROOT tuple_out = (f32[1]{0}, f32[1]{0}, f32[1]{0}, f32[1]{0}) tuple(cp1, cp2, cp3, cp4)
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_text).value();
EXPECT_TRUE(GpuHorizontalLoopFusion().Run(module.get()).value());
VLOG(2) << module->ToString();
EXPECT_EQ(
absl::c_count_if(module->entry_computation()->instructions(), IsFusion),
1);
const HloInstruction* entry_root =
module->entry_computation()->root_instruction();
EXPECT_THAT(entry_root,
GmockMatch(m::Tuple(m::Copy(), m::GetTupleElement(m::Fusion()),
m::GetTupleElement(m::Fusion()), m::Copy())));
}
TEST_F(HorizontalLoopFusionTest, DoNotMergeVariadicReductions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
fused_computation.94 {
tmp_0 = f32[] parameter(0)
tmp_1 = f32[] parameter(1)
tmp_2 = pred[] compare(tmp_0, tmp_1), direction=GE
tmp_3 = f32[] select(tmp_2, tmp_0, tmp_1)
tmp_4 = pred[] compare(tmp_0, tmp_1), direction=EQ
tmp_5 = s32[] parameter(2)
tmp_6 = s32[] parameter(3)
tmp_7 = s32[] minimum(tmp_5, tmp_6)
tmp_8 = s32[] select(tmp_2, tmp_5, tmp_6)
tmp_9 = s32[] select(tmp_4, tmp_7, tmp_8)
ROOT tmp_10 = (f32[], s32[]) tuple(tmp_3, tmp_9)
}
minmax_func.1536 {
tmp_0 = f32[] parameter(0)
tmp_1 = f32[] parameter(2)
tmp_2 = s32[] parameter(1)
tmp_3 = s32[] parameter(3)
ROOT tmp_4 = (f32[], s32[]) fusion(tmp_0, tmp_1, tmp_2, tmp_3), kind=kLoop, calls=fused_computation.94
}
fused_computation {
tmp_0 = f32[554112,10]{1,0} parameter(0)
tmp_1 = s32[554112,10]{1,0} iota(), iota_dimension=1
tmp_2 = f32[] constant(-inf)
tmp_3 = s32[] constant(0)
ROOT tmp_4 = (f32[554112]{0}, s32[554112]{0}) reduce(tmp_0, tmp_1, tmp_2, tmp_3), dimensions={1}, to_apply=minmax_func.1536
}
fused_computation2 {
tmp_0 = f32[554112,10]{1,0} parameter(0)
tmp_1 = s32[554112,10]{1,0} iota(), iota_dimension=1
tmp_2 = f32[] constant(inf)
tmp_3 = s32[] constant(1)
ROOT tmp_4 = (f32[554112]{0}, s32[554112]{0}) reduce(tmp_0, tmp_1, tmp_2, tmp_3), dimensions={1}, to_apply=minmax_func.1536
}
ENTRY e {
tmp_0 = f32[554112,10]{1,0} parameter(0)
tmp_1 = (f32[554112]{0}, s32[554112]{0}) fusion(tmp_0), kind=kLoop, calls=fused_computation
tmp_2 = s32[554112]{0} get-tuple-element(tmp_1), index=1
tmp_3 = f32[554112,10]{1,0} parameter(1)
tmp_4 = (f32[554112]{0}, s32[554112]{0}) fusion(tmp_3), kind=kLoop, calls=fused_computation2
tmp_5 = s32[554112]{0} get-tuple-element(tmp_4), index=1
ROOT tmp_6 = s32[554112]{0} add(tmp_2, tmp_5)
})")
.value();
EXPECT_FALSE(GpuHorizontalLoopFusion().Run(module.get()).value());
}
}
}
} |
2,070 | cpp | tensorflow/tensorflow | triton_fusion_numerics_verifier | third_party/xla/xla/service/gpu/transforms/triton_fusion_numerics_verifier.cc | third_party/xla/xla/service/gpu/transforms/triton_fusion_numerics_verifier_test.cc | #ifndef XLA_SERVICE_GPU_TRITON_FUSION_NUMERICS_VERIFIER_H_
#define XLA_SERVICE_GPU_TRITON_FUSION_NUMERICS_VERIFIER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/autotuner_compile_util.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/service/shaped_buffer.h"
#include "xla/shape.h"
#include "xla/stream_executor/stream.h"
namespace xla::gpu {
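// Verifies that the output of each Triton fusion matches the output obtained
// by compiling the same fusion through the regular (non-Triton) emitters.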
class TritonFusionNumericsVerifier : public HloModulePass {
public:
explicit TritonFusionNumericsVerifier(const AutotuneConfig& config)
: config_(config) {}
static absl::string_view Name() { return "triton-numerics-verifier"; }
absl::string_view name() const override { return Name(); }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
AutotuneConfig config_;
};
namespace triton_fusion_numerics_pass_internal {
absl::StatusOr<ScopedShapedBuffer> CompileAndRunFusion(
AutotunerCompileUtil& util, const HloFusionInstruction& fusion,
const AutotuneConfig& config, const DebugOptions& debug_opts,
bool clear_backend_config);
absl::Status CompareBuffers(const ScopedShapedBuffer& current,
const ScopedShapedBuffer& expected,
const Shape& shape, const HloModuleConfig& config,
se::Stream* stream);
absl::Status ForAllTritonFusions(
const HloModule& module,
const absl::flat_hash_set<absl::string_view>& execution_threads,
absl::AnyInvocable<absl::Status(const HloFusionInstruction&)> fn);
}
}
#endif
#include "xla/service/gpu/triton_fusion_numerics_verifier.h"
#include <memory>
#include <optional>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/autotuner_compile_util.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/buffer_comparator.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/shaped_buffer.h"
#include "xla/shape.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/stream.h"
#include "xla/tools/hlo_decomposer.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
using ProfilingOutput = AutotunerCompileUtil::ProfilingOutput;
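// Returns `hlo` cast to an HloFusionInstruction if its backend config marks
// it as a Triton fusion, and nullptr otherwise.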
absl::StatusOr<const HloFusionInstruction*> AsTritonFusion(
const HloInstruction* hlo) {
if (hlo->opcode() != HloOpcode::kFusion) {
return nullptr;
}
const HloFusionInstruction* fusion = Cast<HloFusionInstruction>(hlo);
TF_ASSIGN_OR_RETURN(auto gpu_config,
fusion->backend_config<GpuBackendConfig>());
const FusionBackendConfig& backend_config =
gpu_config.fusion_backend_config();
if (backend_config.kind() == kTritonFusionKind) {
return fusion;
}
return nullptr;
}
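// Extracts `fusion` into a standalone module. When `clear_backend_config` is
// set, the backend config of the extracted root is dropped so that the module
// compiles through the regular (non-Triton) emitters.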
std::unique_ptr<HloModule> NewHloModuleFromFusion(
const HloFusionInstruction& fusion, const DebugOptions& debug_opts,
bool clear_backend_config) {
std::unique_ptr<HloModule> new_module =
ExtractInstructionIntoNewModule(fusion);
if (clear_backend_config) {
new_module->entry_computation()->root_instruction()->clear_backend_config();
}
new_module->mutable_config().set_debug_options(debug_opts);
return new_module;
}
}
namespace triton_fusion_numerics_pass_internal {
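// Compiles `fusion` into a standalone executable, runs it on redzone-backed
// input buffers, and returns the resulting output buffer.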
absl::StatusOr<ScopedShapedBuffer> CompileAndRunFusion(
AutotunerCompileUtil& util, const HloFusionInstruction& fusion,
const AutotuneConfig& config, const DebugOptions& debug_opts,
bool clear_backend_config) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<Executable> executable,
util.Compile([&](const DebugOptions& opts) {
return NewHloModuleFromFusion(fusion, opts,
clear_backend_config);
}));
TF_ASSIGN_OR_RETURN(auto rz_buffers, RedzoneBuffers::FromInstruction(
fusion, config, debug_opts,
RedzoneBuffers::kAllInputs));
TF_ASSIGN_OR_RETURN(auto stream, config.GetStream());
TF_ASSIGN_OR_RETURN(std::optional<ProfilingOutput> profiling_output,
util.ProfileExecutable(executable.get(), stream,
rz_buffers.input_buffers(),
rz_buffers.input_shapes()));
if (!profiling_output.has_value()) {
return Internal("No output after a successful verification run.");
}
return std::move(profiling_output->output);
}
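// Compares the root buffers of `current` and `expected` with BufferComparator
// and returns an Internal error if they do not match.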
absl::Status CompareBuffers(const ScopedShapedBuffer& current,
const ScopedShapedBuffer& expected,
const Shape& shape, const HloModuleConfig& config,
se::Stream* stream) {
BufferComparator comparator(shape, config);
TF_ASSIGN_OR_RETURN(bool outputs_match,
comparator.CompareEqual(stream, current.root_buffer(),
expected.root_buffer()));
if (!outputs_match) {
return Internal("Triton fusion output does not match emitters output.");
}
return absl::OkStatus();
}
absl::Status ForAllTritonFusions(
const HloModule& module,
const absl::flat_hash_set<absl::string_view>& execution_threads,
absl::AnyInvocable<absl::Status(const HloFusionInstruction&)> fn) {
for (HloComputation* computation :
module.MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
TF_ASSIGN_OR_RETURN(auto triton_fusion, AsTritonFusion(instruction));
if (triton_fusion != nullptr) {
TF_RETURN_IF_ERROR(fn(*triton_fusion));
}
}
}
return absl::OkStatus();
}
}
namespace {
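// Runs `fusion` once through Triton and once through the fallback emitters
// (by clearing the backend config) and checks that the two outputs agree.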
absl::Status VerifyTritonFusion(AutotunerCompileUtil& util,
const HloFusionInstruction& fusion,
const AutotuneConfig& config,
const DebugOptions& debug_opts) {
TF_ASSIGN_OR_RETURN(auto triton_result,
triton_fusion_numerics_pass_internal::CompileAndRunFusion(
util, fusion, config, debug_opts,
false));
TF_ASSIGN_OR_RETURN(auto emitters_result,
triton_fusion_numerics_pass_internal::CompileAndRunFusion(
util, fusion, config, debug_opts,
true));
TF_ASSIGN_OR_RETURN(auto stream, config.GetStream());
return triton_fusion_numerics_pass_internal::CompareBuffers(
triton_result, emitters_result, fusion.shape(),
fusion.GetModule()->config(), stream);
}
}
absl::StatusOr<bool> TritonFusionNumericsVerifier::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
if (config_.IsDeviceless()) {
return absl::InternalError(
"Cannot run TritonFusionNumericsVerifier on a deviceless compilation.");
}
const DebugOptions& debug_options = module->config().debug_options();
TF_ASSIGN_OR_RETURN(std::optional<AutotunerCompileUtil> opt_compile_util,
AutotunerCompileUtil::Create(config_, debug_options));
TF_RET_CHECK(opt_compile_util.has_value());
TF_RETURN_IF_ERROR(triton_fusion_numerics_pass_internal::ForAllTritonFusions(
*module, execution_threads, [&](const HloFusionInstruction& fusion) {
return VerifyTritonFusion(*opt_compile_util, fusion, config_,
debug_options);
}));
return false;
}
} | #include "xla/service/gpu/triton_fusion_numerics_verifier.h"
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/autotuner_compile_util.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/platform_util.h"
#include "xla/stream_executor/platform.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla::gpu {
namespace {
class TritonFusionNumericsVerifierTest
: public HloTestBase,
public ::testing::WithParamInterface<PrimitiveType> {
public:
DebugOptions GetDebugOptionsForTest() override {
auto options = HloTestBase::GetDebugOptionsForTest();
options.set_xla_gpu_enable_triton_softmax_fusion(true);
options.set_xla_gpu_verify_triton_fusion_numerics(true);
return options;
}
protected:
std::unique_ptr<xla::HloModule> Module(absl::string_view hlo_text_template,
absl::string_view type) {
auto m = GetOptimizedModule(absl::Substitute(hlo_text_template, type));
TF_EXPECT_OK(m);
return std::move(m.value());
}
const HloFusionInstruction* TritonFusion(const xla::HloModule& module) {
const HloFusionInstruction* fusion_result = nullptr;
absl::Status res =
triton_fusion_numerics_pass_internal::ForAllTritonFusions(
module, {},
[&](const HloFusionInstruction& fusion) -> absl::Status {
EXPECT_EQ(fusion_result, nullptr);
fusion_result = &fusion;
return absl::OkStatus();
});
return fusion_result;
}
AutotuneConfig CreateAutotuneConfig() {
se::Platform* platform = PlatformUtil::GetDefaultPlatform().value();
auto executors_or = PlatformUtil::GetStreamExecutors(platform);
TF_EXPECT_OK(executors_or);
return AutotuneConfig{DeviceConfig{executors_or->at(0), nullptr},
GetDebugOptionsForTest()};
}
AutotunerCompileUtil CreateAutotunerCompileUtil(AutotuneConfig& config) {
auto opt_compile_util_or =
AutotunerCompileUtil::Create(config, GetDebugOptionsForTest());
TF_EXPECT_OK(opt_compile_util_or);
EXPECT_TRUE(opt_compile_util_or->has_value());
return std::move(opt_compile_util_or->value());
}
};
constexpr absl::string_view kSoftmaxHlo = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
add_computation {
arg_0.1 = $0[] parameter(0)
arg_1.1 = $0[] parameter(1)
ROOT add = $0[] add(arg_0.1, arg_1.1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
exponential = $0[127,125]{1,0} exponential(subtract)
constant_zero = $0[] constant(0)
second_reduce = $0[127]{0} reduce(exponential, constant_zero), dimensions={1}, to_apply=add_computation
second_broadcast = $0[127,125]{1,0} broadcast(second_reduce), dimensions={0}
ROOT divide = $0[127,125]{1,0} divide(exponential, second_broadcast)
}
)";
bool HloPassHasRun(const HloModule& module, absl::string_view pass_name) {
for (const auto& pass_metadata : module.metadata().proto().pass_metadata()) {
if (pass_metadata.pass_name() == pass_name) {
return true;
}
}
return false;
}
TEST_P(TritonFusionNumericsVerifierTest, VerifyExactSoftmaxFusionNumerics) {
PrimitiveType data_type = GetParam();
auto module = Module(kSoftmaxHlo,
primitive_util::LowercasePrimitiveTypeName(data_type));
EXPECT_TRUE(HloPassHasRun(*module, TritonFusionNumericsVerifier::Name()));
auto fusion = TritonFusion(*module);
EXPECT_NE(fusion, nullptr);
}
TEST_F(TritonFusionNumericsVerifierTest, CheckMismatch) {
auto module_f16 = Module(kSoftmaxHlo, "f16");
auto fusion_f16 = TritonFusion(*module_f16);
EXPECT_NE(fusion_f16, nullptr);
auto module_f32 = Module(kSoftmaxHlo, "f32");
auto fusion_f32 = TritonFusion(*module_f32);
EXPECT_NE(fusion_f32, nullptr);
AutotuneConfig autotune_config = CreateAutotuneConfig();
AutotunerCompileUtil compile_util =
CreateAutotunerCompileUtil(autotune_config);
const DebugOptions& debug_options = GetDebugOptionsForTest();
auto f16_result = triton_fusion_numerics_pass_internal::CompileAndRunFusion(
compile_util, *fusion_f16, autotune_config, debug_options,
false);
TF_EXPECT_OK(f16_result);
auto f32_result = triton_fusion_numerics_pass_internal::CompileAndRunFusion(
compile_util, *fusion_f32, autotune_config, debug_options,
false);
TF_EXPECT_OK(f32_result);
auto stream = autotune_config.GetStream();
TF_EXPECT_OK(stream);
auto cmp = triton_fusion_numerics_pass_internal::CompareBuffers(
*f16_result, *f32_result, fusion_f16->shape(),
fusion_f16->GetModule()->config(), *stream);
EXPECT_FALSE(cmp.ok());
}
INSTANTIATE_TEST_SUITE_P(TritonFusionNumericsVerifierTestSuite,
TritonFusionNumericsVerifierTest,
::testing::Values(F32, F16, BF16));
}
} |
2,071 | cpp | tensorflow/tensorflow | cublas_pad_for_gemms | third_party/xla/xla/service/gpu/transforms/cublas_pad_for_gemms.cc | third_party/xla/xla/service/gpu/transforms/cublas_pad_for_gemms_test.cc | #ifndef XLA_SERVICE_GPU_CUBLAS_PAD_FOR_GEMMS_H_
#define XLA_SERVICE_GPU_CUBLAS_PAD_FOR_GEMMS_H_
#include <cstdint>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
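// Pads the matrix dimensions of dots of the given `datatype` to multiples of
// `pad_to_multiple_of` so that cuBLAS can select faster (e.g. tensor-core)
// kernels. Illustrative usage sketch:
//   CublasPadForGemms pass(se::CudaComputeCapability(7, 0),
//                          PrimitiveType::F16, /*pad_to_multiple_of=*/8);
//   TF_ASSIGN_OR_RETURN(bool changed, pass.Run(module));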
class CublasPadForGemms : public HloModulePass {
public:
CublasPadForGemms(const se::GpuComputeCapability gpu_compute_capability,
PrimitiveType datatype, int32_t pad_to_multiple_of)
: gpu_compute_capability_(gpu_compute_capability),
datatype_(datatype),
pad_to_multiple_of_(pad_to_multiple_of) {}
absl::string_view name() const override { return "cublas-pad-for-gemms"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const se::GpuComputeCapability gpu_compute_capability_;
PrimitiveType datatype_;
int32_t pad_to_multiple_of_;
};
}
}
#endif
#include "xla/service/gpu/cublas_pad_for_gemms.h"
#include <cstdint>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/gemm_fusion.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/triton_support.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
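// Pads the two innermost (matrix) dimensions of `dot`'s operands and result
// up to the next multiple of `pad_to_multiple_of` with zeros, then slices the
// result back to its original shape. Returns true if padding was applied.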
static absl::StatusOr<bool> PadForGemm(HloDotInstruction* dot,
PrimitiveType datatype,
int pad_to_multiple_of) {
auto* lhs = dot->mutable_operand(0);
auto* rhs = dot->mutable_operand(1);
Shape lshape = lhs->shape();
Shape rshape = rhs->shape();
Shape result_shape = dot->shape();
if (lshape.element_type() != datatype || rshape.element_type() != datatype) {
return false;
}
auto pad_dim = [&](Shape& s, int dim) {
s.set_dimensions(dim,
RoundUpTo<int64_t>(s.dimensions(dim), pad_to_multiple_of));
};
auto pad_matrix_dims = [&pad_dim](Shape s) {
pad_dim(s, s.rank() - 2);
pad_dim(s, s.rank() - 1);
return s;
};
Shape new_lshape = pad_matrix_dims(lshape);
Shape new_rshape = pad_matrix_dims(rshape);
Shape new_result_shape = pad_matrix_dims(result_shape);
if (new_lshape == lshape && new_rshape == rshape) {
return false;
}
VLOG(3) << "old shape: " << lshape << " " << rshape << " " << result_shape;
VLOG(3) << "new shape: " << new_lshape << " " << new_rshape << " "
<< new_result_shape;
auto create_padding_config = [](Shape& shape, Shape& new_shape) {
PaddingConfig padding_config;
for (int i = 0; i < shape.rank(); ++i) {
auto dimension = padding_config.add_dimensions();
dimension->set_edge_padding_high(new_shape.dimensions()[i] -
shape.dimensions()[i]);
dimension->set_edge_padding_low(0);
dimension->set_interior_padding(0);
}
return padding_config;
};
auto l_padding_config = create_padding_config(lshape, new_lshape);
auto r_padding_config = create_padding_config(rshape, new_rshape);
HloComputation* parent = dot->parent();
HloInstruction* zero_float = parent->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(datatype)));
zero_float->set_metadata(dot->metadata());
HloInstruction* lpad = parent->AddInstruction(
HloInstruction::CreatePad(new_lshape, lhs, zero_float, l_padding_config));
lpad->set_metadata(dot->metadata());
HloInstruction* rpad = parent->AddInstruction(
HloInstruction::CreatePad(new_rshape, rhs, zero_float, r_padding_config));
rpad->set_metadata(dot->metadata());
HloInstruction* new_dot = parent->AddInstruction(
dot->CloneWithNewOperands(new_result_shape, {lpad, rpad}));
std::vector<int64_t> start_indices(result_shape.rank(), 0);
std::vector<int64_t> strides(result_shape.rank(), 1);
HloInstruction* slice = parent->AddInstruction(
HloInstruction::CreateSlice(result_shape, new_dot, start_indices,
result_shape.dimensions(), strides));
slice->set_metadata(dot->metadata());
bool is_root = dot->user_count() == 0;
TF_CHECK_OK(parent->ReplaceInstruction(dot, slice));
if (is_root) {
parent->set_root_instruction(slice);
}
return true;
}
namespace {
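// Returns true if `dot` is canonical: on both operands, every dimension
// except the last two is a batch dimension numbered 0..n-1.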
bool CheckCanonical(HloDotInstruction* dot) {
const auto& dimension_numbers = dot->dot_dimension_numbers();
if (dimension_numbers.lhs_batch_dimensions_size() + 2 !=
dot->operand(0)->shape().rank() ||
dimension_numbers.rhs_batch_dimensions_size() + 2 !=
dot->operand(1)->shape().rank()) {
VLOG(2)
<< dot->ToString()
<< " is not canonical: Expected all dimensions but 2 to be "
"batch_dimensions. Hence, this dot is not a candidate for padding.";
return false;
}
std::vector<int64_t> canonical_batch_dims(
dimension_numbers.lhs_batch_dimensions_size());
absl::c_iota(canonical_batch_dims, 0);
if (!absl::c_equal(dimension_numbers.lhs_batch_dimensions(),
canonical_batch_dims) ||
!absl::c_equal(dimension_numbers.rhs_batch_dimensions(),
canonical_batch_dims)) {
VLOG(2)
<< dot->ToString()
<< " is not canonical: Expected batch dimensions to be all "
"dimensions except for the last 2 ones. Hence, this dot is not a "
"candidate for padding.";
return false;
}
return true;
}
}
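// Collects the canonical dots of element type `datatype` in `comp` that will
// be lowered via cuBLAS rather than the Triton GEMM emitter.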
static std::vector<HloDotInstruction*> GetRelevantDots(
const se::GpuComputeCapability& gpu_compute_capability,
HloComputation* comp, PrimitiveType datatype) {
std::vector<HloDotInstruction*> gemms;
for (HloInstruction* instr : comp->instructions()) {
if (IsMatrixMultiplication(*instr)) {
HloDotInstruction* dot = Cast<HloDotInstruction>(instr);
if (instr->operand(0)->shape().element_type() == datatype &&
CheckCanonical(dot) &&
!(instr->GetModule()
->config()
.debug_options()
.xla_gpu_enable_triton_gemm() &&
legacy_triton::IsTritonSupportedInstruction(
*dot, gpu_compute_capability) &&
ShouldTritonHandleGEMM(*dot, gpu_compute_capability))) {
gemms.push_back(dot);
}
}
}
return gemms;
}
absl::StatusOr<bool> CublasPadForGemms::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
for (HloDotInstruction* dot :
GetRelevantDots(gpu_compute_capability_, comp, datatype_)) {
TF_ASSIGN_OR_RETURN(bool result,
PadForGemm(dot, datatype_, pad_to_multiple_of_));
changed |= result;
}
}
return changed;
}
}
} | #include "xla/service/gpu/cublas_pad_for_gemms.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
namespace m = ::xla::match;
namespace xla {
namespace gpu {
namespace {
class CublasGemmPadForTensorCoresTest : public HloTestBase {
protected:
bool PadForF16Gemms(HloModule* module) {
return CublasPadForGemms(se::CudaComputeCapability(7, 0),
PrimitiveType::F16, 8)
.Run(module)
.value();
}
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_gpu_triton_gemm_any(false);
return debug_options;
}
};
TEST_F(CublasGemmPadForTensorCoresTest, OneDotRootComputation) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
%param1 = f16[2048,1024] parameter(0)
%param2 = f16[1024,33708] parameter(1)
ROOT %dot.2309 = f16[2048,33708]{1,0} dot(f16[2048,1024]{1,0} %param1,
f16[1024,33708]{0,1} %param2),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})")
.value();
EXPECT_TRUE(PadForF16Gemms(module.get()));
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
GmockMatch(
m::Slice(m::Dot(m::Pad(m::Parameter().WithShape(F16, {2048, 1024}),
m::Constant().WithShape(F16, {}))
.WithShape(F16, {2048, 1024}),
m::Pad(m::Parameter().WithShape(F16, {1024, 33708}),
m::Constant().WithShape(F16, {}))
.WithShape(F16, {1024, 33712}))
.WithShape(F16, {2048, 33712})
.WithContractingDims({1},
{0}))
.WithShape(F16, {2048, 33708})));
}
TEST_F(CublasGemmPadForTensorCoresTest, OneDotS8RootComputation) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
%param1 = s8[2047,1023] parameter(0)
%param2 = s8[1023,33707] parameter(1)
ROOT %dot.2309 = s32[2047,33707]{1,0} dot(s8[2047,1023]{1,0} %param1,
s8[1023,33707]{0,1} %param2),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})")
.value();
EXPECT_TRUE(
CublasPadForGemms(se::CudaComputeCapability(7, 0), PrimitiveType::S8, 4)
.Run(module.get())
.value());
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
GmockMatch(
m::Slice(m::Dot(m::Pad(m::Parameter().WithShape(S8, {2047, 1023}),
m::Constant().WithShape(S8, {}))
.WithShape(S8, {2048, 1024}),
m::Pad(m::Parameter().WithShape(S8, {1023, 33707}),
m::Constant().WithShape(S8, {}))
.WithShape(S8, {1024, 33708}))
.WithShape(S32, {2048, 33708})
.WithContractingDims({1},
{0}))
.WithShape(S32, {2047, 33707})));
}
TEST_F(CublasGemmPadForTensorCoresTest, TwoDotsComputation) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
%param1 = f16[2048, 1024] parameter(0)
%param2 = f16[1024, 33708] parameter(1)
%param3 = f16[33708, 1] parameter(2)
%dot1 = f16[2048, 33708]{1,0} dot(f16[2048, 1024]{1,0} %param1,
f16[1024, 33708]{0,1} %param2),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT %dot2 = f16[2048, 1]{1,0} dot(f16[2048, 33708]{1,0} %dot1,
f16[33708, 1]{0,1} %param3),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})")
.value();
EXPECT_TRUE(PadForF16Gemms(module.get()));
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* dot2 = nullptr;
ASSERT_THAT(
root,
GmockMatch(
m::Slice(
m::Dot(
m::Pad(m::Slice(m::Dot(&dot2,
m::Pad().WithShape(F16, {2048, 1024}),
m::Pad().WithShape(F16, {1024, 33712}))
.WithContractingDims(
{1},
{0})
.WithShape(F16, {2048, 33712}))
.WithShape(F16, {2048, 33708}),
m::Constant().WithShape(F16, {}))
.WithShape(F16, {2048, 33712}),
m::Pad(m::Parameter().WithShape(F16, {33708, 1}),
m::Constant().WithShape(F16, {}))
.WithShape(F16, {33712, 8}))
.WithShape(F16, {2048, 8})
.WithContractingDims({1},
{0}))
.WithShape(F16, {2048, 1})));
EXPECT_THAT(
dot2,
GmockMatch(m::Dot(m::Pad(m::Parameter().WithShape(F16, {2048, 1024}),
m::Constant().WithShape(F16, {}))
.WithShape(F16, {2048, 1024}),
m::Pad(m::Parameter().WithShape(F16, {1024, 33708}),
m::Constant().WithShape(F16, {}))
.WithShape(F16, {1024, 33712}))
.WithContractingDims({1},
{0})));
}
TEST_F(CublasGemmPadForTensorCoresTest, DotWithBatchDimensions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
%param1 = f16[3, 5, 2048, 1024] parameter(0)
%param2 = f16[3, 5, 1024, 33708] parameter(1)
ROOT %dot.2309 = f16[3, 5, 2048, 33708]{3, 2, 1,0} dot(f16[3, 5, 2048, 1024]{3, 2, 1,0} %param1,
f16[3, 5, 1024, 33708]{2, 3, 0,1} %param2), lhs_batch_dims={0, 1}, rhs_batch_dims={0, 1}, lhs_contracting_dims={3}, rhs_contracting_dims={2}})")
.value();
EXPECT_TRUE(PadForF16Gemms(module.get()));
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
GmockMatch(
m::Slice(
m::Dot(m::Pad(m::Parameter().WithShape(F16, {3, 5, 2048, 1024}),
m::Constant().WithShape(F16, {}))
.WithShape(F16, {3, 5, 2048, 1024}),
m::Pad(m::Parameter().WithShape(F16, {3, 5, 1024, 33708}),
m::Constant().WithShape(F16, {}))
.WithShape(F16, {3, 5, 1024, 33712}))
.WithShape(F16, {3, 5, 2048, 33712})
.WithContractingDims({3},
{2}))
.WithShape(F16, {3, 5, 2048, 33708})));
}
TEST_F(CublasGemmPadForTensorCoresTest, NoDotComputation) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %maximum = f32[] maximum(f32[] %x, f32[] %y)
})")
.value();
EXPECT_FALSE(PadForF16Gemms(module.get()));
}
TEST_F(CublasGemmPadForTensorCoresTest, F32DotComputation) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
%param1 = f32[2048,1024] parameter(0)
%param2 = f32[1024,33708] parameter(1)
ROOT %dot.2309 = f32[2048,33708]{1,0} dot(f32[2048,1024]{1,0} %param1,
f32[1024,33708]{0,1} %param2),
lhs_contracting_dims={1}, rhs_contracting_dims={0}})")
.value();
EXPECT_FALSE(PadForF16Gemms(module.get()));
}
TEST_F(CublasGemmPadForTensorCoresTest, F64DotComputation) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
%param1 = f64[2048,1024] parameter(0)
%param2 = f64[1024,33708] parameter(1)
ROOT %dot.2309 = f64[2048,33708]{1,0} dot(f64[2048,1024]{1,0} %param1,
f64[1024,33708]{0,1} %param2),
lhs_contracting_dims={1}, rhs_contracting_dims={0}})")
.value();
EXPECT_FALSE(PadForF16Gemms(module.get()));
}
TEST_F(CublasGemmPadForTensorCoresTest, MultiplesOf8DotComputation) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
%param1 = f16[2048,1024] parameter(0)
%param2 = f16[1024,33712] parameter(1)
ROOT %dot.2309 = f16[2048,33712]{1,0} dot(f16[2048,1024]{1,0} %param1,
f16[1024,33712]{0,1} %param2),
lhs_contracting_dims={1}, rhs_contracting_dims={0}})")
.value();
EXPECT_FALSE(PadForF16Gemms(module.get()));
}
TEST_F(CublasGemmPadForTensorCoresTest, CheckSavingMetadata) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
%param1 = f16[2048,1024] parameter(0)
%param2 = f16[1024,33708] parameter(1)
ROOT %dot.2309 = f16[2048,33708]{1,0} dot(f16[2048,1024]{1,0} %param1,
f16[1024,33708]{0,1} %param2),
lhs_contracting_dims={1}, rhs_contracting_dims={0},
metadata={op_type="MatMul" op_name="transformer_v2/Transformer/decode/embedding_shared_weights_1/presoftmax_linear/MatMul"}
})")
.value();
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(PadForF16Gemms(module.get()));
auto metadata = module->entry_computation()->root_instruction()->metadata();
EXPECT_EQ("MatMul", metadata.op_type());
EXPECT_EQ(
"transformer_v2/Transformer/decode/embedding_shared_weights_1/"
"presoftmax_linear/MatMul",
metadata.op_name());
}
TEST_F(CublasGemmPadForTensorCoresTest, NotCanonicalizedDot) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
%param1 = f16[3, 5, 2048, 1024] parameter(0)
%param2 = f16[3, 5, 1024, 33708] parameter(1)
ROOT %dot.2309 = f16[3,2048, 33708]{2, 1, 0} dot(f16[3, 5, 2048, 1024]{3, 2, 1, 0} %param1, f16[3, 5, 1024, 33708]{3, 2, 1, 0} %param2), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={3, 1}, rhs_contracting_dims={2, 1}})")
.value();
EXPECT_FALSE(PadForF16Gemms(module.get()));
}
}
}
} |
2,072 | cpp | tensorflow/tensorflow | collective_permute_cycle_decomposer | third_party/xla/xla/service/gpu/transforms/collective_permute_cycle_decomposer.cc | third_party/xla/xla/service/gpu/transforms/collective_permute_cycle_decomposer_test.cc | #ifndef XLA_SERVICE_GPU_COLLECTIVE_PERMUTE_CYCLE_DECOMPOSER_H_
#define XLA_SERVICE_GPU_COLLECTIVE_PERMUTE_CYCLE_DECOMPOSER_H_
#include <cstdint>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
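// Decomposes a collective-permute whose source-target pairs form a full
// forward or backward cycle into two cycle-free collective-permutes: one for
// the back edge and one for the remaining edges. Only permutes whose result
// is at least `threshold_in_bytes` are rewritten.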
class CollectivePermuteCycleDecomposer : public HloModulePass {
public:
explicit CollectivePermuteCycleDecomposer(int64_t threshold_in_bytes)
: threshold_in_bytes_(threshold_in_bytes) {}
absl::string_view name() const override {
return "collective-permute-cycle-decomposer";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
int64_t threshold_in_bytes_;
};
}
#endif
#include "xla/service/gpu/collective_permute_cycle_decomposer.h"
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
using SourceTargetPair = std::pair<int64_t, int64_t>;
using SourceTargetPairs = std::vector<SourceTargetPair>;
enum class CycleType { kUnknown, kForward, kBackward };
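// Returns the cycle type if `collective_permute` should be decomposed: it
// must have a channel id, a single array-shaped result of at least
// `threshold_in_bytes`, and more than one source-target pair forming a cycle.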
CycleType ShouldDecomposeWithCycleType(
const HloCollectivePermuteInstruction& collective_permute,
int64_t threshold_in_bytes) {
if (!collective_permute.channel_id().has_value()) {
return CycleType::kUnknown;
}
if (collective_permute.operand_count() != 1) {
return CycleType::kUnknown;
}
const Shape& result_shape = collective_permute.shape();
if (result_shape.IsTuple()) {
return CycleType::kUnknown;
}
CHECK(result_shape.IsArray());
if (ShapeUtil::ByteSizeOf(result_shape) < threshold_in_bytes) {
return CycleType::kUnknown;
}
const SourceTargetPairs& pairs = collective_permute.source_target_pairs();
if (pairs.size() == 1) {
return CycleType::kUnknown;
}
return IsForwardCycle(pairs) ? CycleType::kForward
: IsBackwardCycle(pairs) ? CycleType::kBackward
: CycleType::kUnknown;
}
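// Splits the _xla_send_recv_validation frontend attribute of `cp` between the
// two decomposed permutes: the bound for the back edge goes to `cp1_attr`,
// the remaining bounds go to `cp2_attr`.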
absl::Status GetFrontendAttributes(HloCollectivePermuteInstruction* cp,
CycleType cycle_type,
xla::FrontendAttributes& cp1_attr,
xla::FrontendAttributes& cp2_attr) {
cp1_attr = cp->frontend_attributes();
cp2_attr = cp->frontend_attributes();
auto validation_it =
cp->frontend_attributes().map().find(kSendRecvValidationAttr);
if (validation_it == cp->frontend_attributes().map().end() ||
validation_it->second == "invalid") {
return absl::OkStatus();
}
auto statusor_bounds = ParseReplicaGroupsOnly(validation_it->second);
if (!statusor_bounds.ok()) {
return statusor_bounds.status();
}
const std::vector<ReplicaGroup>& bounds = statusor_bounds.value();
if (bounds.size() < 2) {
return Internal("Invalid number of replica groups");
}
int64_t num_pairs = bounds.size();
auto backedge_start = cycle_type == CycleType::kBackward
? bounds.begin()
: bounds.begin() + num_pairs - 1;
auto other_edges_start =
cycle_type == CycleType::kBackward ? bounds.begin() + 1 : bounds.begin();
std::vector<ReplicaGroup> cp1_bounds(backedge_start, backedge_start + 1);
std::vector<ReplicaGroup> cp2_bounds(other_edges_start,
other_edges_start + num_pairs - 1);
auto bounds_to_string = [](const std::vector<ReplicaGroup>& groups) {
return "{" +
absl::StrJoin(groups, ",",
[](std::string* out, const ReplicaGroup& value) {
absl::StrAppend(out, "{", value.replica_ids(0), ",",
value.replica_ids(1), "}");
}) +
"}";
};
std::string cp1_validation_str = bounds_to_string(cp1_bounds);
std::string cp2_validation_str = bounds_to_string(cp2_bounds);
(*cp1_attr.mutable_map())[kSendRecvValidationAttr] = cp1_validation_str;
(*cp2_attr.mutable_map())[kSendRecvValidationAttr] = cp2_validation_str;
return absl::OkStatus();
}
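// Replaces `cp` with two collective-permutes (back edge vs. remaining edges)
// plus a partition-id-based select that picks which of the two results
// carries the data received on each partition.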
absl::Status DecomposeCollectivePermuteCycle(
HloCollectivePermuteInstruction* cp, HloComputation* computation,
HloModule* module, int64_t next_channel_id, CycleType cycle_type) {
const SourceTargetPairs& pairs = cp->source_target_pairs();
int64_t num_pairs = pairs.size();
auto backedge_start = cycle_type == CycleType::kBackward
? pairs.begin()
: pairs.begin() + num_pairs - 1;
auto other_edges_start =
cycle_type == CycleType::kBackward ? pairs.begin() + 1 : pairs.begin();
SourceTargetPairs backedge(backedge_start, backedge_start + 1);
SourceTargetPairs other_edges(other_edges_start,
other_edges_start + num_pairs - 1);
const OpMetadata& metadata = cp->metadata();
xla::FrontendAttributes cp1_attr, cp2_attr;
TF_RETURN_IF_ERROR(GetFrontendAttributes(cp, cycle_type, cp1_attr, cp2_attr));
HloInstruction* cp1 =
computation->AddInstruction(HloInstruction::CreateCollectivePermute(
cp->shape(), cp->mutable_operand(0), backedge,
cp->channel_id().value()));
cp1->set_metadata(metadata);
cp1->set_frontend_attributes(cp1_attr);
int64_t cp1_receiver = backedge.back().second;
HloInstruction* cp2 =
computation->AddInstruction(HloInstruction::CreateCollectivePermute(
cp->shape(), cp->mutable_operand(0), other_edges, next_channel_id));
cp2->set_metadata(metadata);
cp2->set_frontend_attributes(cp2_attr);
HloInstruction* partition =
computation->AddInstruction(HloInstruction::CreatePartitionId());
HloInstruction* constant = computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0(U32, cp1_receiver)));
HloInstruction* compare0 = computation->AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), partition,
constant, Comparison::Direction::kEq));
HloInstruction* compare =
computation->AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(PRED, cp1->shape().dimensions()), compare0, {}));
HloInstruction* recv_data =
computation->AddInstruction(HloInstruction::CreateTernary(
cp1->shape(), HloOpcode::kSelect, compare, cp1, cp2));
TF_RETURN_IF_ERROR(cp->ReplaceAllUsesWith(recv_data));
TF_RETURN_IF_ERROR(computation->RemoveInstructionAndUnusedOperands(cp));
return absl::OkStatus();
}
}
absl::StatusOr<bool> CollectivePermuteCycleDecomposer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
int64_t next_channel_id;
for (auto comp : module->computations(execution_threads)) {
for (auto hlo : comp->MakeInstructionPostOrder()) {
if (hlo->opcode() != HloOpcode::kCollectivePermute) {
continue;
}
auto collective_permute = Cast<HloCollectivePermuteInstruction>(hlo);
CycleType cycle_type = ShouldDecomposeWithCycleType(*collective_permute,
threshold_in_bytes_);
if (cycle_type != CycleType::kUnknown) {
if (!changed) {
next_channel_id = hlo_query::NextChannelId(*module);
changed = true;
}
TF_RETURN_IF_ERROR(DecomposeCollectivePermuteCycle(
collective_permute, comp, module, next_channel_id++, cycle_type));
}
}
}
return changed;
}
} | #include "xla/service/gpu/collective_permute_cycle_decomposer.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::testing::HasSubstr;
using CollectivePermuteCycleDecomposerTest = HloTestBase;
using CollectivePermuteDecomposerTest = HloTestBase;
TEST_F(CollectivePermuteDecomposerTest, DefaultChannelNotTransformed) {
const absl::string_view kModuleStr = R"(
HloModule test
ENTRY test_computation {
p = u32[] replica-id()
ROOT start = u32[] collective-permute(p),
source_target_pairs={{0,1},{1,0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kModuleStr)));
CollectivePermuteCycleDecomposer decomposer(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CollectivePermuteCycleDecomposerTest, TrivialNotTransformed) {
const absl::string_view kModuleStr = R"(
HloModule test
ENTRY test_computation {
p = u32[] partition-id()
ROOT start = u32[] collective-permute(p), channel_id=1,
source_target_pairs={{0,0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kModuleStr)));
CollectivePermuteCycleDecomposer decomposer(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CollectivePermuteCycleDecomposerTest, BelowThresholdNotTransformed) {
const absl::string_view kModuleStr = R"(
HloModule test
ENTRY test_computation {
p = u32[] partition-id()
ROOT start = u32[] collective-permute(p), channel_id=1,
source_target_pairs={{0,1},{1,2},{2,3},{3,0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kModuleStr)));
CollectivePermuteCycleDecomposer decomposer(33);
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CollectivePermuteCycleDecomposerTest, ForwardCycle) {
const absl::string_view kModuleStr = R"(
HloModule test
ENTRY test_computation {
p = u32[] partition-id()
ROOT start = u32[3,2] collective-permute(p), channel_id=1,
source_target_pairs={{0,1},{1,2},{2,3},{3,0}},
frontend_attributes={_xla_send_recv_validation="{{0,7},{1,8},{2,9},{3,10}}"},
metadata={op_name="op1/op2/add" source_file="foo/bar/mysource.py" source_line=35}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kModuleStr)));
CollectivePermuteCycleDecomposer decomposer(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
auto check_metadata = [](const HloInstruction* inst) {
EXPECT_EQ(inst->metadata().op_name(), "op1/op2/add");
EXPECT_EQ(inst->metadata().source_file(), "foo/bar/mysource.py");
EXPECT_EQ(inst->metadata().source_line(), 35);
};
HloCollectivePermuteInstruction* cp1 =
DynCast<HloCollectivePermuteInstruction>(
FindInstruction(module.get(), "collective-permute"));
HloCollectivePermuteInstruction* cp2 =
DynCast<HloCollectivePermuteInstruction>(
FindInstruction(module.get(), "collective-permute.1"));
EXPECT_NE(cp1, nullptr);
EXPECT_NE(cp2, nullptr);
EXPECT_EQ(cp1->operand(0), cp2->operand(0));
EXPECT_GT(cp2->channel_id().value(), cp1->channel_id().value());
EXPECT_THAT(cp1->ToString(), HasSubstr("source_target_pairs={{3,0}}"));
EXPECT_THAT(cp1->ToString(),
HasSubstr("_xla_send_recv_validation=\"{{3,10}}\""));
EXPECT_THAT(cp2->ToString(),
HasSubstr("source_target_pairs={{0,1},{1,2},{2,3}}"));
EXPECT_THAT(cp2->ToString(),
HasSubstr("_xla_send_recv_validation=\"{{0,7},{1,8},{2,9}}\""));
check_metadata(cp1);
check_metadata(cp2);
}
TEST_F(CollectivePermuteCycleDecomposerTest, BackwardCycle) {
const absl::string_view kModuleStr = R"(
HloModule test
ENTRY test_computation {
p = u32[] partition-id()
ROOT start = u32[] collective-permute(p), channel_id=1,
source_target_pairs={{0,3},{1,0},{2,1},{3,2}},
frontend_attributes={_xla_send_recv_validation="{{0,7},{1,8},{2,9},{3,10}}"},
metadata={op_name="op1/op2/add" source_file="foo/bar/mysource.py" source_line=35}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kModuleStr)));
CollectivePermuteCycleDecomposer decomposer(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
auto check_metadata = [](const HloInstruction* inst) {
EXPECT_EQ(inst->metadata().op_name(), "op1/op2/add");
EXPECT_EQ(inst->metadata().source_file(), "foo/bar/mysource.py");
EXPECT_EQ(inst->metadata().source_line(), 35);
};
HloCollectivePermuteInstruction* cp1 =
DynCast<HloCollectivePermuteInstruction>(
FindInstruction(module.get(), "collective-permute"));
HloCollectivePermuteInstruction* cp2 =
DynCast<HloCollectivePermuteInstruction>(
FindInstruction(module.get(), "collective-permute.1"));
EXPECT_NE(cp1, nullptr);
EXPECT_NE(cp2, nullptr);
EXPECT_EQ(cp1->operand(0), cp2->operand(0));
EXPECT_GT(cp2->channel_id().value(), cp1->channel_id().value());
EXPECT_THAT(cp1->ToString(), HasSubstr("source_target_pairs={{0,3}}"));
EXPECT_THAT(cp1->ToString(),
HasSubstr("_xla_send_recv_validation=\"{{0,7}}\""));
EXPECT_THAT(cp2->ToString(),
HasSubstr("source_target_pairs={{1,0},{2,1},{3,2}}"));
EXPECT_THAT(cp2->ToString(),
HasSubstr("_xla_send_recv_validation=\"{{1,8},{2,9},{3,10}}\""));
check_metadata(cp1);
check_metadata(cp2);
}
}
} |
2,073 | cpp | tensorflow/tensorflow | cudnn_fused_conv_rewriter | third_party/xla/xla/service/gpu/transforms/cudnn_fused_conv_rewriter.cc | third_party/xla/xla/service/gpu/transforms/cudnn_fused_conv_rewriter_test.cc | #ifndef XLA_SERVICE_GPU_CUDNN_FUSED_CONV_REWRITER_H_
#define XLA_SERVICE_GPU_CUDNN_FUSED_CONV_REWRITER_H_
#include <cstdint>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
namespace xla {
namespace gpu {
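// Rewrites plain cudnn convolution custom calls into fused
// cudnn-conv-bias-activation custom calls, folding surrounding converts,
// scalar scaling, bias adds, and activations (and, on Hopper+, FP8 scaling
// graphs) into the convolution.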
class CudnnFusedConvRewriter : public HloModulePass {
public:
CudnnFusedConvRewriter(se::CudaComputeCapability cc,
se::dnn::VersionInfo dnn_version,
int32_t toolkit_version)
: compute_capability_(cc),
dnn_version_(dnn_version),
toolkit_version_(toolkit_version) {}
CudnnFusedConvRewriter(se::RocmComputeCapability cc,
se::dnn::VersionInfo dnn_version,
int32_t toolkit_version)
: compute_capability_(cc),
dnn_version_(dnn_version),
toolkit_version_(toolkit_version) {}
absl::string_view name() const override {
return "cudnn-fused-convolution-rewriter";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const se::GpuComputeCapability compute_capability_;
const se::dnn::VersionInfo dnn_version_;
const int32_t toolkit_version_;
};
}
}
#endif
#include "xla/service/gpu/cudnn_fused_conv_rewriter.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <functional>
#include <limits>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "xla/comparison_util.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
namespace m = match;
bool IsConvCustomCall(const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kCustomCall &&
(instr->custom_call_target() == kCudnnConvForwardCallTarget ||
instr->custom_call_target() ==
kCudnnConvBiasActivationForwardCallTarget);
}
bool IsConvDepthwise(const HloInstruction* instr) {
int64_t feature_group_count = instr->feature_group_count();
if (feature_group_count == 1) {
return false;
}
const HloInstruction* input = instr->operand(0);
int64_t input_feature_dimension =
instr->convolution_dimension_numbers().input_feature_dimension();
int64_t input_feature_count =
input->shape().dimensions(input_feature_dimension);
return input_feature_count == feature_group_count;
}
bool IsNonDepthwiseConvCustomCall(const HloInstruction* instr) {
return IsConvCustomCall(instr) && !IsConvDepthwise(instr);
}
bool IsROCm(se::GpuComputeCapability cc) {
return std::holds_alternative<se::RocmComputeCapability>(cc);
}
bool ShouldUseCudnnRuntimeFusion(const DebugOptions& debug_opts,
se::GpuComputeCapability cc) {
const auto* cuda_cc = std::get_if<se::CudaComputeCapability>(&cc);
  if (cuda_cc != nullptr) {
    return debug_opts.xla_gpu_use_runtime_fusion() && cuda_cc->IsAtLeast(7, 5);
  }
  return true;
}
bool IsSuitableForCudnnRuntimeFusion(HloInstruction* conv) {
if (conv->operands().size() > 3) {
return false;
}
if (conv->operand(0)->shape().element_type() != F16) {
return false;
}
const Shape& shape = conv->operand(1)->shape();
int64_t num_input_features = shape.dimensions(
conv->convolution_dimension_numbers().kernel_input_feature_dimension());
int64_t num_output_features = shape.dimensions(
conv->convolution_dimension_numbers().kernel_output_feature_dimension());
if (num_input_features % 2 != 0 || num_output_features % 2 != 0) {
return false;
}
return true;
}
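// Returns true if `instr` can be converted to `dst_ty` without losing
// information, looking through converts, constants, broadcasts, reshapes,
// and transposes.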
bool IsLosslesslyConvertibleTo(const HloInstruction* instr,
PrimitiveType dst_ty) {
if (instr->shape().element_type() == dst_ty) {
return true;
}
if (Match(instr, m::Convert(m::Op().WithElementType(dst_ty)))) {
return primitive_util::CastPreservesValues(dst_ty,
instr->shape().element_type());
}
if (instr->opcode() == HloOpcode::kConstant) {
if (!instr->shape().IsArray()) {
return false;
}
PrimitiveType orig_ty = instr->shape().element_type();
absl::StatusOr<Literal> converted1 = instr->literal().Convert(dst_ty);
if (!converted1.ok()) {
return false;
}
absl::StatusOr<Literal> converted2 = converted1->Convert(orig_ty);
if (!converted2.ok()) {
return false;
}
return instr->literal() == *converted2;
}
if (instr->opcode() == HloOpcode::kBroadcast ||
instr->opcode() == HloOpcode::kReshape ||
instr->opcode() == HloOpcode::kTranspose) {
return IsLosslesslyConvertibleTo(instr->operand(0), dst_ty);
}
return false;
}
bool IsLosslesslyConvertibleToS8(const HloInstruction* instr) {
return IsLosslesslyConvertibleTo(instr, S8);
}
bool IsLosslesslyConvertibleToF16(const HloInstruction* instr) {
return IsLosslesslyConvertibleTo(instr, F16);
}
absl::StatusOr<HloInstruction*> EnsureIsConvBiasActivation(
HloInstruction* conv) {
CHECK_EQ(conv->opcode(), HloOpcode::kCustomCall);
if (conv->custom_call_target() == kCudnnConvBiasActivationForwardCallTarget) {
return conv;
}
if (conv->custom_call_target() == kCudnnConvForwardCallTarget) {
HloComputation* comp = conv->parent();
const Shape& shape = conv->shape().tuple_shapes(0);
int64_t num_output_features = shape.dimensions(
conv->convolution_dimension_numbers().output_feature_dimension());
PrimitiveType bias_ty;
if (primitive_util::IsIntegralType(shape.element_type())) {
bias_ty = F32;
} else {
bias_ty = shape.element_type();
}
auto bias = BroadcastZeros(comp, bias_ty, {num_output_features});
absl::InlinedVector<HloInstruction*, 3> new_operands(
conv->operands().begin(), conv->operands().end());
new_operands.push_back(bias);
HloInstruction* new_conv = comp->AddInstruction(
conv->CloneWithNewOperands(conv->shape(), new_operands));
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(conv, new_conv));
new_conv->set_custom_call_target(kCudnnConvBiasActivationForwardCallTarget);
comp->parent()->SetAndUniquifyInstrName(new_conv,
"cudnn-conv-bias-activation");
return new_conv;
}
return FailedPrecondition("Unsupported conv: %s", conv->ToString());
}
absl::StatusOr<bool> FuseConvertTypeIntoConv(HloComputation* comp,
PrimitiveType conv_type,
PrimitiveType cvt_type) {
bool changed = false;
for (auto instr : comp->MakeInstructionPostOrder()) {
HloInstruction* conv = nullptr;
auto tuple_elem =
m::GetTupleElement(m::Op(&conv).WithPredicate(IsConvCustomCall), 0)
.WithElementType(conv_type);
auto pattern =
m::Convert(tuple_elem.WithOneUser()).WithElementType(cvt_type);
if (!Match(instr, pattern)) {
continue;
}
if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] {
return absl::StrCat("FuseConvertTypeIntoConv: ", conv->ToString());
})) {
continue;
}
Shape new_shape = conv->shape();
new_shape.mutable_tuple_shapes(0)->set_element_type(cvt_type);
HloInstruction* new_conv =
comp->AddInstruction(conv->CloneWithNewShape(new_shape));
comp->parent()->SetAndUniquifyInstrName(new_conv, conv->name());
TF_ASSIGN_OR_RETURN(HloInstruction * new_gte,
MakeGetTupleElementHlo(new_conv, 0));
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(instr, new_gte));
changed = true;
}
return changed;
}
struct ConvConvertTypes {
PrimitiveType convolution_type;
PrimitiveType conversion_type;
};
absl::StatusOr<bool> FuseRemoveConvertInConv(HloComputation* comp) {
bool changed = false;
std::array<ConvConvertTypes, 3> types{{
{S32, F32},
{S8, F32},
{F32, S8},
}};
for (auto [conv_type, cvt_type] : types) {
TF_ASSIGN_OR_RETURN(bool curr_change,
FuseConvertTypeIntoConv(comp, conv_type, cvt_type));
changed |= curr_change;
}
return changed;
}
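// Folds a multiplication of a conv's result by a broadcast scalar alpha into
// the custom call's conv_result_scale field.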
absl::StatusOr<bool> FuseConvAlpha(HloComputation* comp) {
bool changed = false;
for (auto instr : comp->MakeInstructionPostOrder()) {
HloInstruction* conv = nullptr;
HloInstruction* gte = nullptr;
HloInstruction* alpha = nullptr;
auto pattern = m::MultiplyAnyOrder(
m::GetTupleElement(
>e, m::Op(&conv).WithPredicate(IsNonDepthwiseConvCustomCall), 0)
.WithOneUse(),
m::Broadcast(m::ConstantEffectiveScalar(&alpha)));
if (!Match(instr, pattern)) {
continue;
}
PrimitiveType alpha_ty = gte->shape().element_type() == F64 ? F64 : F32;
if (!IsLosslesslyConvertibleTo(alpha, alpha_ty)) {
continue;
}
TF_ASSIGN_OR_RETURN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
CudnnConvBackendConfig& config =
*gpu_config.mutable_cudnn_conv_backend_config();
if (config.conv_result_scale() != 1) {
continue;
}
if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] {
return absl::StrCat("FuseConvAlpha: ", conv->ToString());
})) {
continue;
}
TF_ASSIGN_OR_RETURN(conv, EnsureIsConvBiasActivation(conv));
TF_ASSIGN_OR_RETURN(Literal alpha_f64, alpha->literal().Convert(F64));
config.set_conv_result_scale(alpha_f64.GetFirstElement<double>());
TF_RETURN_IF_ERROR(conv->set_backend_config(gpu_config));
TF_RETURN_IF_ERROR(conv->parent()->ReplaceInstruction(instr, gte));
changed = true;
}
return changed;
}
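// Serializes the op graph captured around an FP8 convolution as a string of
// "uid:[type]name(operand);" entries. Each appended op may have at most one
// operand that is itself already in the graph.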
class GraphString {
public:
GraphString() = default;
bool AppendOp(std::string op_name, HloInstruction* op,
std::vector<HloInstruction*> operands = {}) {
std::optional<int64_t> operand_uid;
int num_operands_in_graph = 0;
for (HloInstruction* operand : operands) {
if (OpInGraph(operand->unique_id())) {
num_operands_in_graph++;
if (num_operands_in_graph > 1) {
return false;
}
operand_uid = operand->unique_id();
}
}
graph_.emplace_back(OpDescriptor(
{op->unique_id(), op->shape().element_type(), op_name, operand_uid}));
return true;
}
void ChangeDataType(PrimitiveType type) {
DCHECK(!graph_.empty());
graph_.back().output_type = type;
}
std::string Graph() const {
std::string graph;
for (const OpDescriptor& op : graph_) {
graph.append(std::to_string(op.uid));
graph.append(":[" +
primitive_util::LowercasePrimitiveTypeName(op.output_type) +
"]");
graph.append(op.name);
graph.append("(");
if (op.operand.has_value()) {
graph.append(std::to_string(*op.operand));
}
graph.append(");");
}
return graph;
}
bool OpInGraph(int64_t uid, std::string op_name = "") const {
auto op_filter = [&](OpDescriptor op) -> bool {
if (op_name.empty()) {
return op.uid == uid;
} else {
return op.uid == uid && op.name == op_name;
}
};
return std::find_if(graph_.begin(), graph_.end(), op_filter) !=
graph_.end();
}
private:
struct OpDescriptor {
int64_t uid;
PrimitiveType output_type;
std::string name;
std::optional<int64_t> operand;
};
std::vector<OpDescriptor> graph_;
};
bool IsF8Type(const HloInstruction* instr) {
return primitive_util::IsF8Type(instr->shape().element_type());
}
bool IsScalar(const HloInstruction* instr) {
return ShapeUtil::IsScalar(instr->shape());
}
std::optional<PrimitiveType> IsSaturatingCastToF8(HloInstruction* instr) {
HloInstruction *op, *clamp_lower, *clamp_upper;
if (Match(instr,
m::Convert(
&op,
m::Clamp(m::Broadcast(m::ConstantScalar(&clamp_lower)), m::Op(),
m::Broadcast(m::ConstantScalar(&clamp_upper))))) &&
((op->shape().element_type() == F8E4M3FN &&
clamp_lower->literal().IsAllFloat(static_cast<float>(
std::numeric_limits<tsl::float8_e4m3fn>::lowest())) &&
clamp_upper->literal().IsAllFloat(static_cast<float>(
std::numeric_limits<tsl::float8_e4m3fn>::max()))) ||
(op->shape().element_type() == F8E5M2 &&
clamp_lower->literal().IsAllFloat(static_cast<float>(
std::numeric_limits<tsl::float8_e5m2>::lowest())) &&
clamp_upper->literal().IsAllFloat(static_cast<float>(
std::numeric_limits<tsl::float8_e5m2>::max()))))) {
return op->shape().element_type();
}
return std::nullopt;
}
bool AppliesMaxReduce(HloInstruction* op) {
HloComputation* reduce_comp = op->to_apply();
HloInstruction* reduce_comp_root = reduce_comp->root_instruction();
return ShapeUtil::IsScalar(op->shape()) &&
ShapeUtil::IsScalar(op->operand(1)->shape()) &&
op->operand(1)->IsConstant() &&
op->operand(1)->literal().GetAsDouble({}) <= 0. &&
reduce_comp_root->opcode() == HloOpcode::kMaximum &&
reduce_comp_root->operand(0)->opcode() == HloOpcode::kParameter &&
reduce_comp_root->operand(1)->opcode() == HloOpcode::kParameter;
}
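// Recursively walks the users of `instr`, absorbing adds, scales, relus,
// saturating F8 casts, and amax reductions into `graph_string`. If an
// instruction has unsupported (or too many) users, the capture state is
// rolled back to what it was before visiting that instruction.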
void CaptureConvGraphRecursive(HloInstruction* instr,
std::vector<HloInstruction*>& operands,
std::vector<HloInstruction*>& aux_outputs,
GraphString& graph_string,
absl::flat_hash_set<int>& visited_instrs,
HloInstruction*& final_instr) {
if (!visited_instrs.emplace(instr->unique_id()).second) {
return;
}
final_instr = instr;
GraphString init_graph_string = graph_string;
std::vector<HloInstruction*> init_operands = operands,
init_aux_outputs = aux_outputs;
int num_linear_users = 0, num_nonlinear_users = 0;
for (HloInstruction* user : instr->users()) {
HloInstruction *op, *operand0, *operand1;
if (Match(user, m::AddAnyOrder(&op, m::Op(&operand0), m::Op(&operand1)))) {
if (graph_string.AppendOp("add", op, {operand0, operand1})) {
operands.push_back(operand0 == instr ? operand1 : operand0);
num_linear_users++;
CaptureConvGraphRecursive(user, operands, aux_outputs, graph_string,
visited_instrs, final_instr);
}
continue;
}
if (Match(user, m::MultiplyAnyOrder(&op, m::Op(&operand0),
m::Broadcast(m::Op(&operand1)))) &&
ShapeUtil::IsScalar(operand1->shape())) {
if (graph_string.AppendOp("scale", op, {operand0, operand1})) {
operands.push_back(operand1);
num_linear_users++;
CaptureConvGraphRecursive(user, operands, aux_outputs, graph_string,
visited_instrs, final_instr);
}
continue;
}
if (Match(user, m::Divide(&op, m::Op(&operand0),
m::Broadcast(m::Op(&operand1)))) &&
ShapeUtil::IsScalar(operand1->shape())) {
if (graph_string.AppendOp("invscale", op, {operand0, operand1})) {
operands.push_back(operand1);
num_linear_users++;
CaptureConvGraphRecursive(user, operands, aux_outputs, graph_string,
visited_instrs, final_instr);
}
continue;
}
if (Match(user, m::MaximumAnyOrder(&op, m::Op(&operand0),
m::Broadcast(m::ConstantScalar(0))))) {
if (graph_string.AppendOp("relu", op, {operand0})) {
num_linear_users++;
CaptureConvGraphRecursive(user, operands, aux_outputs, graph_string,
visited_instrs, final_instr);
}
continue;
}
if (Match(user, m::Reduce(&op, m::Op(&operand0), m::Op())) &&
graph_string.OpInGraph(operand0->unique_id(), "relu") &&
AppliesMaxReduce(op)) {
if (graph_string.AppendOp("amax", op, {operand0})) {
aux_outputs.emplace_back(op);
num_nonlinear_users++;
}
continue;
}
if (!user->users().empty()) {
HloInstruction* users_user = user->users()[0];
std::optional<PrimitiveType> f8_type = IsSaturatingCastToF8(users_user);
if (f8_type.has_value()) {
graph_string.ChangeDataType(f8_type.value());
num_linear_users++;
CaptureConvGraphRecursive(users_user, operands, aux_outputs,
graph_string, visited_instrs, final_instr);
continue;
}
if (Match(users_user,
m::Reduce(&op, m::Abs(m::Op(&operand0)), m::Op())) &&
AppliesMaxReduce(op)) {
if (graph_string.AppendOp("amax", op, {operand0})) {
aux_outputs.emplace_back(op);
num_nonlinear_users++;
}
continue;
}
}
}
if (num_linear_users > 1 || num_nonlinear_users > 1 ||
num_linear_users + num_nonlinear_users < instr->user_count()) {
graph_string = init_graph_string;
operands = init_operands;
aux_outputs = init_aux_outputs;
final_instr = instr;
}
}
absl::StatusOr<
std::tuple<std::vector<HloInstruction*>, std::vector<HloInstruction*>,
GraphString, HloInstruction*>>
CaptureConvGraph(HloInstruction* instr, HloInstruction* convolution,
HloInstruction* wide_input, HloInstruction* wide_filter,
HloInstruction* input_scale, HloInstruction* filter_scale,
bool x_mult_scale, bool w_mult_scale) {
GraphString graph_string;
graph_string.AppendOp("conv", instr);
HloInstruction *input_scaled_conv, *filter_scaled_conv;
if (input_scale) {
TF_RETURN_IF_ERROR(convolution->ReplaceOperandWith(0, wide_input));
HloInstruction* bcast_input_scale = instr->AddInstruction(
HloInstruction::CreateBroadcast(instr->shape(), input_scale, {}));
input_scaled_conv = instr->AddInstruction(HloInstruction::CreateBinary(
instr->shape(),
x_mult_scale ? HloOpcode::kMultiply : HloOpcode::kDivide, instr,
bcast_input_scale));
TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(input_scaled_conv));
}
if (filter_scale) {
TF_RETURN_IF_ERROR(convolution->ReplaceOperandWith(1, wide_filter));
HloInstruction* bcast_filter_scale = instr->AddInstruction(
HloInstruction::CreateBroadcast(instr->shape(), filter_scale, {}));
filter_scaled_conv = instr->AddInstruction(HloInstruction::CreateBinary(
instr->shape(),
w_mult_scale ? HloOpcode::kMultiply : HloOpcode::kDivide,
input_scale ? input_scaled_conv : instr, bcast_filter_scale));
TF_RETURN_IF_ERROR((input_scale ? input_scaled_conv : instr)
->ReplaceAllUsesWith(filter_scaled_conv));
}
std::vector<HloInstruction*> operands, aux_outputs;
absl::flat_hash_set<int> visited_instrs;
HloInstruction* final_instr;
CaptureConvGraphRecursive(instr, operands, aux_outputs, graph_string,
visited_instrs, final_instr);
return std::make_tuple(operands, aux_outputs, graph_string, final_instr);
}
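// Pattern-matches FP8 convolutions, optionally with input/filter dequantizing
// scales, and captures their epilogue graph. Requires cuDNN >= 8.9, CUDA 12+,
// and Hopper or newer.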
absl::StatusOr<bool> F8GraphConv(HloComputation* comp,
se::CudaComputeCapability cc,
se::dnn::VersionInfo dnn_version,
int32_t toolkit_version) {
bool changed = false;
if (dnn_version < se::dnn::VersionInfo(8, 9, 0)) {
return false;
}
if (toolkit_version < 12000) {
return false;
}
if (!cc.IsAtLeast(se::CudaComputeCapability::HOPPER)) {
return false;
}
for (auto instr : comp->MakeInstructionPostOrder()) {
HloInstruction *convolution, *gte, *input, *filter,
*input_scale = nullptr, *filter_scale = nullptr,
*input_scale_op = nullptr, *filter_scale_op = nullptr,
*wide_input = nullptr, *wide_filter = nullptr;
auto conv_operand_maybe_scaled = [](HloInstruction** operand,
HloInstruction** wide_operand,
HloInstruction** scale_op,
HloInstruction** scale) {
return m::AnyOf<HloInstruction>(
m::Op(operand).WithPredicate(IsF8Type),
m::Convert(wide_operand, m::Op(operand).WithPredicate(IsF8Type)),
m::Divide(
scale_op,
m::Convert(wide_operand, m::Op(operand).WithPredicate(IsF8Type)),
m::Broadcast(m::Op(scale).WithPredicate(IsScalar))),
m::MultiplyAnyOrder(
scale_op,
m::Convert(wide_operand, m::Op(operand).WithPredicate(IsF8Type)),
m::Broadcast(m::Op(scale).WithPredicate(IsScalar))));
};
auto pattern = m::GetTupleElement(
        &gte, | #include "xla/service/gpu/cudnn_fused_conv_rewriter.h"
#include <array>
#include <memory>
#include <string>
#include <string_view>
#include <thread>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "xla/comparison_util.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/statusor.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#elif TENSORFLOW_USE_ROCM
#include "rocm/rocm_config.h"
#endif
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/convert_mover.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/gpu_conv_rewriter.h"
#include "xla/service/gpu/tests/gpu_codegen_test.h"
#include "xla/service/hlo_constant_folding.h"
#include "xla/service/hlo_pass_fix.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/reshape_mover.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace gpu {
namespace {
namespace m = match;
using ::testing::HasSubstr;
using ::testing::Not;
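// Element types substituted for the TYPE placeholder in the test HLO below;
// the f64 variants are only exercised on CUDA.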
const auto* kf16f32f64 = new std::vector<std::string>({"f16", "f32", "f64"});
const auto* kf16f32 = new std::vector<std::string>({"f16", "f32"});
class CudnnFusedConvRewriterHloTest : public HloTestBase {
public:
bool IsCuda() {
return std::holds_alternative<se::CudaComputeCapability>(
backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability());
}
se::CudaComputeCapability GetCudaComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
stream_executor::dnn::VersionInfo GetDnnVersion() {
return GetDnnVersionInfoOrDefault(backend().default_stream_executor());
}
int32_t GetToolkitVersion() const {
#if GOOGLE_CUDA
return CUDA_VERSION;
#elif TENSORFLOW_USE_ROCM
return TF_ROCM_VERSION;
#endif
return 0;
}
CudnnFusedConvRewriterHloTest()
: HloTestBase(false,
false,
{}) {}
};
class CudnnFusedConvRewriterTest : public GpuCodegenTest {
public:
bool IsCuda() {
return std::holds_alternative<se::CudaComputeCapability>(
backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability());
}
se::CudaComputeCapability GetCudaComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
stream_executor::dnn::VersionInfo GetDnnVersion() {
return GetDnnVersionInfoOrDefault(backend().default_stream_executor());
}
int32_t GetToolkitVersion() const {
#if GOOGLE_CUDA
return CUDA_VERSION;
#elif TENSORFLOW_USE_ROCM
return TF_ROCM_VERSION;
#endif
return 0;
}
protected:
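  // Runs the full GPU compiler pipeline on the given HLO and returns the
  // optimized module as a string, with convolution vectorization disabled and
  // runtime fusion enabled via debug options.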
std::string GetOptimizedHlo(absl::string_view hlo_string) {
HloModuleConfig config = GetModuleConfigForTest();
DebugOptions debug_opts = config.debug_options();
debug_opts.add_xla_disable_hlo_passes("cudnn_vectorize_convolutions");
debug_opts.set_xla_gpu_use_runtime_fusion(true);
config.set_debug_options(debug_opts);
auto result = backend().compiler()->RunHloPasses(
ParseAndReturnVerifiedModule(hlo_string, config).value(),
backend().default_stream_executor(), backend().memory_allocator());
if (!result.status().ok()) {
TF_EXPECT_OK(result.status())
<< "HLO compilation failed: " << result.status();
return "";
}
HloPrintOptions print_opts;
print_opts.set_print_operand_shape(false);
return (*result)->ToString(print_opts);
}
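  // For each supported element type, expects the convolution to be rewritten
  // into the fused conv-bias-activation custom call and checks numerics
  // against the reference backend.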
void TestMatchWithAllTypes(absl::string_view hlo_string) {
for (absl::string_view type : *(IsCuda() ? kf16f32f64 : kf16f32)) {
const std::string hlo_with_new_type =
absl::StrReplaceAll(hlo_string, {{"TYPE", type}});
std::string optimized_hlo_string = GetOptimizedHlo(hlo_with_new_type);
EXPECT_THAT(optimized_hlo_string,
Not(HasSubstr(kCudnnConvForwardCallTarget)))
<< optimized_hlo_string;
EXPECT_THAT(optimized_hlo_string,
HasSubstr(kCudnnConvBiasActivationForwardCallTarget));
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_with_new_type));
DebugOptions debug_opts = module->config().debug_options();
debug_opts.set_xla_gpu_use_runtime_fusion(true);
module->mutable_config().set_debug_options(debug_opts);
EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{0.01}))
<< optimized_hlo_string;
}
}
void TestClamp(absl::string_view pre_hlo_string,
absl::string_view post_hlo_string) {
std::string alpha_conv_scalar, alpha_side_input_scalar;
std::string elementwise_type;
std::string optimized_hlo_string = GetOptimizedHlo(pre_hlo_string);
EXPECT_THAT(optimized_hlo_string, Not(HasSubstr("Convert")));
EXPECT_THAT(optimized_hlo_string, HasSubstr("__cudnn$conv"));
EXPECT_TRUE(RunAndCompare(pre_hlo_string, ErrorSpec{0.01}))
<< pre_hlo_string;
absl::StatusOr<bool> filecheck_result =
RunFileCheck(optimized_hlo_string, post_hlo_string);
ASSERT_TRUE(filecheck_result.ok()) << filecheck_result.status();
EXPECT_TRUE(*filecheck_result);
}
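  // Negative counterpart of TestMatchWithAllTypes: the convolution must remain
  // an unfused forward custom call for every supported element type.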
void TestNotMatchWithAllTypes(absl::string_view hlo_string) {
for (absl::string_view type : *(IsCuda() ? kf16f32f64 : kf16f32)) {
const std::string hlo_with_new_type =
absl::StrReplaceAll(hlo_string, {{"TYPE", type}});
std::string optimized_hlo_string = GetOptimizedHlo(hlo_with_new_type);
SCOPED_TRACE(optimized_hlo_string);
EXPECT_THAT(optimized_hlo_string, HasSubstr(kCudnnConvForwardCallTarget));
EXPECT_THAT(optimized_hlo_string,
Not(HasSubstr(kCudnnConvBiasActivationForwardCallTarget)));
}
}
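  // On Hopper+ GPUs, compiles the module and FileChecks both the custom-call
  // and serialized-graph patterns. On older CUDA devices the rewrite is still
  // exercised by running CudnnFusedConvRewriter with a synthetic Hopper
  // compute capability.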
void TestF8(std::string pre_hlo_string, std::string custom_call_string,
std::string serialized_graph_string) {
if (!IsCuda()) return;
if (GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::HOPPER)) {
std::string optimized_hlo_string = GetOptimizedHlo(pre_hlo_string);
EXPECT_THAT(optimized_hlo_string, Not(HasSubstr("Convert")));
EXPECT_THAT(optimized_hlo_string, HasSubstr("__cudnn$conv"));
EXPECT_TRUE(RunAndCompare(pre_hlo_string, ErrorSpec{0.15, 0.15}))
<< pre_hlo_string;
absl::StatusOr<bool> filecheck_result =
RunFileCheck(optimized_hlo_string, custom_call_string);
ASSERT_TRUE(filecheck_result.ok()) << filecheck_result.status();
EXPECT_TRUE(*filecheck_result);
filecheck_result =
RunFileCheck(optimized_hlo_string, serialized_graph_string);
ASSERT_TRUE(filecheck_result.ok()) << filecheck_result.status();
EXPECT_TRUE(*filecheck_result);
} else {
std::string::size_type p0 = custom_call_string.find(':');
std::string::size_type p1 = custom_call_string.find("custom-call");
custom_call_string.erase(p0 + 1, p1 - p0 - 2);
p0 = custom_call_string.find(", dim_labels");
custom_call_string.erase(p0);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(pre_hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed, RunHloPass(GpuConvRewriter(GetCudaComputeCapability()),
module.get()));
EXPECT_TRUE(changed);
RunAndFilecheckHloRewrite(
module->ToString(HloPrintOptions{}.set_print_operand_shape(false)),
CudnnFusedConvRewriter(
se::CudaComputeCapability{se::CudaComputeCapability::HOPPER, 0},
GetDnnVersion(), GetToolkitVersion()),
custom_call_string);
RunAndFilecheckHloRewrite(
module->ToString(HloPrintOptions{}.set_print_operand_shape(false)),
CudnnFusedConvRewriter(
se::CudaComputeCapability{se::CudaComputeCapability::HOPPER, 0},
GetDnnVersion(), GetToolkitVersion()),
serialized_graph_string);
}
}
void TestF8Parameterized(std::string template_pre_hlo_string,
std::string template_custom_call_string,
std::string template_serialized_graph_string) {
std::array<absl::string_view, 2> types = {"f8e4m3fn", "f8e5m2"};
std::array<absl::string_view, 2> clamp_lower = {"-448.", "-57344."};
std::array<absl::string_view, 2> clamp_upper = {"448.", "57344."};
absl::flat_hash_map<absl::string_view, absl::string_view> replacements;
for (int i = 0; i < 2; ++i) {
replacements["<<InputType>>"] = types[i];
for (int j = 0; j < 2; ++j) {
replacements["<<FilterType>>"] = types[j];
for (int k = 0; k < 2; ++k) {
replacements["<<OutputType>>"] = types[k];
replacements["<<ClampLower>>"] = clamp_lower[k];
replacements["<<ClampUpper>>"] = clamp_upper[k];
TestF8(absl::StrReplaceAll(template_pre_hlo_string, replacements),
absl::StrReplaceAll(template_custom_call_string, replacements),
absl::StrReplaceAll(template_serialized_graph_string,
replacements));
}
}
}
}
};
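// Skips tests whose fusions the current build cannot support: FP8 tests
// require CUDA 12 and cuDNN 8.9, and ROCm builds skip every listed cause.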
#if GOOGLE_CUDA
#if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8900)
#define MAYBE_SKIP_TEST(CAUSE) \
do { \
if (absl::string_view(CAUSE) == "F8") \
GTEST_SKIP() << "FP8 convolutions require CUDA 12 and cuDNN 8.9."; \
} while (0)
#else
#define MAYBE_SKIP_TEST(CAUSE)
#endif
#else
#define MAYBE_SKIP_TEST(CAUSE) \
do { \
GTEST_SKIP() << "ROCm does not support " CAUSE " fusion"; \
} while (0)
#endif
TEST_F(CudnnFusedConvRewriterTest, TestConvOnly) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,32,9,9] broadcast(zero), dimensions={}
input = TYPE[1,17,9,9] parameter(0)
filter = TYPE[3,3,17,32] parameter(1)
conv = TYPE[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
ROOT relu = TYPE[1,32,9,9] maximum(zeros, conv)
})");
}
TEST_F(CudnnFusedConvRewriterTest, DontFuseReluWithDepthwiseConv) {
TestNotMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,17,9,9] broadcast(zero), dimensions={}
input = TYPE[1,17,9,9] parameter(0)
filter = TYPE[3,3,1,17] parameter(1)
conv = TYPE[1,17,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=17
ROOT relu = TYPE[1,17,9,9] maximum(zeros, conv)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestBias) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
add1 = TYPE[1,3,3,64] add(conv, broadcasted_bias)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, Test3D) {
std::string body = R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,5,7,64] broadcast(zero), dimensions={}
input = TYPE[1,3,5,7,64] parameter(0)
filter = TYPE[3,3,3,64,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,5,7,64] convolution(input, filter), window={size=3x3x3 pad=1_1x1_1x1_1}, dim_labels=b012f_012io->b012f, feature_group_count=1
broadcasted_bias = TYPE[1,3,5,7,64] broadcast(bias), dimensions={4}
add1 = TYPE[1,3,5,7,64] add(conv, broadcasted_bias)
)";
std::string relu = R"(
ROOT relu = TYPE[1,3,5,7,64] maximum(zeros, add1)
})";
std::string elu = R"(
cmp = pred[1,3,5,7,64] compare(add1, zeros), direction=GT
expm1 = TYPE[1,3,5,7,64] exponential-minus-one(add1)
ROOT elu = TYPE[1,3,5,7,64] select(cmp, add1, expm1)
})";
TestMatchWithAllTypes(body + relu);
if (!IsCuda()) TestMatchWithAllTypes(body + elu);
}
TEST_F(CudnnFusedConvRewriterTest, TestBiasMultiCall) {
std::string code = R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,<<<format>>>,64] broadcast(zero), dimensions={}
input = TYPE[1,<<<format>>>,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,<<<format>>>,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
broadcasted_bias = TYPE[1,<<<format>>>,64] broadcast(bias), dimensions={3}
add1 = TYPE[1,<<<format>>>,64] add(conv, broadcasted_bias)
ROOT relu = TYPE[1,<<<format>>>,64] maximum(zeros, add1)
})";
absl::flat_hash_map<absl::string_view, absl::string_view> replacements;
replacements["<<<format>>>"] = "3,3";
TestMatchWithAllTypes(absl::StrReplaceAll(code, replacements));
replacements["<<<format>>>"] = "5,5";
TestMatchWithAllTypes(absl::StrReplaceAll(code, replacements));
replacements["<<<format>>>"] = "3,3";
TestMatchWithAllTypes(absl::StrReplaceAll(code, replacements));
}
TEST_F(CudnnFusedConvRewriterTest, TestBiasNoRelu) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
ROOT add1 = TYPE[1,3,3,64] add(conv, broadcasted_bias)
})");
}
TEST_F(CudnnFusedConvRewriterTest, DontFuseBiasWithDepthwiseConv) {
TestNotMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,1,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=64
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
add1 = TYPE[1,3,3,64] add(conv, broadcasted_bias)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestElu) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
sum = TYPE[1,3,3,64] add(conv, broadcasted_bias)
cmp = pred[1,3,3,64] compare(sum, zeros), direction=GT
expm1 = TYPE[1,3,3,64] exponential-minus-one(sum)
ROOT elu = TYPE[1,3,3,64] select(cmp, sum, expm1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, DontFuseEluWithDepthwiseConv) {
TestNotMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,1,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=64
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
sum = TYPE[1,3,3,64] add(conv, broadcasted_bias)
cmp = pred[1,3,3,64] compare(sum, zeros), direction=GT
expm1 = TYPE[1,3,3,64] exponential-minus-one(sum)
ROOT elu = TYPE[1,3,3,64] select(cmp, sum, expm1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestRelu6) {
if (IsCuda() && !GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::AMPERE)) {
GTEST_SKIP() << "Conv-Bias-Relu6 fusion is supported and recommended with "
"the Nvidia Ampere+ GPUs.";
}
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
six = TYPE[] constant(6)
sixes = TYPE[1,3,3,64] broadcast(six), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
sum = TYPE[1,3,3,64] add(conv, broadcasted_bias)
ROOT relu6 = TYPE[1,3,3,64] clamp(zeros, sum, sixes)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestRelu6OddChannels) {
if (IsCuda() && !GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::AMPERE)) {
GTEST_SKIP() << "Conv-Bias-Relu6 fusion is supported and recommended with "
"the Nvidia Ampere+ GPUs.";
}
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zeros = TYPE[1,384,1024,32] broadcast(TYPE[] constant(0)), dimensions={}
sixes = TYPE[1,384,1024,32] broadcast(TYPE[] constant(6)), dimensions={}
input = TYPE[1,769,2049,3] parameter(0)
filter = TYPE[32,3,3,3] parameter(1)
bias = TYPE[32] parameter(2)
conv = TYPE[1,384,1024,32] convolution(input, filter), window={size=3x3 stride=2x2}, dim_labels=b01f_o01i->b01f
broadcasted_bias = TYPE[1,384,1024,32] broadcast(bias), dimensions={3}
sum = add(conv, broadcasted_bias)
ROOT relu6 = clamp(zeros, sum, sixes)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestLeakyRelu) {
if (IsCuda() && !GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::AMPERE)) {
    GTEST_SKIP() << "Conv-Bias-LeakyRelu fusion is only supported on "
                    "Nvidia Ampere+ GPUs.";
}
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
alpha = TYPE[] constant(0.2)
alphas = TYPE[1,3,3,64] broadcast(alpha), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
sum = TYPE[1,3,3,64] add(conv, broadcasted_bias)
cmp = pred[1,3,3,64] compare(sum, zeros), direction=GT
mul = TYPE[1,3,3,64] multiply(sum, alphas)
ROOT elu = TYPE[1,3,3,64] select(cmp, sum, mul)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestSideInputOnly) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
side_input = TYPE[1,3,3,64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
add1 = TYPE[1,3,3,64] add(conv, side_input)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, DontFuseSideInputWithDepthwiseConv) {
TestNotMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,1,64] parameter(1)
side_input = TYPE[1,3,3,64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=64
add1 = TYPE[1,3,3,64] add(conv, side_input)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestBiasAndSideInput) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
side_input = TYPE[1,3,3,64] parameter(2)
bias = TYPE[64] parameter(3)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
add1 = TYPE[1,3,3,64] add(conv, broadcasted_bias)
add2 = TYPE[1,3,3,64] add(add1, side_input)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add2)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestScaledConv) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,32,9,9] broadcast(zero), dimensions={}
alpha_conv_scalar = TYPE[] constant(0.999994934)
input = TYPE[1,17,9,9] parameter(0)
filter = TYPE[3,3,17,32] parameter(1)
conv = TYPE[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
alpha_conv = TYPE[1,32,9,9] broadcast(alpha_conv_scalar), dimensions={}
scaled_conv = TYPE[1,32,9,9] multiply(conv, alpha_conv)
ROOT relu = TYPE[1,32,9,9] maximum(zeros, scaled_conv)
})");
}
TEST_F(CudnnFusedConvRewriterTest, DontFuseScaledDepthwiseConv) {
TestNotMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,17,9,9] broadcast(zero), dimensions={}
alpha_conv_scalar = TYPE[] constant(0.999994934)
input = TYPE[1,17,9,9] parameter(0)
filter = TYPE[3,3,1,17] parameter(1)
conv = TYPE[1,17,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=17
alpha_conv = TYPE[1,17,9,9] broadcast(alpha_conv_scalar), dimensions={}
scaled_conv = TYPE[1,17,9,9] multiply(conv, alpha_conv)
ROOT relu = TYPE[1,17,9,9] maximum(zeros, scaled_conv)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestNoCrashOnInf) {
EXPECT_TRUE(RunAndCompare(R"(
HloModule Test
ENTRY Test {
zero = f32[] constant(inf)
zeros = f32[1,32,9,9] broadcast(zero), dimensions={}
alpha_conv_scalar = f32[] constant(0.999994934)
input = f32[1,17,9,9] parameter(0)
filter = f32[3,3,17,32] parameter(1)
conv = f32[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
alpha_conv = f32[1,32,9,9] broadcast(alpha_conv_scalar), dimensions={}
scaled_conv = f32[1,32,9,9] multiply(conv, alpha_conv)
ROOT relu = f32[1,32,9,9] maximum(zeros, scaled_conv)
})",
ErrorSpec{0.01}));
}
TEST_F(CudnnFusedConvRewriterTest, TestConvAndScaledSideInput) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
alpha_side_input_scalar = TYPE[] constant(0.899994934)
alpha_side_input = TYPE[1,3,3,64] broadcast(alpha_side_input_scalar), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
side_input = TYPE[1,3,3,64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
scaled_side_input = TYPE[1,3,3,64] multiply(side_input, alpha_side_input)
add1 = TYPE[1,3,3,64] add(conv, scaled_side_input)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, DontFuseDepthwiseConvWithScaledSideInput) {
TestNotMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
alpha_side_input_scalar = TYPE[] constant(0.899994934)
alpha_side_input = TYPE[1,3,3,64] broadcast(alpha_side_input_scalar), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,1,64] parameter(1)
side_input = TYPE[1,3,3,64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=64
scaled_side_input = TYPE[1,3,3,64] multiply(side_input, alpha_side_input)
add1 = TYPE[1,3,3,64] add(conv, scaled_side_input)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestScaledConvAndScaledSideInput) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
alpha_conv_scalar = TYPE[] constant(0.999994934)
alpha_conv = TYPE[1,3,3,64] broadcast(alpha_conv_scalar), dimensions={}
alpha_side_input_scalar = TYPE[] constant(0.899994934)
alpha_side_input = TYPE[1,3,3,64] broadcast(alpha_side_input_scalar), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
side_input = TYPE[1,3,3,64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
scaled_conv = TYPE[1,3,3,64] multiply(conv, alpha_conv)
scaled_side_input = TYPE[1,3,3,64] multiply(side_input, alpha_side_input)
add1 = TYPE[1,3,3,64] add(scaled_conv, scaled_side_input)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestScaledConvAndScaledSideInputWithBias) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
alpha_conv_scalar = TYPE[] constant(0.999994934)
alpha_conv = TYPE[1,3,3,64] broadcast(alpha_conv_scalar), dimensions={}
alpha_side_input_scalar = TYPE[] constant(0.899994934)
alpha_side_input = TYPE[1,3,3,64] broadcast(alpha_side_input_scalar), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
side_input = TYPE[1,3,3,64] parameter(2)
bias = TYPE[64] parameter(3)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
scaled_conv = TYPE[1,3,3,64] multiply(conv, alpha_conv)
scaled_side_input = TYPE[1,3,3,64] multiply(side_input, alpha_side_input)
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
add1 = TYPE[1,3,3,64] add(scaled_conv, broadcasted_bias)
add2 = TYPE[1,3,3,64] add(add1, scaled_side_input)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add2)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestMatchMaxZeroOnly) {
TestNotMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
point_one = TYPE[] constant(0.1)
point_ones = TYPE[1,32,9,9] broadcast(point_one), dimensions={}
input = TYPE[1,17,9,9] parameter(0)
filter = TYPE[3,3,17,32] parameter(1)
conv = TYPE[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
ROOT relu = TYPE[1,32,9,9] maximum(point_ones, conv)
})");
}
TEST_F(CudnnFusedConvRewriterTest, PreservesMetadata) {
const char* kHloString = R"(
HloModule Test
ENTRY Test {
zero = f32[] constant(0)
zeros = f32[1,32,9,9] broadcast(zero), dimensions={}
input = f32[1,17,9,9] parameter(0)
filter = f32[3,3,17,32] parameter(1)
conv = f32[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1, metadata={op_type="foo" op_name="bar"}
ROOT relu = f32[1,32,9,9] maximum(zeros, conv)
})";
const std::string optimized_hlo_string =
backend()
.compiler()
->RunHloPasses(
ParseAndReturnVer |
2,074 | cpp | tensorflow/tensorflow | kernel_reuse_cache | third_party/xla/xla/service/gpu/kernel_reuse_cache.cc | third_party/xla/xla/service/gpu/kernel_reuse_cache_test.cc | #ifndef XLA_SERVICE_GPU_KERNEL_REUSE_CACHE_H_
#define XLA_SERVICE_GPU_KERNEL_REUSE_CACHE_H_
#include <cstdint>
#include <functional>
#include <optional>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/service/gpu/executable.pb.h"
#include "xla/service/gpu/kernel_arguments.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/stream_executor/launch_dim.h"
namespace xla {
namespace gpu {
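// Deduplicates GPU kernel compilations: entries are keyed by a fingerprint of
// the fused computation and its kernel arguments, and the cache can be
// serialized to and restored from a CompilationCacheProto.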
class KernelReuseCache {
public:
struct Entry {
std::string kernel_name;
LaunchDimensions launch_dimensions;
std::optional<se::ClusterDim> cluster_dim;
int64_t shmem_bytes = 0;
std::string binary;
};
struct NamedBinary {
std::string name;
std::vector<uint8_t> binary;
};
absl::Status Load(const CompilationCacheProto& proto);
CompilationCacheProto Export() const;
bool IsEmpty() const { return cache_.empty(); }
void Clear() {
cache_.clear();
hits_.clear();
}
  std::pair<absl::StatusOr<const Entry*>, bool /*was_cached*/> GetWithStatus(
const HloComputation* fused_computation,
absl::Span<const KernelArgument> kernel_arguments,
absl::string_view discriminator,
const std::function<absl::StatusOr<Entry>()>& generator);
  std::pair<absl::StatusOr<const Entry*>, bool /*was_cached*/> GetWithStatus(
std::string fingerprint,
const std::function<absl::StatusOr<Entry>()>& generator);
private:
  absl::flat_hash_map<std::string /*fingerprint*/, Entry> cache_;
absl::flat_hash_set<std::string> hits_;
};
absl::Status UpdateDiskKernelCache(
absl::string_view path, bool do_append,
const CompilationCacheProto& current_cache,
absl::Span<const KernelReuseCache::NamedBinary> binaries_to_cache);
std::string GetComputationFingerprint(
const HloComputation* fused_computation,
absl::Span<const KernelArgument> kernel_arguments,
absl::string_view discriminator = "");
}
}
#endif
#include "xla/service/gpu/kernel_reuse_cache.h"
#include <functional>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/kernel_arguments.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
namespace {
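// Encodes each argument's alignment and aliased/written flags; an argument
// sharing a buffer slice with an earlier one is encoded as "=<index of the
// first argument with that slice>".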
std::string GetArgumentFingerprint(
absl::Span<const KernelArgument> kernel_arguments) {
return absl::StrJoin(
kernel_arguments, ",", [](std::string* s, const KernelArgument& arg) {
if (arg.first_with_same_slice().has_value()) {
absl::StrAppend(s, "=", arg.first_with_same_slice().value());
return;
}
absl::StrAppend(s, arg.alignment());
if (arg.aliased()) {
absl::StrAppend(s, "a");
}
if (arg.written()) {
absl::StrAppend(s, "w");
}
});
}
}
std::string GetComputationFingerprint(
const HloComputation* fused_computation,
absl::Span<const KernelArgument> kernel_arguments,
absl::string_view discriminator) {
auto print_options = HloPrintOptions::Fingerprint()
.set_print_only_essential_constants(false)
.set_print_operand_shape(false);
return absl::StrCat(discriminator, "(",
GetArgumentFingerprint(kernel_arguments), ")",
fused_computation->ToString(print_options));
}
absl::Status KernelReuseCache::Load(const CompilationCacheProto& proto) {
for (const auto& [name, entry] : proto.entries()) {
std::optional<se::ClusterDim> cluster_dim;
if (entry.has_cluster_dim()) {
cluster_dim =
se::ClusterDim{entry.cluster_dim().x(), entry.cluster_dim().y(),
entry.cluster_dim().z()};
}
TF_RET_CHECK(
cache_
.insert(
{entry.fingerprint(),
Entry{name,
LaunchDimensions{
entry.launch_dimensions().num_blocks(),
entry.launch_dimensions().num_threads_per_block()},
cluster_dim, entry.shmem_bytes(), entry.binary()}})
.second);
}
return absl::OkStatus();
}
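// Only entries that were requested through GetWithStatus (tracked in hits_)
// are exported, so kernels that went unused are not persisted.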
CompilationCacheProto KernelReuseCache::Export() const {
CompilationCacheProto proto;
for (const auto& [fingerprint, cache_entry] : cache_) {
if (!hits_.contains(fingerprint)) {
VLOG(5) << "Not exporting unused " << cache_entry.kernel_name;
continue;
}
auto [it, inserted] = proto.mutable_entries()->emplace(
cache_entry.kernel_name, CompilationCacheEntryProto{});
CHECK(inserted) << cache_entry.kernel_name;
CompilationCacheEntryProto& proto_entry = it->second;
proto_entry.set_fingerprint(fingerprint);
LaunchDimensionsProto launch_dimensions_proto;
launch_dimensions_proto.set_num_blocks(
cache_entry.launch_dimensions.num_blocks());
launch_dimensions_proto.set_num_threads_per_block(
cache_entry.launch_dimensions.num_threads_per_block());
*proto_entry.mutable_launch_dimensions() = launch_dimensions_proto;
if (cache_entry.cluster_dim.has_value()) {
ClusterDimProto cluster_dim_proto;
cluster_dim_proto.set_x(cache_entry.cluster_dim->x);
cluster_dim_proto.set_y(cache_entry.cluster_dim->y);
cluster_dim_proto.set_z(cache_entry.cluster_dim->z);
*proto_entry.mutable_cluster_dim() = cluster_dim_proto;
}
proto_entry.set_shmem_bytes(cache_entry.shmem_bytes);
proto_entry.set_binary(cache_entry.binary);
}
return proto;
}
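// Merges the given kernel binaries into the on-disk cache file: with
// do_append the existing file is parsed and extended, otherwise it is
// overwritten with just the new entries.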
absl::Status UpdateDiskKernelCache(
absl::string_view path, const bool do_append,
const CompilationCacheProto& current_cache,
absl::Span<const KernelReuseCache::NamedBinary> binaries_to_cache) {
CompilationCacheProto disk_cache;
if (do_append) {
std::string serialized;
TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(),
std::string(path), &serialized));
if (!disk_cache.ParseFromString(std::string(serialized))) {
return Internal("Failed to parse serialized CompilationCacheProto.");
}
}
auto entries = disk_cache.mutable_entries();
int stored_kernel_count = 0;
for (const auto& [name, binary] : binaries_to_cache) {
auto it_current = current_cache.entries().find(name);
TF_RET_CHECK(it_current != current_cache.entries().end());
auto [it_disk, inserted] = entries->insert({name, it_current->second});
TF_RET_CHECK(inserted);
TF_RET_CHECK(!binary.empty());
it_disk->second.set_binary(reinterpret_cast<const char*>(binary.data()),
binary.size());
VLOG(5) << "Cached kernel: " << name << ": " << binary.size();
++stored_kernel_count;
}
if (stored_kernel_count > 0) {
TF_RETURN_IF_ERROR(tsl::WriteStringToFile(tsl::Env::Default(),
std::string(path),
disk_cache.SerializeAsString()));
VLOG(2) << "Stored " << stored_kernel_count << " / "
<< binaries_to_cache.size() << " kernels in the cache file.";
}
return absl::OkStatus();
}
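// Convenience overload: computes the computation fingerprint and forwards to
// the string-keyed overload below.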
std::pair<absl::StatusOr<const KernelReuseCache::Entry*>, bool>
KernelReuseCache::GetWithStatus(
const HloComputation* fused_computation,
absl::Span<const KernelArgument> kernel_arguments,
absl::string_view discriminator,
const std::function<absl::StatusOr<KernelReuseCache::Entry>()>& generator) {
std::string fingerprint = GetComputationFingerprint(
fused_computation, kernel_arguments, discriminator);
VLOG(4) << "Fingerprint: ";
XLA_VLOG_LINES(4, fingerprint);
return GetWithStatus(std::move(fingerprint), generator);
}
std::pair<absl::StatusOr<const KernelReuseCache::Entry*>, bool>
KernelReuseCache::GetWithStatus(
std::string fingerprint,
const std::function<absl::StatusOr<KernelReuseCache::Entry>()>& generator) {
hits_.insert(fingerprint);
auto it = cache_.find(fingerprint);
if (it != cache_.end()) {
return {&it->second, true};
}
absl::StatusOr<Entry> entry = generator();
if (entry.ok()) {
it =
cache_.insert({std::move(fingerprint), std::move(entry.value())}).first;
return {&it->second, false};
}
return {entry.status(), false};
}
}
} | #include "xla/service/gpu/kernel_reuse_cache.h"
#include <gtest/gtest.h>
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
namespace xla {
namespace gpu {
namespace {
using KernelReuseTest = ::testing::Test;
TEST_F(KernelReuseTest, ExportAndLoadWork) {
KernelReuseCache cache;
EXPECT_TRUE(cache.IsEmpty());
auto [result, was_cached] = cache.GetWithStatus(
"fingerprint", []() { return KernelReuseCache::Entry{}; });
TF_EXPECT_OK(result);
EXPECT_NE(result.value(), nullptr);
EXPECT_FALSE(was_cached);
EXPECT_FALSE(cache.IsEmpty());
const CompilationCacheProto proto = cache.Export();
cache.Clear();
EXPECT_TRUE(cache.IsEmpty());
TF_EXPECT_OK(cache.Load(proto));
EXPECT_FALSE(cache.IsEmpty());
}
TEST_F(KernelReuseTest, UpdatingDiskKernelCacheWorks) {
std::string cache_file_path;
CHECK(tsl::Env::Default()->LocalTempFilename(&cache_file_path));
{
const CompilationCacheProto proto = [](std::string kernel_name) {
KernelReuseCache cache;
auto [result, was_cached] = cache.GetWithStatus("fingerprint", [&]() {
return KernelReuseCache::Entry{.kernel_name = kernel_name};
});
return cache.Export();
}("k1");
TF_EXPECT_OK(UpdateDiskKernelCache(cache_file_path, false,
proto,
{{.name = "k1", .binary = {5, 6}}}));
}
{
const CompilationCacheProto proto = [](std::string kernel_name) {
KernelReuseCache cache;
auto [result, was_cached] = cache.GetWithStatus("fingerprint", [&]() {
return KernelReuseCache::Entry{.kernel_name = kernel_name};
});
return cache.Export();
}("k2");
TF_EXPECT_OK(UpdateDiskKernelCache(cache_file_path, true,
proto,
{{.name = "k2", .binary = {7, 8}}}));
}
std::string serialized;
TF_EXPECT_OK(
tsl::ReadFileToString(tsl::Env::Default(), cache_file_path, &serialized));
CompilationCacheProto proto;
EXPECT_TRUE(proto.ParseFromString(std::string(serialized)));
EXPECT_EQ(proto.entries_size(), 2);
}
}
}
} |
2,075 | cpp | tensorflow/tensorflow | conv_algorithm_picker | third_party/xla/xla/service/gpu/autotuning/conv_algorithm_picker.cc | third_party/xla/xla/service/gpu/autotuning/conv_algorithm_picker_test.cc | #ifndef XLA_SERVICE_GPU_CONV_ALGORITHM_PICKER_H_
#define XLA_SERVICE_GPU_CONV_ALGORITHM_PICKER_H_
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/autotune_results.pb.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/autotuner_compile_util.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/gpu_conv_runner.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/stream_executor.h"
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
#include "xla/stream_executor/gpu/redzone_allocator.h"
#endif
namespace xla {
namespace gpu {
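// HLO pass that autotunes the algorithm for each cuDNN/MIOpen convolution
// custom call; results are cached (and reused) through AutotunerUtil.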
class GpuConvAlgorithmPicker : public HloModulePass {
public:
explicit GpuConvAlgorithmPicker(AutotuneConfig config) : config_(config) {}
absl::string_view name() const override {
return "gpu-conv-algorithm-picker";
}
static bool IsEnabled(const HloModule* module) {
return module->config().debug_options().xla_gpu_autotune_level() != 0;
}
static bool IsCandidate(const HloInstruction* instr) {
return IsCustomCallToDnnConvolution(*instr);
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
absl::StatusOr<bool> RunOnComputation(HloComputation* computation);
absl::StatusOr<bool> RunOnInstruction(HloInstruction* instr);
absl::StatusOr<AutotuneResult> PickBestAlgorithm(
const HloCustomCallInstruction* instr);
absl::StatusOr<AutotuneResult> PickBestAlgorithmNoCache(
const HloCustomCallInstruction* instr);
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
struct ReferenceResult {
stream_executor::dnn::AlgorithmDesc algorithm;
std::vector<stream_executor::DeviceMemoryBase> buffers;
};
struct AutotuneRuntimeArguments {
const HloModuleConfig hlo_module_config;
RedzoneBuffers rz_buffers;
const GpuConvConfig gpu_conv_config;
std::optional<std::string> canonical_hlo;
static absl::StatusOr<AutotuneRuntimeArguments> FromInstruction(
const HloCustomCallInstruction* instr, const AutotuneConfig& config,
const DebugOptions& debug_options);
};
absl::StatusOr<AutotuneResult> AutotuneOneConvRunner(
GenericConvRunner* runner,
std::optional<ReferenceResult>* reference_result,
absl::Span<const stream_executor::dnn::AlgorithmDesc> disabled_algos,
std::optional<AutotuneCacheKey> instruction_info,
const AutotuneRuntimeArguments& runtime_arguments);
absl::StatusOr<AutotuneResult> PickBestAlgorithmNoCacheCuda(
const HloCustomCallInstruction* instr);
#endif
absl::StatusOr<AutotuneResult> PickBestAlgorithmNoCacheRocm(
const HloCustomCallInstruction* instr);
private:
AutotuneConfig config_;
};
}
}
#endif
#include "xla/service/gpu/conv_algorithm_picker.h"
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/autotuning.pb.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/autotuner_compile_util.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/gpu_autotuning.pb.h"
#include "xla/service/gpu/gpu_conv_runner.h"
#include "xla/service/gpu/hlo_algorithm_denylist.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/slow_operation_alarm.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/cuda/cuda_platform_id.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/lazy_op_runner.h"
#include "xla/stream_executor/numeric_options.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/rocm/rocm_platform_id.h"
#include "xla/stream_executor/scratch_allocator.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/util/env_var.h"
#include "xla/tsl/util/proto/proto_utils.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
#include "third_party/gpus/cudnn/cudnn.h"
#include "third_party/gpus/cudnn/cudnn_version.h"
#if CUDNN_VERSION >= 90000
#include "third_party/gpus/cudnn/cudnn_ops.h"
#else
#include "third_party/gpus/cudnn/cudnn_ops_infer.h"
#endif
#include "xla/service/gpu/buffer_comparator.h"
#include "xla/stream_executor/gpu/redzone_allocator.h"
#endif
namespace xla {
namespace gpu {
namespace {
using se::DeviceMemoryBase;
using se::dnn::AlgorithmDesc;
using std::optional;
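// Scratch allocator used while profiling candidate algorithms. The workspace
// limit defaults to 4 GiB and can be overridden via the
// TF_CUDNN_WORKSPACE_LIMIT_IN_MB environment variable.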
class ScratchAllocator : public se::ScratchAllocator {
public:
ScratchAllocator(int device_ordinal,
se::DeviceMemoryAllocator* memory_allocator)
: device_ordinal_(device_ordinal), memory_allocator_(memory_allocator) {}
int64_t GetMemoryLimitInBytes() override {
return ScratchAllocator::GetDefaultMemoryLimitInBytes();
}
int64_t TotalAllocatedBytes() { return total_allocated_bytes_; }
static int64_t GetDefaultMemoryLimitInBytes() {
int64_t value;
TF_CHECK_OK(tsl::ReadInt64FromEnvVar("TF_CUDNN_WORKSPACE_LIMIT_IN_MB",
1LL << 12, &value));
return value * (1LL << 20);
}
absl::StatusOr<se::DeviceMemory<uint8_t>> AllocateBytes(
int64_t byte_size) override;
template <typename T>
absl::StatusOr<se::DeviceMemory<T>> Allocate(int64_t num_elements) {
TF_ASSIGN_OR_RETURN(se::DeviceMemory<uint8_t> bytes,
AllocateBytes(num_elements * sizeof(T)));
return se::DeviceMemory<T>(bytes);
}
private:
const int device_ordinal_;
se::DeviceMemoryAllocator* memory_allocator_;
std::vector<se::OwningDeviceMemory> allocated_buffers_;
int64_t total_allocated_bytes_ = 0;
};
absl::StatusOr<se::DeviceMemory<uint8_t>> ScratchAllocator::AllocateBytes(
int64_t byte_size) {
  CHECK_GE(byte_size, 0) << "byte_size must be non-negative.";
if (byte_size > GetMemoryLimitInBytes()) {
return absl::ResourceExhaustedError(absl::StrFormat(
"Allocating %d bytes exceeds the memory limit of %d bytes.", byte_size,
GetMemoryLimitInBytes()));
}
TF_ASSIGN_OR_RETURN(se::OwningDeviceMemory allocated_buffer,
memory_allocator_->Allocate(device_ordinal_, byte_size,
                                           /*retry_on_failure=*/false));
total_allocated_bytes_ += byte_size;
se::DeviceMemoryBase buffer_addr = *allocated_buffer;
allocated_buffers_.push_back(std::move(allocated_buffer));
return se::DeviceMemory<uint8_t>(buffer_addr);
}
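// Enumerates candidate runners for the convolution described by `config`,
// dispatching on the convolution kind (plain forward/backward, fused
// forward-bias-activation, or graph convolution).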
absl::StatusOr<std::vector<GenericConvRunner>> GetAlgorithms(
const GpuConvConfig& config, se::Stream* stream, bool use_cudnn_frontend,
bool use_fallback, const se::NumericOptions& numeric_options) {
TF_ASSIGN_OR_RETURN(se::dnn::ConvolutionKind kind,
GetDNNConvKindFromCudnnConvKind(config.kind));
TF_ASSIGN_OR_RETURN(se::dnn::DataType input_type,
GetDNNDataTypeFromPrimitiveType(config.input_type));
TF_ASSIGN_OR_RETURN(se::dnn::DataType output_type,
GetDNNDataTypeFromPrimitiveType(config.output_type));
se::StreamExecutor* stream_exec = stream->parent();
std::vector<GenericConvRunner> result;
auto dnn = stream_exec->AsDnn();
if (dnn == nullptr) {
return absl::InvalidArgumentError("No DNN in stream executor.");
}
switch (kind) {
default:
return Internal("Unknown ConvolutionKind %d", kind);
case se::dnn::ConvolutionKind::FORWARD_BIAS_ACTIVATION: {
if (!config.fusion) {
return Internal(
"GpuConvConfig had fusion ConvolutionKind but no FusionConfig.");
}
std::vector<std::unique_ptr<const se::dnn::FusedConvRunner>> runners;
TF_RETURN_IF_ERROR(dnn->GetFusedConvolveRunners(
use_cudnn_frontend,
se::dnn::ConvolutionKind::FORWARD, input_type,
BiasTypeForInputType(input_type), output_type,
config.conv_result_scale,
config.fusion->side_input_scale,
config.fusion->leakyrelu_alpha, stream,
config.input_descriptor, config.filter_descriptor,
config.bias_descriptor, config.output_descriptor, config.conv_desc,
use_fallback, config.fusion->mode, numeric_options, &runners));
for (auto& runner : runners) {
TF_ASSIGN_OR_RETURN(
auto runner_cache,
se::dnn::LazyOpRunner<se::dnn::FusedConvOp>::FromOpRunner(
std::move(runner)));
result.emplace_back(std::move(runner_cache));
}
break;
}
case se::dnn::ConvolutionKind::FORWARD_GRAPH: {
std::vector<std::unique_ptr<const se::dnn::GraphConvRunner>> runners;
TF_RETURN_IF_ERROR(dnn->GetGraphConvolveRunners(
kind, input_type, output_type, stream, config.input_descriptor,
config.filter_descriptor, config.output_descriptor, config.conv_desc,
use_fallback, numeric_options, &runners, config.serialized_graph));
for (auto& runner : runners) {
TF_ASSIGN_OR_RETURN(
auto runner_cache,
se::dnn::LazyOpRunner<se::dnn::GraphConvOp>::FromOpRunner(
std::move(runner)));
result.emplace_back(std::move(runner_cache));
}
break;
}
case se::dnn::ConvolutionKind::FORWARD:
case se::dnn::ConvolutionKind::BACKWARD_DATA:
case se::dnn::ConvolutionKind::BACKWARD_FILTER: {
std::vector<std::unique_ptr<const se::dnn::ConvRunner>> runners;
TF_RETURN_IF_ERROR(dnn->GetConvolveRunners(
use_cudnn_frontend, kind, input_type, output_type, stream,
config.input_descriptor,
DeviceMemoryBase(nullptr),
config.filter_descriptor,
DeviceMemoryBase(nullptr),
config.output_descriptor,
DeviceMemoryBase(nullptr), config.conv_desc,
use_fallback, nullptr, numeric_options, &runners));
for (auto& runner : runners) {
TF_ASSIGN_OR_RETURN(
auto runner_cache,
se::dnn::LazyOpRunner<se::dnn::ConvOp>::FromOpRunner(
std::move(runner)));
result.emplace_back(std::move(runner_cache));
}
break;
}
}
return result;
}
absl::StatusOr<std::vector<std::unique_ptr<const se::dnn::ConvRunner>>>
GetMIOpenAlgorithms(const HloCustomCallInstruction* instr,
absl::Span<se::DeviceMemoryBase> operand_buffers,
absl::Span<se::DeviceMemoryBase> result_buffers,
se::StreamExecutor* stream_exec,
ScratchAllocator* scratch_allocator, se::Stream* stream,
const se::NumericOptions& numeric_options) {
TF_ASSIGN_OR_RETURN(GpuConvConfig config, GetGpuConvConfig(instr));
TF_ASSIGN_OR_RETURN(se::dnn::ConvolutionKind kind,
GetDNNConvKindFromCudnnConvKind(config.kind));
TF_ASSIGN_OR_RETURN(se::dnn::DataType dtype,
GetDNNDataTypeFromPrimitiveType(config.output_type));
TF_ASSIGN_OR_RETURN(
GpuConvParams params,
GetGpuConvParams(config, operand_buffers, result_buffers));
std::vector<std::unique_ptr<const se::dnn::ConvRunner>> runners;
auto dnn = stream_exec->AsDnn();
if (dnn == nullptr) {
return absl::InvalidArgumentError("No DNN in stream executor.");
}
TF_RETURN_IF_ERROR(dnn->GetConvolveRunners(
false, kind, dtype, dtype, stream,
params.config->input_descriptor, params.input_buf,
params.config->filter_descriptor, params.filter_buf,
params.config->output_descriptor, params.output_buf,
params.config->conv_desc,
false, scratch_allocator, numeric_options,
&runners));
return runners;
}
std::string NumBytesToString(int64_t bytes) {
return absl::StrCat(tsl::strings::HumanReadableNumBytes(bytes), " (", bytes,
"B)");
}
CudnnVersion GetCudnnVersion(se::StreamExecutor* stream_executor) {
se::dnn::VersionInfo version = GetDnnVersionInfoOrDefault(stream_executor);
CudnnVersion cudnn_version;
cudnn_version.set_major(version.major_version());
cudnn_version.set_minor(version.minor_version());
cudnn_version.set_patch(version.patch());
return cudnn_version;
}
ComputeCapability GetComputeCapability(se::StreamExecutor* stream_executor) {
ComputeCapability cc;
se::CudaComputeCapability se_cc =
stream_executor->GetDeviceDescription().cuda_compute_capability();
cc.set_major(se_cc.major);
cc.set_minor(se_cc.minor);
return cc;
}
void PrintPlatformInfo(const se::Stream* stream) {
auto* se = stream->parent();
const auto& desc = se->GetDeviceDescription();
LOG(ERROR) << "Device: " << desc.name();
LOG(ERROR) << "Platform: " << desc.platform_version();
LOG(ERROR) << "Driver: " << desc.driver_version();
LOG(ERROR) << "Runtime: " << desc.runtime_version();
auto dnn_version = GetDnnVersionInfo(se);
if (dnn_version.ok()) {
auto v = dnn_version.value();
LOG(ERROR) << "cudnn version: " << v.major_version() << "."
<< v.minor_version() << "." << v.patch();
}
}
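// Returns true if the redzones around the autotuning buffers are intact.
// A detected out-of-bounds write marks `result` as REDZONE_MODIFIED and logs
// diagnostics for the offending platform and instruction.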
absl::StatusOr<bool> CheckRedzones(const se::RedzoneAllocator& allocator,
se::Stream* stream, absl::string_view name,
std::string_view instr_str,
AutotuneResult* result) {
XLA_SCOPED_LOGGING_TIMER_LEVEL("CudnnConvAlgorithmPicker checking redzones",
2);
using RedzoneCheckStatus = se::RedzoneAllocator::RedzoneCheckStatus;
TF_ASSIGN_OR_RETURN(RedzoneCheckStatus redzone_check,
allocator.CheckRedzones());
if (redzone_check.ok()) {
return true;
}
auto* fail = result->mutable_failure();
fail->set_kind(AutotuneResult::REDZONE_MODIFIED);
*fail->mutable_msg() = redzone_check.RedzoneFailureMsg();
fail->set_buffer_address(
reinterpret_cast<uint64_t>(redzone_check.user_buffer_address));
LOG(ERROR) << absl::StreamFormat(
"Detected cudnn out-of-bounds write in conv %s buffer! This is likely a "
"cudnn bug. We will skip this algorithm in the future, but your GPU "
"state may already be corrupted, leading to incorrect results. Within "
"Google, no action is needed on your part. Outside of Google, please "
"ensure you're running the latest version of cudnn. If that doesn't fix "
"the problem, please file a bug with this full error message and we'll "
"contact nvidia.",
name);
LOG(ERROR) << redzone_check.RedzoneFailureMsg();
LOG(ERROR) << "HloInstruction " << instr_str;
PrintPlatformInfo(stream);
return false;
}
}
bool ShouldInitConvData(const HloModuleConfig& hlo_module_config) {
const int32_t conv_autotune_level =
hlo_module_config.debug_options().xla_gpu_autotune_level();
return conv_autotune_level >= 2;
}
bool ShouldCheckConv(const HloModuleConfig& hlo_module_config) {
const int32_t conv_autotune_level =
hlo_module_config.debug_options().xla_gpu_autotune_level();
return conv_autotune_level >= 4;
}
absl::StatusOr<AutotuneResult> GpuConvAlgorithmPicker::PickBestAlgorithm(
const HloCustomCallInstruction* instr) {
return AutotunerUtil::Autotune(
instr, config_, [&] { return PickBestAlgorithmNoCache(instr); });
}
absl::StatusOr<AutotuneResult> GpuConvAlgorithmPicker::PickBestAlgorithmNoCache(
const HloCustomCallInstruction* instr) {
if (config_.IsDeviceless()) {
AutotuneResult result;
result.mutable_algorithm()->set_algo_id(-1);
return result;
}
se::StreamExecutor* stream_exec = config_.GetExecutor();
absl::MutexLock lock(&GetGpuMutex(stream_exec));
if (!stream_exec->SynchronizeAllActivity()) {
return Internal(
"Failed to synchronize GPU for autotuning conv instruction");
}
absl::StatusOr<AutotuneResult> result_or(Internal("Unknown platform."));
se::Platform::Id platform_id = stream_exec->GetPlatform()->id();
if (platform_id == se::rocm::kROCmPlatformId) {
result_or = PickBestAlgorithmNoCacheRocm(instr);
} else if (platform_id == se::cuda::kCudaPlatformId) {
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
result_or = PickBestAlgorithmNoCacheCuda(instr);
#endif
}
return result_or;
}
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
absl::StatusOr<GpuConvAlgorithmPicker::AutotuneRuntimeArguments>
GpuConvAlgorithmPicker::AutotuneRuntimeArguments::FromInstruction(
const HloCustomCallInstruction* instr, const AutotuneConfig& config,
const DebugOptions& debug_options) {
TF_ASSIGN_OR_RETURN(auto rz_buffers,
RedzoneBuffers::FromInstruction(
*instr, config, debug_options,
RedzoneBuffers::kAllInputsOutputsNoScratch));
std::string canonical_hlo(
AutotuneCacheKey(config.GetExecutor()->GetDeviceDescription().model_str(),
*instr)
.GetHlo());
TF_ASSIGN_OR_RETURN(GpuConvConfig gpu_conv_config, GetGpuConvConfig(instr));
GpuConvAlgorithmPicker::AutotuneRuntimeArguments runtime_arguments = {
instr->GetModule()->config(),
std::move(rz_buffers),
std::move(gpu_conv_config),
{canonical_hlo}};
return runtime_arguments;
}
struct CudnnVersionRange {
using TupleVersion = std::tuple<int, int, int>;
TupleVersion begin;
TupleVersion end;
bool IsInRange(const CudnnVersion& other) const {
TupleVersion other_version{other.major(), other.minor(), other.patch()};
return begin <= other_version && other_version < end;
}
CudnnVersionRange(const CudnnVersion& begin, const CudnnVersion& end)
: begin(begin.major(), begin.minor(), begin.patch()),
end(end.major(), end.minor(), end.patch()) {}
CudnnVersionRange(const TupleVersion& begin, const TupleVersion& end)
: begin(begin), end(end) {}
};
struct ComputeCapabilityRange {
using TupleComputeCapability = std::tuple<int, int>;
TupleComputeCapability begin;
TupleComputeCapability end;
bool IsInRange(const ComputeCapability& other) const {
TupleComputeCapability other_cc{other.major(), other.minor()};
return begin <= other_cc && other_cc < end;
}
};
struct DisabledAlgorithm {
CudnnVersionRange cudnn_version_range;
ComputeCapabilityRange compute_capability_range;
int algo_id;
};
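// Algorithms known to be buggy for specific cuDNN versions and compute
// capabilities: currently algorithm 14 on cuDNN 9.x for compute capabilities
// in [6.0, 8.0).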
static const DisabledAlgorithm kDisabledAlgorithms[] = {
{{{9, 0, 0}, {10, 0, 0}},
{{6, 0}, {8, 0}},
14}};
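// Profiles a single candidate runner and returns either a timing result or a
// failure recording why the algorithm was disqualified.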
absl::StatusOr<AutotuneResult> GpuConvAlgorithmPicker::AutotuneOneConvRunner(
GenericConvRunner* const runner,
std::optional<ReferenceResult>* reference_result,
absl::Span<const AlgorithmDesc> disabled_algos,
std::optional<AutotuneCacheKey> instruction_info,
const AutotuneRuntimeArguments& runtime_arguments) {
auto alg = runner->ToAlgorithmDesc();
se::StreamExecutor* stream_exec = config_.GetExecutor();
XLA_SCOPED_LOGGING_TIMER_LEVEL(
absl::StrCat("CudnnConvAlgorithmPicker::PickBestAlgorithm algo ",
alg.ToString()),
2);
auto make_failure = [&alg](AutotuneResult::FailureKind kind,
absl::string_view msg) {
AutotuneResult result;
*result.mutable_algorithm() = alg.ToProto();
result.mutable_failure()->set_kind(kind);
result.mutable_failure()->set_msg( msg.data(), msg.size());
return result;
};
AlgorithmDesc alg_key(alg.algo_id(), alg.tensor_ops_enabled(), std::nullopt);
std::string instr_str = instruction_info.has_value()
? std::string(instruction_info->GetHlo())
: "<unknown>";
for (const auto& disabled_algo : kDisabledAlgorithms) {
if (disabled_algo.cudnn_version_range.IsInRange(
GetCudnnVersion(stream_exec)) &&
disabled_algo.compute_capability_range.IsInRange(
GetComputeCapability(stream_exec)) &&
disabled_algo.algo_id == alg.algo_id()) {
LOG(INFO) << "Omitted potentially buggy algorithm " << alg.ToString()
<< " for conv " << instr_str;
return make_failure(AutotuneResult::DISQUALIFIED,
"Disqualified for being known-buggy.");
}
}
if (absl::c_linear_search(disabled_algos, alg_key)) {
LOG(INFO) << "Omitted potentially buggy algorithm " << alg.ToString()
<< " for conv " << instr_str;
return make_failure(AutotuneResult::DISQUALIFIED,
"Disqualified for being known-buggy.");
}
GpuConvConfig config = runtime_arguments.gpu_conv_config;
auto activation_mode =
config.fusion ? config.fusion->mode : se::dnn::ActivationMode::kNone;
if (!alg.is_cudnn_frontend() &&
config.kind == CudnnConvKind::kForwardActivation &&
activation_mode == se::dnn::ActivationMode::kNone &&
alg.algo_id() != CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM) {
return make_failure(AutotuneResult::DISQUALIFIED,
"Disqualified for implicit RELU.");
}
TF_ASSIGN_OR_RETURN(
se::RedzoneAllocator scratch_allocator,
AutotunerUtil::CreateRedzoneAllocator(
config_, runtime_arguments.hlo_module_config.debug_options()));
se::dnn::ProfileResult profile_result;
VLOG(4) << "Trying algorithm " << alg.ToString() << " for " << instr_str;
SlowOperationAlarm alarm(absl::Seconds(1), [&] {
return absl::StrFormat(
"Trying algorithm %s for conv %s is taking a while...", alg.ToString(),
instr_str);
});
std::optional<size_t> workspace_size =
runner->ToAlgorithmDesc().workspace_size();
if (!workspace_size) {
return make_failure(AutotuneResult::UNKNOWN,
"Internal error: missing workspace size from "
"OpRunner::ToAlgorithmDesc()");
}
auto scratch_or = scratch_allocator.AllocateBytes(*workspace_size);
if (!scratch_or.ok()) {
return make_failure(AutotuneResult::DISQUALIFIED,
absl::StrCat("Scratch allocation failed: ",
scratch_or.status().ToString()));
}
se::DeviceMemoryBase scratch_memory = scratch_or.value();
RunConvOptions options;
options.runner_cache = runner;
float max_time = 0;
float min_time = std::numeric_limits<float>::max();
absl::Status launch_status;
std::vector<se::DeviceMemoryBase> operand_buffers =
runtime_arguments.rz_buffers.input_buffers();
std::vector<se::DeviceMemoryBase> result_buffers =
runtime_arguments.rz_buffers.output_buffers();
TF_ASSIGN_OR_RETURN(se::Stream* const stream, config_.GetStream());
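  // Warm-up run: options.profile_result is not set yet, so this launch is
  // not timed; it initializes the engine and absorbs one-time startup costs.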
launch_status = RunGpuConv(config, operand_buffers, result_buffers,
scratch_memory, stream, options);
options.profile_result = &profile_result;
profile_result.set_warmup_run_executed(true);
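  // Measure up to kMaxIter profiled runs, stopping early once a run lands
  // within kThreshold (5%) of the running minimum.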
constexpr int kMaxIter = 10;
int num_iters = 0;
for (; num_iters < kMaxIter && launch_status.ok(); ++num_iters) {
launch_status = RunGpuConv(config, operand_buffers, result_buffers,
scratch_memory, stream, options);
if (!profile_result.is_valid()) {
break;
}
float old_min_time = min_time;
min_time = std::min(min_time, profile_result.elapsed_time_in_ms());
max_time = std::max(max_time, profile_result.elapsed_time_in_ms());
constexpr float kThreshold = 0.05f;
if (std::abs(profile_result.elapsed_time_in_ms() - old_min_time) /
old_min_time <
kThreshold) {
break;
}
}
if (!launch_status.ok()) {
VLOG(5) << "Launch failed: " << launch_status;
return make_failure(
AutotuneResult::DISQUALIFIED,
absl::StrCat("Profiling failure on cuDNN engine ", alg.ToString(), ": ",
launch_status.ToString()));
}
if (!profile_result.is_valid()) {
VLOG(5) << "Launch succeeded but profile result is invalid.";
return make_failure(
AutotuneResult::UNKNOWN,
absl::StrCat("Launch succeeded but profile result is invalid, "
"with cuDNN engine ",
alg.ToString(), ": ", launch_sta | #include "xla/service/gpu/conv_algorithm_picker.h"
#include <cstdint>
#include <variant>
#include <vector>
#include "absl/strings/string_view.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/gpu_conv_rewriter.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/platform_util.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/platform.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::gpu {
namespace {
namespace m = ::xla::match;
class GpuConvAlgorithmPickerTest : public HloTestBase {
public:
GpuConvAlgorithmPickerTest() { AutotunerUtil::ClearAutotuneResults(); }
};
TEST_F(GpuConvAlgorithmPickerTest, SetAlgorithm) {
constexpr absl::string_view kHlo = R"(
HloModule module
ENTRY main {
%arg0 = f32[3,56,56,16]{2,1,0,3} parameter(0)
%arg1 = f32[3,3,3,64]{2,1,0,3} parameter(1)
ROOT %conv = f32[54,54,16,64]{1,0,3,2} convolution(%arg0, %arg1), window={size=3x3}, dim_labels=f01b_i01o->01bf
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kHlo));
se::Platform* platform = PlatformUtil::GetDefaultPlatform().value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
PlatformUtil::GetStreamExecutors(platform));
ASSERT_GT(executors.size(), 0);
se::StreamExecutor* stream_exec = executors[0];
const se::GpuComputeCapability& cc = backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability();
bool changed = false;
TF_ASSERT_OK_AND_ASSIGN(changed, RunHloPass(GpuConvRewriter(cc), m.get()));
changed = false;
DebugOptions opts = DefaultDebugOptionsIgnoringFlags();
AutotuneConfig cfg{DeviceConfig{stream_exec, nullptr}, opts};
TF_ASSERT_OK_AND_ASSIGN(changed,
RunHloPass(GpuConvAlgorithmPicker(cfg), m.get()));
ASSERT_TRUE(changed);
AutotuneResults results;
TF_ASSERT_OK(AutotunerUtil::SerializeAutotuneResults(&results));
ASSERT_EQ(results.results_size(), 1);
auto& result = *results.mutable_results(0)->mutable_result();
int64_t old_scratch_bytes = result.scratch_bytes();
int64_t new_scratch_bytes = old_scratch_bytes + 1;
result.set_scratch_bytes(new_scratch_bytes);
AutotunerUtil::ClearAutotuneResults();
TF_ASSERT_OK(AutotunerUtil::LoadAutotuneResults(results));
TF_ASSERT_OK_AND_ASSIGN(m, ParseAndReturnVerifiedModule(kHlo));
changed = false;
TF_ASSERT_OK_AND_ASSIGN(changed, RunHloPass(GpuConvRewriter(cc), m.get()));
changed = false;
TF_ASSERT_OK_AND_ASSIGN(changed,
RunHloPass(GpuConvAlgorithmPicker(cfg), m.get()));
ASSERT_TRUE(changed);
TF_ASSERT_OK(RunHloPass(TupleSimplifier(), m.get()).status());
SCOPED_TRACE(m->ToString());
HloInstruction* conv;
ASSERT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall(&conv))));
EXPECT_THAT(
conv->shape(),
GmockMatch(m::Shape().WithSubshape(
{1}, m::Shape().WithElementType(U8).WithDims({new_scratch_bytes}))));
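  // On cuDNN 9.x, engine 14 is denylisted for compute capabilities in
  // [6.0, 8.0) (see kDisabledAlgorithms in the picker), so on a 7.0 GPU the
  // chosen algorithm must not be 14.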
TF_ASSERT_OK_AND_ASSIGN(auto dnn_version, GetDnnVersionInfo(stream_exec));
if (dnn_version.major_version() >= 9 && dnn_version.major_version() < 10 &&
std::holds_alternative<stream_executor::CudaComputeCapability>(cc) &&
std::get<stream_executor::CudaComputeCapability>(cc).major == 7 &&
std::get<stream_executor::CudaComputeCapability>(cc).minor == 0) {
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->has_cudnn_conv_backend_config() &&
conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.algorithm()
.algo_id() != 14);
}
}
}
} |
2,076 | cpp | tensorflow/tensorflow | scatter_slice_simplifier | third_party/xla/xla/service/gpu/transforms/scatter_slice_simplifier.cc | third_party/xla/xla/service/gpu/transforms/scatter_slice_simplifier_test.cc | #ifndef XLA_SERVICE_GPU_SCATTER_SLICE_SIMPLIFIER_H_
#define XLA_SERVICE_GPU_SCATTER_SLICE_SIMPLIFIER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
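// Replaces scatters that are only consumed (possibly through elementwise ops
// and get-tuple-elements) by truncation slices with an equivalent scatter of
// the smaller sliced shape, eliminating the slices.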
class ScatterSliceSimplifier : public HloModulePass {
public:
absl::string_view name() const override { return "scatter-slice-simplifier"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/gpu/scatter_slice_simplifier.h"
#include <cstdint>
#include <iterator>
#include <optional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
bool IsValidIntermediaryUser(const HloInstruction* instruction) {
return instruction->IsElementwise() ||
instruction->opcode() == HloOpcode::kGetTupleElement;
}
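// Matches the "scatter -> (elementwise | get-tuple-element)* -> slice"
// pattern and infers the smallest result shape that satisfies every
// transitive slice user of the scatter.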
class ScatterSliceMatcher {
public:
explicit ScatterSliceMatcher(const HloScatterInstruction* scatter)
: scatter_(scatter),
operand_dimensions_(
scatter->scatter_operands()[0]->shape().dimensions()),
result_dimensions_(operand_dimensions_.begin(),
operand_dimensions_.end()) {}
std::optional<Shape> InferShape() {
VLOG(10) << "Evaluating scatter " << scatter_->name();
if (!AreAllUsersValid(scatter_)) {
return std::nullopt;
}
std::vector<Shape> result_shapes;
absl::c_transform(scatter_->scatter_operands(),
std::back_inserter(result_shapes),
[&](const HloInstruction* op) {
return ShapeUtil::MakeShape(op->shape().element_type(),
result_dimensions_);
});
return ShapeUtil::MakeMaybeTupleShape(result_shapes);
}
private:
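  // Folds the given slice into the inferred result dimensions. Only
  // truncation slices (start 0, stride 1) are foldable, only along dimensions
  // that are not update-window dimensions of the scatter, and incompatible
  // truncations of the same dimension fail the match.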
bool UpdateDimensions(const HloSliceInstruction* slice) {
int64_t rank = slice->shape().rank();
for (int64_t i = 0; i < rank; ++i) {
if (slice->slice_starts(i) != 0 || slice->slice_strides(i) != 1) {
return false;
}
if (slice->slice_limits(i) != result_dimensions_[i]) {
if (result_dimensions_[i] != operand_dimensions_[i]) {
return false;
}
auto& update_window_dims =
scatter_->scatter_dimension_numbers().update_window_dims();
if (absl::c_binary_search(update_window_dims, i)) {
return false;
}
result_dimensions_[i] = slice->slice_limits(i);
VLOG(10) << "Dimension " << i << " truncated to size "
<< result_dimensions_[i];
}
}
return true;
}
bool IsUserValid(const HloInstruction* op) {
VLOG(10) << "Visiting user " << op->name();
if (auto* slice = DynCast<HloSliceInstruction>(op)) {
return UpdateDimensions(slice);
}
bool is_valid = visited_set_.contains(op) ||
(IsValidIntermediaryUser(op) && AreAllUsersValid(op));
if (is_valid) {
visited_set_.emplace(op);
}
return is_valid;
}
bool AreAllUsersValid(const HloInstruction* op) {
if (op->user_count() == 0) {
return !op->IsRoot();
}
return absl::c_all_of(op->users(), [this](const HloInstruction* user) {
return IsUserValid(user);
});
}
const HloScatterInstruction* scatter_;
absl::flat_hash_set<const HloInstruction*> visited_set_;
absl::Span<const int64_t> operand_dimensions_;
DimensionVector result_dimensions_;
};
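// Builds a truncation slice of `operand`: starts at 0 with unit strides and
// limits taken from `shape`.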
HloInstruction* CreateSliceFrom(HloInstruction* operand, const Shape& shape) {
std::vector<int64_t> start_indices(shape.rank(), 0);
std::vector<int64_t> limit_indices(shape.rank());
std::vector<int64_t> strides(shape.rank(), 1);
for (int64_t i = 0; i < shape.rank(); ++i) {
limit_indices[i] = shape.dimensions(i);
}
return operand->AddInstruction(HloInstruction::CreateSlice(
shape, operand, start_indices, limit_indices, strides));
}
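// Rebuilds the scatter with every operand pre-sliced to the inferred shape,
// keeping the original indices, updates, update computation, and dimension
// numbers.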
HloInstruction* CreateScatterFrom(HloScatterInstruction* scatter,
const Shape& shape) {
std::vector<HloInstruction*> operands(scatter->scatter_operand_count());
for (int64_t i = 0; i < operands.size(); ++i) {
operands[i] =
CreateSliceFrom(scatter->scatter_operands()[i],
shape.IsTuple() ? shape.tuple_shapes(i) : shape);
}
return scatter->AddInstruction(HloInstruction::CreateScatter(
shape, absl::MakeSpan(operands), scatter->scatter_indices(),
scatter->scatter_updates(), scatter->called_computations()[0],
scatter->scatter_dimension_numbers(), scatter->indices_are_sorted(),
scatter->unique_indices()));
}
class ScatterSliceSimplifierVisitor : public DfsHloRewriteVisitor {
public:
absl::Status HandleScatter(HloInstruction* instruction) override {
auto* scatter = Cast<HloScatterInstruction>(instruction);
std::optional<Shape> result_shape =
ScatterSliceMatcher(scatter).InferShape();
if (!result_shape.has_value()) {
return absl::OkStatus();
}
VLOG(2) << "Matched scatter " << scatter->name() << " with shape "
<< scatter->shape().ToString() << ", inferred result shape "
<< result_shape->ToString() << " (from the slice users)";
HloInstruction* new_scatter = CreateScatterFrom(scatter, *result_shape);
return ReplaceAllUsersRecursive(scatter, new_scatter);
}
private:
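  // Recursively rewires all users of the old (full-shape) instruction onto
  // the new (sliced) one: slices are replaced outright, elementwise users are
  // cloned with sliced operands, and get-tuple-elements are re-emitted.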
absl::Status ReplaceAllUsersRecursive(HloInstruction* old_instruction,
HloInstruction* new_instruction) {
replacements_[old_instruction] = new_instruction;
std::vector<HloInstruction*> users = old_instruction->users();
for (HloInstruction* user : users) {
if (user->parent() == nullptr) {
VLOG(3) << "Skipping user " << user->name() << " (already replaced)";
continue;
}
TF_RETURN_IF_ERROR(ReplaceUserRecursive(user, new_instruction));
}
return absl::OkStatus();
}
absl::Status ReplaceUserRecursive(HloInstruction* user,
HloInstruction* operand) {
VLOG(3) << "Replacing scatter user " << user->name();
if (user->opcode() == HloOpcode::kSlice) {
return ReplaceInstruction(user, operand);
}
HloInstruction* new_user = nullptr;
if (user->IsElementwise()) {
auto new_shape = [operand](HloInstruction* from) {
return ShapeUtil::MakeShape(from->shape().element_type(),
operand->shape().dimensions());
};
std::vector<HloInstruction*> new_operands;
absl::c_transform(user->operands(), std::back_inserter(new_operands),
[&](HloInstruction* op) {
auto it = replacements_.find(op);
return it != replacements_.end()
? it->second
: CreateSliceFrom(op, new_shape(op));
});
new_user = user->AddInstruction(
user->CloneWithNewOperands(new_shape(user), new_operands));
} else {
auto* gte = Cast<HloGetTupleElementInstruction>(user);
TF_ASSIGN_OR_RETURN(new_user,
MakeGetTupleElementHlo(operand, gte->tuple_index(),
&user->metadata()));
}
return ReplaceAllUsersRecursive(user, new_user);
}
absl::flat_hash_map<HloInstruction*, HloInstruction*> replacements_;
};
}
absl::StatusOr<bool> ScatterSliceSimplifier::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return ScatterSliceSimplifierVisitor{}.RunOnModule(module, execution_threads);
}
} | #include "xla/service/gpu/scatter_slice_simplifier.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace m = ::xla::match;
using ScatterSliceSimplifierTest = HloTestBase;
TEST_F(ScatterSliceSimplifierTest, Scatter1D) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
%add_F32 {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY main {
%indices = s32[4] parameter(0)
%updates = f32[4] parameter(1)
%operands = f32[9] constant(0)
%scatter = f32[9] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32
ROOT %slice = f32[8] slice(%scatter), slice={[0:8]}
}
)")
.value();
ScatterSliceSimplifier test_pass;
ASSERT_TRUE(RunHloPass(&test_pass, module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Scatter(m::Slice(m::Constant()), m::Parameter(0),
m::Parameter(1))
.WithShape(F32, {8})));
}
TEST_F(ScatterSliceSimplifierTest, Scatter3D) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
%add_F32 {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY main {
%indices = s32[2] parameter(0)
%updates = f32[2,4,4] parameter(1)
%operands = f32[5,4,4] constant(0)
%scatter = f32[5,4,4] scatter(%operands, %indices, %updates), update_window_dims={1,2}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32
ROOT %slice = f32[4,4,4] slice(%scatter), slice={[0:4], [0:4], [0:4]}
}
)")
.value();
ScatterSliceSimplifier test_pass;
ASSERT_TRUE(RunHloPass(&test_pass, module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Scatter(m::Slice(m::Constant()), m::Parameter(0),
m::Parameter(1))
.WithShape(F32, {4, 4, 4})));
}
TEST_F(ScatterSliceSimplifierTest, ScatterMultiOutput) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
%add_F32_add_F16 {
%lhs.0 = f32[] parameter(0)
%rhs.0 = f32[] parameter(2)
%add.0 = f32[] add(%lhs.0, %rhs.0)
%lhs.1 = f16[] parameter(1)
%rhs.1 = f16[] parameter(3)
%add.1 = f16[] add(%lhs.1, %rhs.1)
ROOT %tuple = (f32[], f16[]) tuple(%add.0, %add.1)
}
ENTRY main {
%indices = s32[4] parameter(0)
%updates.0 = f32[4] parameter(1)
%updates.1 = f16[4] parameter(2)
%operands.0 = f32[9] constant(0)
%operands.1 = f16[9] constant(0)
%scatter = (f32[9], f16[9]) scatter(%operands.0, %operands.1, %indices, %updates.0, %updates.1), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32_add_F16
%gte.0 = f32[9] get-tuple-element(%scatter), index=0
%slice.0 = f32[8] slice(%gte.0), slice={[0:8]}
%gte.1 = f16[9] get-tuple-element(%scatter), index=1
%slice.1 = f16[8] slice(%gte.1), slice={[0:8]}
ROOT %tuple = (f32[8], f16[8]) tuple(%slice.0, %slice.1)
}
)")
.value();
ScatterSliceSimplifier test_pass;
ASSERT_TRUE(RunHloPass(&test_pass, module.get()).value());
auto expected_scatter =
m::Scatter(m::Slice(m::Constant()), m::Slice(m::Constant()),
m::Parameter(0), m::Parameter(1), m::Parameter(2));
Shape expected_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {8}), ShapeUtil::MakeShape(F16, {8})});
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::GetTupleElement(expected_scatter),
m::GetTupleElement(expected_scatter))
.WithShapeEqualTo(&expected_shape)));
}
TEST_F(ScatterSliceSimplifierTest, NotMatching) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
%add_F32 {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
slice_not_truncation {
%indices = s32[4] parameter(0)
%updates = f32[4] parameter(1)
%operands = f32[9] constant(0)
%scatter = f32[9] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32
ROOT %slice = f32[8] slice(%scatter), slice={[1:9]}
}
slice_with_stride {
%indices = s32[4] parameter(0)
%updates = f32[4] parameter(1)
%operands = f32[9] constant(0)
%scatter = f32[9] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32
ROOT %slice = f32[4] slice(%scatter), slice={[0:8:2]}
}
scatter_multiple_users {
%indices = s32[4] parameter(0)
%updates = f32[4] parameter(1)
%operands = f32[9] constant(0)
%scatter = f32[9] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32
%slice = f32[8] slice(%scatter), slice={[0:8]}
ROOT %tuple = (f32[9], f32[8]) tuple(%scatter, %slice)
}
scatter_incompatible_slices {
%indices = s32[2] parameter(0)
%updates = f32[2,4] parameter(1)
%operands = f32[4,4] constant(0)
%scatter = f32[4,4] scatter(%operands, %indices, %updates), update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32
%slice.0 = f32[3,4] slice(%scatter), slice={[0:3], [0:4]}
%slice.1 = f32[4,3] slice(%scatter), slice={[0:4], [0:3]}
ROOT %tuple = (f32[3,4], f32[4,3]) tuple(%slice.0, %slice.1)
}
slice_not_found {
%indices = s32[4] parameter(0)
%updates = f32[4] parameter(1)
%operands = f32[8] constant(0)
%scatter = f32[8] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32
ROOT %exp = f32[8] exponential(%scatter)
}
slice_update_dimensions {
%indices = s32[10] parameter(0)
%updates = f32[10,1,128] parameter(1)
%operands = f32[100,128] constant(0)
%scatter = f32[100,128] scatter(%operands, %indices, %updates), update_window_dims={1,2}, inserted_window_dims={}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32
ROOT %slice = f32[100,64] slice(%scatter), slice={[0:100], [0:64]}
}
)")
.value();
ScatterSliceSimplifier test_pass;
ASSERT_FALSE(RunHloPass(&test_pass, module.get()).value());
}
TEST_F(ScatterSliceSimplifierTest, IntermediaryUsers) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
%add_F32 {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY main {
%indices = s32[4] parameter(0)
%updates = f32[4] parameter(1)
%operands = f32[9] constant(0)
%scatter = f32[9] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32
%unary = f32[9] abs(%scatter)
%slice.0 = f32[8] slice(%unary), slice={[0:8]}
%binary = f32[9] maximum(%scatter, %operands)
%slice.1 = f32[8] slice(%binary), slice={[0:8]}
ROOT %tuple = (f32[8], f32[8]) tuple(%slice.0, %slice.1)
}
)")
.value();
ScatterSliceSimplifier test_pass;
ASSERT_TRUE(RunHloPass(&test_pass, module.get()).value());
auto expected_scatter =
m::Scatter(m::Slice(m::Constant()), m::Parameter(0), m::Parameter(1));
Shape expected_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {8}), ShapeUtil::MakeShape(F32, {8})});
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Abs(expected_scatter),
m::Maximum(expected_scatter, m::Slice(m::Constant())))
.WithShapeEqualTo(&expected_shape)));
}
TEST_F(ScatterSliceSimplifierTest, IntermediaryChain) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
%add_F32 {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY main {
%indices = s32[4] parameter(0)
%updates = f32[4] parameter(1)
%operands = f32[9] constant(0)
%scatter = f32[9] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32
%elementwise.0 = f32[9] abs(%scatter)
%elementwise.1 = f32[9] exponential(%elementwise.0)
%elementwise.2 = f32[9] add(%elementwise.0, %elementwise.1)
ROOT %result = f32[8] slice(%elementwise.2), slice={[0:8]}
}
)")
.value();
ScatterSliceSimplifier test_pass;
ASSERT_TRUE(RunHloPass(&test_pass, module.get()).value());
auto expected_scatter =
m::Scatter(m::Slice(m::Constant()), m::Parameter(0), m::Parameter(1));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Add(m::Abs(expected_scatter),
m::Exp(m::Abs(expected_scatter)))
.WithShape(F32, {8})));
}
TEST_F(ScatterSliceSimplifierTest, DiamondShape) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
%add_F32_mul_F32 {
%lhs.0 = f32[] parameter(0)
%rhs.0 = f32[] parameter(2)
%add.0 = f32[] add(%lhs.0, %rhs.0)
%lhs.1 = f32[] parameter(1)
%rhs.1 = f32[] parameter(3)
%mul.1 = f32[] multiply(%lhs.1, %rhs.1)
ROOT %tuple = (f32[], f32[]) tuple(%add.0, %mul.1)
}
ENTRY main {
%indices = s32[4] parameter(0)
%updates.0 = f32[4] parameter(1)
%updates.1 = f32[4] parameter(2)
%operands.0 = f32[9] constant(0)
%operands.1 = f32[9] constant(0)
%scatter = (f32[9], f32[9]) scatter(%operands.0, %operands.1, %indices, %updates.0, %updates.1), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32_mul_F32
%gte.0 = f32[9] get-tuple-element(%scatter), index=0
%gte.1 = f32[9] get-tuple-element(%scatter), index=1
%consumer = f32[9] add(%gte.0, %gte.1)
ROOT %slice = f32[8] slice(%consumer), slice={[0:8]}
}
)")
.value();
ScatterSliceSimplifier test_pass;
ASSERT_TRUE(RunHloPass(&test_pass, module.get()).value());
auto expected_scatter =
m::Scatter(m::Slice(m::Constant()), m::Slice(m::Constant()),
m::Parameter(0), m::Parameter(1), m::Parameter(2));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Add(m::GetTupleElement(expected_scatter),
m::GetTupleElement(expected_scatter))
.WithShape(F32, {8})));
}
TEST_F(ScatterSliceSimplifierTest, ElementwiseSelect) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
%add_F32 {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY main {
%indices = s32[4] parameter(0)
%updates = f32[4] parameter(1)
%operands = f32[9] constant(0)
%scatter = f32[9] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32
%pred_ = pred[9] parameter(2)
%select = f32[9] select(%pred_, %scatter, %operands)
ROOT %slice = f32[8] slice(%select), slice={[0:8]}
}
)")
.value();
ScatterSliceSimplifier test_pass;
ASSERT_TRUE(RunHloPass(&test_pass, module.get()).value());
auto expected_scatter =
m::Scatter(m::Slice(m::Constant()), m::Parameter(0), m::Parameter(1));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Select(m::Slice(m::Parameter(2)), expected_scatter,
m::Slice(m::Constant()))
.WithShape(F32, {8})));
}
}
} |
2,077 | cpp | tensorflow/tensorflow | gpu_compiler | third_party/xla/xla/service/gpu/gpu_compiler.cc | third_party/xla/xla/service/gpu/gpu_compiler_test.cc | #ifndef XLA_SERVICE_GPU_GPU_COMPILER_H_
#define XLA_SERVICE_GPU_GPU_COMPILER_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "llvm/IR/Module.h"
#include "xla/autotune_results.pb.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_module_group.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/compiler.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/buffer_sharing.h"
#include "xla/service/gpu/compile_module_to_llvm_ir.h"
#include "xla/service/gpu/executable.pb.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/llvm_compiler.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_description.pb.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace gpu {
class GpuCompiler : public LLVMCompiler {
public:
GpuCompiler(se::Platform::Id platform_id, const char* target_triple,
const char* data_layout);
using LLVMCompiler::Compile;
absl::StatusOr<std::unique_ptr<HloModule>> RunHloPasses(
std::unique_ptr<HloModule> module, se::StreamExecutor* stream_exec,
const CompileOptions& options) override;
absl::StatusOr<std::unique_ptr<Executable>> RunBackend(
std::unique_ptr<HloModule> module, se::StreamExecutor* stream_exec,
const CompileOptions& options) override;
absl::StatusOr<std::vector<std::unique_ptr<AotCompilationResult>>>
CompileAheadOfTime(std::unique_ptr<HloModuleGroup> module_group,
AotCompilationOptions const& options) override;
se::Platform::Id PlatformId() const override { return platform_id_; }
HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const override;
absl::StatusOr<std::unique_ptr<AotCompilationResult>>
LoadAotCompilationResult(const std::string& serialized_aot_result) override;
static absl::StatusOr<std::unique_ptr<AotCompilationResult>>
LoadAotCompilationResultStatic(const std::string& serialized_aot_result);
absl::StatusOr<std::unique_ptr<AotCompilationResult>> Export(
Executable* executable) const override;
absl::Status RunPostSchedulingPipelines(
HloModule* module, int64_t scheduler_mem_limit,
const se::DeviceDescription& gpu_device_info) const;
std::string target_triple() const { return target_triple_; }
std::string data_layout() const { return data_layout_; }
const char* GetDataLayout() const { return data_layout_; }
const char* GetTargetTriple() const { return target_triple_; }
int64_t GetPointerSize() const { return pointer_size_; }
static absl::StatusOr<Compiler::TargetConfig> GetTargetConfig(
const Compiler::CompileOptions& options, const DebugOptions& debug_opts,
se::StreamExecutor* executor);
virtual HloDataflowAnalysis::CanShareBuffer GetCanShareBuffer() const {
return &FusionCanShareBufferHint;
}
virtual int32_t GetToolkitVersion() const = 0;
virtual absl::StatusOr<bool> CanUseLinkModules(
const HloModuleConfig& config) {
return false;
}
protected:
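  // Output of device-code compilation for one module: textual assembly
  // (e.g. PTX), the device binary, and any cuDNN-compiled graphs.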
struct BackendCompileResult {
std::string asm_text;
std::vector<uint8_t> binary;
Thunk::BinaryMap dnn_compiled_graphs;
};
virtual absl::Status OptimizeHloPostLayoutAssignment(
HloModule* hlo_module, se::StreamExecutor* stream_exec,
const CompileOptions& options, const TargetConfig& gpu_target_config,
tsl::thread::ThreadPool* thread_pool);
virtual bool RequiresCollectiveScheduleLinearizer(
const HloModule* module, se::StreamExecutor* stream_exec) {
return false;
}
virtual absl::Status AddConvAndGemmAutotuningPasses(
HloPassPipeline* pipeline, HloModule* hlo_module,
AutotuneConfig& autotune_config, tsl::thread::ThreadPool* thread_pool) {
return absl::OkStatus();
}
virtual absl::Status AddGemmFusionAutotuningPasses(
HloPassPipeline* pipeline, HloModule* hlo_module,
AutotuneConfig& autotune_config, tsl::thread::ThreadPool* thread_pool,
const MultiProcessKeyValueStore& key_value_store) {
return absl::OkStatus();
}
virtual absl::Status AddCustomKernelReplacementPasses(
HloPassPipeline* pipeline, const DebugOptions& debug_options) {
return absl::OkStatus();
}
virtual absl::Status RunCudnnFusionCompilerPass(
HloModule* module, se::StreamExecutor* stream_exec,
Thunk::BinaryMap* dnn_compiled_graphs) {
return absl::OkStatus();
}
AlgebraicSimplifierOptions GetAlgebraicSimplifierOptions(
const HloModuleConfig& config);
private:
struct CompileResultWithMetadata {
BackendCompileResult backend_result;
CompileModuleResults compile_module_results;
};
absl::StatusOr<CompileResultWithMetadata> CompileToBackendResult(
HloModule* module, llvm::LLVMContext* llvm_context,
se::StreamExecutor* executor, const CompileOptions& options,
const se::DeviceDescription& gpu_device_info);
absl::StatusOr<BackendCompileResult> CompileAndLink(
const HloModuleConfig& module_config,
CompileModuleResults& compile_module_results,
se::GpuComputeCapability gpu_version, se::StreamExecutor* stream_exec,
const CompileOptions& options, const HloModule* debug_module);
absl::StatusOr<BackendCompileResult> CompileSingleModule(
const HloModuleConfig& module_config,
se::GpuComputeCapability gpu_version, const HloModule* debug_module,
llvm::Module* llvm_module, bool relocatable,
const CompileOptions& options, std::optional<int> shard_number);
absl::Status LoadAutotuneResultsFromFile(const DebugOptions& debug_options);
absl::Status SerializeAutotuneResultsToFile(
const DebugOptions& debug_options);
absl::Status RunPreSchedulingPasses(HloModule* module,
se::StreamExecutor* stream_exec);
absl::Status OptimizeHloModule(HloModule* hlo_module,
se::StreamExecutor* stream_exec,
const CompileOptions& options,
const TargetConfig& gpu_target_config);
virtual absl::Status OptimizeHloConvolutionCanonicalization(
HloModule* hlo_module, se::GpuComputeCapability gpu_version,
se::dnn::VersionInfo dnn_version,
se::DeviceMemoryAllocator* device_allocator) = 0;
virtual absl::StatusOr<BackendCompileResult> CompileTargetBinary(
const HloModuleConfig& module_config, llvm::Module* llvm_module,
se::GpuComputeCapability gpu_version, bool relocatable,
const HloModule* debug_module, const CompileOptions& options) = 0;
absl::Status PrepareHloModuleForIrEmitting(HloModule* hlo_module);
virtual absl::StatusOr<std::vector<uint8_t>> LinkModules(
se::StreamExecutor* stream_exec,
std::vector<std::vector<uint8_t>> modules,
const DebugOptions& debug_options) {
return Unimplemented("LinkModules is not implemented.");
}
se::Platform::Id platform_id_;
const char* target_triple_;
const char* data_layout_;
const int64_t pointer_size_;
GpuCompiler(const GpuCompiler&) = delete;
GpuCompiler& operator=(const GpuCompiler&) = delete;
};
}
}
#endif
#include "xla/service/gpu/gpu_compiler.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "absl/types/variant.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/Bitcode/BitcodeWriter.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/SplitModule.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/Support/LLVM.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_module_group.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/maybe_owning.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/all_gather_broadcast_reorder.h"
#include "xla/service/all_gather_combiner.h"
#include "xla/service/all_reduce_combiner.h"
#include "xla/service/all_reduce_contiguous.h"
#include "xla/service/all_reduce_folder.h"
#include "xla/service/all_reduce_promotion.h"
#include "xla/service/all_reduce_reassociate.h"
#include "xla/service/all_reduce_splitter.h"
#include "xla/service/async_collective_creator.h"
#include "xla/service/batchnorm_expander.h"
#include "xla/service/bitcast_dtypes_expander.h"
#include "xla/service/broadcast_canonicalizer.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/call_inliner.h"
#include "xla/service/collective_permute_decomposer.h"
#include "xla/service/collective_pipeliner.h"
#include "xla/service/collectives_schedule_linearizer.h"
#include "xla/service/comparison_expander.h"
#include "xla/service/compiler.h"
#include "xla/service/conditional_canonicalizer.h"
#include "xla/service/conditional_simplifier.h"
#include "xla/service/convert_memory_placement_to_internal_annotations.h"
#include "xla/service/convert_mover.h"
#include "xla/service/convolution_4d_expander.h"
#include "xla/service/convolution_pred_expander.h"
#include "xla/service/copy_insertion.h"
#include "xla/service/cpu_gpu_shape_verifier.h"
#include "xla/service/dot_decomposer.h"
#include "xla/service/dot_merger.h"
#include "xla/service/dump.h"
#include "xla/service/dynamic_dimension_inference.h"
#include "xla/service/dynamic_dimension_simplifier.h"
#include "xla/service/dynamic_index_splitter.h"
#include "xla/service/dynamic_padder.h"
#include "xla/service/eigh_expander.h"
#include "xla/service/executable.h"
#include "xla/service/export_hlo.h"
#include "xla/service/flatten_call_graph.h"
#include "xla/service/float_normalization.h"
#include "xla/service/float_support.h"
#include "xla/service/gather_expander.h"
#include "xla/service/gather_simplifier.h"
#include "xla/service/gpu/algorithm_checker.h"
#include "xla/service/gpu/all_reduce_blueconnect.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/collective_permute_cycle_decomposer.h"
#include "xla/service/gpu/command_buffer_scheduling.h"
#include "xla/service/gpu/compile_module_to_llvm_ir.h"
#include "xla/service/gpu/conv_layout_normalization.h"
#include "xla/service/gpu/custom_kernel_fusion_rewriter.h"
#include "xla/service/gpu/dot_dimension_sorter.h"
#include "xla/service/gpu/dot_operand_converter.h"
#include "xla/service/gpu/double_buffer_loop_unrolling.h"
#include "xla/service/gpu/dynamic_slice_fusion_rewriter.h"
#include "xla/service/gpu/execution_stream_assignment.h"
#include "xla/service/gpu/fusion_pipeline.h"
#include "xla/service/gpu/fusion_wrapper.h"
#include "xla/service/gpu/gemm_broadcast_folding_rewriter.h"
#include "xla/service/gpu/gemm_fusion.h"
#include "xla/service/gpu/gemm_rewriter.h"
#include "xla/service/gpu/gemv_rewriter.h"
#include "xla/service/gpu/gpu_algebraic_simplifier.h"
#include "xla/service/gpu/gpu_all_gather_optimizer.h"
#include "xla/service/gpu/gpu_async_collective_annotator.h"
#include "xla/service/gpu/gpu_conv_rewriter.h"
#include "xla/service/gpu/gpu_convert_async_collectives_to_sync.h"
#include "xla/service/gpu/gpu_executable.h"
#include "xla/service/gpu/gpu_float_support.h"
#include "xla/service/gpu/gpu_hlo_schedule.h"
#include "xla/service/gpu/gpu_latency_hiding_scheduler.h"
#include "xla/service/gpu/gpu_layout_assignment.h"
#include "xla/service/gpu/gpu_p2p_pipeliner.h"
#include "xla/service/gpu/gpu_reduce_scatter_creator.h"
#include "xla/service/gpu/gpu_sanitize_constant_names.h"
#include "xla/service/gpu/gpu_scatter_expander.h"
#include "xla/service/gpu/gpu_spmd_pipeline.h"
#include "xla/service/gpu/gpu_windowed_einsum_handler.h"
#include "xla/service/gpu/hlo_fusion_stats.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/ir_emitter_unnested.h"
#include "xla/service/gpu/kernel_reuse_cache.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/metrics.h"
#include "xla/service/gpu/model/gpu_cost_model_stats_collection.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/move_copy_to_users.h"
#include "xla/service/gpu/pipelined_p2p_rewriter.h"
#include "xla/service/gpu/prepare_hlo_for_ir_emitting_pipeline.h"
#include "xla/service/gpu/reduction_degenerate_dim_remover.h"
#include "xla/service/gpu/reduction_dimension_grouper.h"
#include "xla/service/gpu/reduction_layout_normalizer.h"
#include "xla/service/gpu/reduction_splitter.h"
#include "xla/service/gpu/reduction_utils.h"
#include "xla/service/gpu/rename_fusions.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/service/gpu/runtime_intrinsics.h"
#include "xla/service/gpu/scatter_slice_simplifier.h"
#include "xla/service/gpu/softmax_rewriter_triton.h"
#include "xla/service/gpu/stream_attribute_annotator.h"
#include "xla/service/gpu/stream_attribute_async_wrapper.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/gpu/topk_specializer.h"
#include "xla/service/gpu/topk_splitter.h"
#include "xla/service/gpu/tree_reduction_rewriter.h"
#include "xla/service/gpu/triton_fusion_numerics_verifier.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_computation_deduplicator.h"
#include "xla/service/hlo_constant_folding.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_cse.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_pass_fix.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/hlo_rematerialization.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/host_memory_transfer_asyncifier.h"
#include "xla/service/host_offload_legalize.h"
#include "xla/service/host_offloader.h"
#include "xla/service/layout_assignment.h"
#include "xla/service/layout_normalization.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/service/logistic_expander.h"
#include "xla/service/operand_upcaster.h"
#include "xla/service/optimization_barrier_expander.h"
#include "xla/service/optimize_input_output_buffer_alias.h"
#include "xla/service/qr_expander.h"
#include "xla/service/real_imag_expander.h"
#include "xla/service/reduce_decomposer.h"
#include "xla/service/reduce_scatter_combiner.h"
#include "xla/service/reduce_scatter_reassociate.h"
#include "xla/service/reduce_window_rewriter.h"
#include "xla/service/reshape_decomposer.h"
#include "xla/service/reshape_mover.h"
#include "xla/service/result_caster.h"
#include "xla/service/rng_bit_generator_expander.h"
#include "xla/service/rng_expander.h"
#include "xla/service/scatter_expander.h"
#include "xla/service/scatter_simplifier.h"
#include "xla/service/sharding_remover.h"
#include "xla/service/simplify_fp_conversions.h"
#include "xla/service/slice_sinker.h"
#include "xla/service/slow_operation_alarm.h"
#include "xla/service/sort_simplifier.h"
#include "xla/service/stable_sort_expander.h"
#include "xla/service/stochastic_convert_decomposer.h"
#include "xla/service/sub_byte_normalization.h"
#include "xla/service/topk_rewriter.h"
#include "xla/service/transpose_folding.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/service/while_loop_all_reduce_code_motion.h"
#include "xla/service/while_loop_constant_sinking.h"
#include "xla/service/while_loop_simplifier.h"
#include "xla/service/while_loop_trip_count_annotator.h"
#include "xla/service/zero_sized_hlo_elimination.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_description.pb.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/integrations/device_mem_allocator.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/blocking_counter.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/profiler/lib/traceme.h"
#ifdef PLATFORM_GOOGLE
#include "xla/hlo/experimental/auto_sharding/auto_sharding.h"
#endif
namespace xla {
namespace gpu {
namespace {
using MaybeOwningThreadPool = MaybeOwning<tsl::thread::ThreadPool>;
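// Returns a thread pool interpreted from `parallelism`: 0 uses
// `default_thread_pool` (creating an owned pool of `default_parallelism`
// threads if none is supplied), 1 disables threading entirely, and any other
// value creates an owned pool with that many threads.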
MaybeOwningThreadPool CreateMaybeOwningThreadPool(
int parallelism, tsl::thread::ThreadPool* default_thread_pool,
int default_parallelism) {
CHECK_GE(parallelism, 0);
CHECK_GE(default_parallelism, 1);
CHECK(default_thread_pool == nullptr ||
default_thread_pool->CurrentThreadId() == -1);
auto create_thread_pool = [&](int num_threads) {
CHECK_GE(num_threads, 1);
return std::make_unique<tsl::thread::ThreadPool>(tsl::Env::Default(), "",
num_threads);
};
switch (parallelism) {
case 0:
if (default_thread_pool == nullptr && default_parallelism > 1) {
return MaybeOwningThreadPool(create_thread_pool(default_parallelism));
}
return MaybeOwningThreadPool(default_thread_pool);
case 1:
return MaybeOwningThreadPool(nullptr);
default:
return MaybeOwningThreadPool(create_thread_pool(parallelism));
}
}
absl::StatusOr<AutotuneConfig> GetAutotuneConfig(
se::StreamExecutor* stream_exec, const DebugOptions& debug_options,
const GpuCompiler::CompileOptions& options,
const Compiler::TargetConfig& gpu_target_config) {
if (stream_exec) {
return AutotuneConfig{DeviceConfig{stream_exec, options.device_allocator},
debug_options};
}
return AutotuneConfig{
DevicelessConfig{gpu_target_config.device_description_str},
debug_options};
}
se::GpuComputeCapability GetGpuVersion(const se::StreamExecutor* stream_exec) {
return stream_exec->GetDeviceDescription().gpu_compute_capability();
}
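// Ahead-of-time compilation result for the thunk runtime: bundles the
// optimized HLO module, its buffer assignment, the compiled asm/binary, and
// cuDNN-compiled graphs into a serializable CompilationResultProto.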
class GpuThunkAotCompilationResult : public AotCompilationResult {
public:
static absl::StatusOr<std::unique_ptr<GpuThunkAotCompilationResult>>
FromModule(const HloModule* hlo_module,
const BufferAssignment* buffer_assignment,
std::string_view asm_text, absl::Span<const uint8_t> binary,
const Thunk::BinaryMap& dnn_compiled_graphs) {
CompilationResultProto proto;
*proto.mutable_hlo_module_with_config() = hlo_module->ToProtoWithConfig();
*proto.mutable_buffer_assignment() = buffer_assignment->ToProto();
proto.set_asm_text(std::string(asm_text));
proto.set_binary(binary.data(), binary.size());
proto.mutable_dnn_compiled_graphs()->insert(dnn_compiled_graphs.cbegin(),
dnn_compiled_graphs.cend());
return std::unique_ptr<GpuThunkAotCompilationResult>(
new GpuThunkAotCompilationResult(hlo_module->Clone(),
std::move(proto)));
}
static absl::StatusOr<std::unique_ptr<GpuThunkAotCompilationResult>>
FromString(const std::string& serialized) {
CompilationResultProto proto;
if (!proto.ParseFromString(serialized)) {
return Internal(
"Failed to parse serialized GpuThunkAotCompilationResult.");
}
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloModule> module,
HloModule::CreateFromProtoWithConfig(proto.hlo_module_with_config()));
return std::unique_ptr<GpuThunkAotCompilationResult>(
new GpuThunkAotCompilationResult(std::move(module), std::move(proto)));
}
absl::StatusOr<std::string> SerializeAsString() const override {
return proto_.SerializeAsString();
}
absl::StatusOr<std::unique_ptr<Executable>> LoadExecutable(
Compiler* compiler, const se::StreamExecutor* stream_exec) const override;
const HloModule* optimized_module() const override { return module_.get(); }
std::unique_ptr<HloModule> consume_optimized_module() override {
return std::move(module_);
}
private:
GpuThunkAotCompilationResult(std::unique_ptr<HloModule> module,
CompilationResultProto proto)
: module_(std::move(module)), proto_(std::move(proto)) {}
std::unique_ptr<HloModule> module_;
CompilationResultProto proto_;
};
}
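// Turns the serialized result back into a runnable executable: restores the
// HLO module and buffer assignment, re-runs thunk emission through
// IrEmitterUnnested, and packages everything into a GpuExecutable.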
absl::StatusOr<std::unique_ptr<Executable>>
GpuThunkAotCompilationResult::LoadExecutable(
Compiler* compiler, const se::StreamExecutor* stream_exec) const {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloModule> hlo_module,
HloModule::CreateFromProtoWithConfig(proto_.hlo_module_with_config()));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<BufferAssignment> buffer_assignment,
BufferAssignment::FromProto(proto_.buffer_assignment(), hlo_module.get(),
compiler->BufferSizeBytesFunction(),
nullptr));
ExecutionStreamAssignment execution_stream_assignment(hlo_module.get());
std::vector<uint8_t> binary(proto_.binary().begin(), proto_.binary().end());
TF_ASSIGN_OR_RETURN(
se::Platform * platform,
se::PlatformManager::PlatformWithId(compiler->PlatformId()));
std::string platform_name = platform->Name();
const se::DeviceDescription& gpu_device_info =
stream_exec->GetDeviceDescription();
mlir::DialectRegistry registry;
auto mlir_context = std::make_unique<mlir::MLIRContext>(registry);
llvm::LLVMContext llvm_context;
auto* gpu_compiler = dynamic_cast<GpuCompiler*>(compiler);
if (gpu_compiler == nullptr) {
return Internal("Compiler is not a GpuCompiler.");
}
auto llvm_module = std::make_unique<llvm::Module>("", llvm_context);
llvm_module->setTargetTriple(gpu_compiler->target_triple());
llvm_module->setDataLayout(gpu_compiler->data_layout());
IrEmitterContext ir_emitter_context(
hlo_module.get(), buffer_assignment.get(), &execution_stream_assignment,
platform_name, gpu_device_info, mlir_context.get(), llvm_module.get(),
nullptr,
false);
auto ir_emitter = IrEmitterUnnested::Create(&ir_emitter_context);
TF_RETURN_IF_ERROR(
ir_emitter->EmitHloComputation(hlo_module->entry_computation()));
std::vector<GpuExecutable::ConstantInfo> constants =
std::move(ir_emitter_context.constants());
TF_ASSIGN_OR_RETURN(auto output_info,
GetOutputInfo(*hlo_module, *buffer_assignment));
const Shape& output_shape = hlo_module->result_shape();
int64_t debug_buffer_assignment_show_max =
hlo_module->config()
.debug_options()
.xla_debug_buffer_assignment_show_max();
TF_ASSIGN_OR_RETURN(
std::unique_ptr<GpuExecutable> executable,
GpuExecutable::Create(GpuExecutable::Params{
proto_.asm_text(),
binary,
Thunk::BinaryMap(proto_.dnn_compiled_graphs().cbegin(),
proto_.dnn_compiled_graphs().cend()),
gpu_device_info.gpu_compute_capability(),
ir_emitter->ConsumeThunkSequence(),
std::move(constants),
std::move(output_info),
std::move(hlo_module->name()),
std::move(output_shape),
std::nullopt,
std::move(buffer_assignment),
debug_buffer_assignment_show_max,
std::move(hlo_module),
true}));
return executable;
}
GpuCompiler::GpuCompiler(se::Platform::Id platform_id,
const char* target_triple, const char* data_layout)
: platform_id_(platform_id),
target_triple_(target_triple),
data_layout_(data_layout),
pointer_size_(llvm::DataLayout(data_layout)
                        .getPointerSize(0)) {}
namespace {
void AddHloVerifier(HloPassPipeline* pipeline, HloVerifierOpts&& opts = {},
bool debug_only = false) {
std::unique_ptr<TargetVerifierMetadata> verifier_metadata =
std::make_unique<CpuGpuVerifierMetadata>(std::move(opts));
if (debug_only) {
pipeline->AddInvariantCheckerDebug<HloVerifier>(
std::move(verifier_metadata), "hlo verifier (debug)");
} else {
pipeline->AddInvariantChecker<HloVerifier>(std::move(verifier_metadata),
"hlo verifier");
}
}
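// Warns when the input module already carries a schedule, since re-running
// the optimization pipeline on a scheduled and optimized module is almost
// always unintended.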
void CheckNotScheduled(HloModule* hlo_module) {
if (hlo_module->has_schedule() &&
!hlo_module->config().debug_options().xla_disable_all_hlo_passes()) {
LOG(WARNING) << "\nThe current HLO module " << hlo_module->name()
<< " is scheduled and optimized. \n"
<< "It is not expected to run optimization passes again.\n"
"Use a test method like RunAndCompareNoHloPasses() or "
<< "the xla_disable_all_hlo_passes flag.";
}
}
void LogDebugOptions(HloModule* hlo_module) {
XLA_VLOG_LINES(
1, absl::StrFormat("GpuCompilationEnvironment of hlo_module %s:\n%s",
hlo_module->name(),
hlo_module->config().debug_options().DebugString()));
}
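// Builds the layout-insensitive algebraic-simplifier options for GPU: dot
// strength reduction follows its debug flag, NaN propagation in min/max is
// disabled when fast-min-max is enabled, non-canonical dots are left alone,
// and conv operand swapping is disabled on ROCm.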
AlgebraicSimplifierOptions LayoutInsensitiveAlgebraicSimplifierOptions(
const HloModuleConfig& hlo_module_config,
const Compiler::TargetConfig& gpu_target_config,
AlgebraicSimplifierOptions opts_from_compiler) {
AlgebraicSimplifierOptions layout_insensitive_algsimp_opts =
opts_from_compiler;
layout_insensitive_algsimp_opts.set_conv_is_lowerable_callback(
GpuConvRewriter::ConvIsLowerable);
layout_insensitive_algsimp_opts.set_enable_dot_strength_reduction(
hlo_module_config.debug_options()
.xla_gpu_enable_dot_strength_reduction());
layout_insensitive_algsimp_opts.set_supports_non_canonical_dots(false);
layout_insensitive_algsimp_opts.set_minmax_propagate_nan(
!hlo_module_config.debug_options().xla_gpu_enable_fast_min_max());
layout_insensitive_algsimp_opts
.set_unconditionally_simplify_reduce_of_transpose_or_reshape(true);
if (gpu_target_config.platform_name == "ROCM") {
layout_insensitive_algsimp_opts.set_enable_conv_operand_swap(false);
}
layout_insensitive_algsimp_opts
.set_enable_unconditional_reduce_of_concat_replacement(false);
return layout_insensitive_algsimp_opts;
}
absl::Status RunPreSPMDPartitionerPasses(HloModule* hlo_module) {
HloPassPipeline pre_spmd_pipeline("pre-spmd-partitioner"); | #include "xla/service/gpu/gpu_compiler.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/autotune_results.pb.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/gpu_hlo_schedule.h"
#include "xla/service/gpu/metrics.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/xla_debug_info_manager.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
using ::testing::IsEmpty;
using ::testing::Not;
using ::testing::TempDir;
using ::tsl::testing::StatusIs;
class GpuCompilerTest : public HloTestBase {
public:
absl::Status Schedule(HloModule* module) {
auto compiler = backend().compiler();
const se::DeviceDescription& gpu_device_info =
backend().default_stream_executor()->GetDeviceDescription();
TF_RETURN_IF_ERROR(ScheduleGpuModule(module, 4, gpu_device_info).status());
return tensorflow::down_cast<GpuCompiler*>(compiler)
->RunPostSchedulingPipelines(module, 4 * 1024 * 1024, gpu_device_info);
}
};
TEST_F(GpuCompilerTest, CompiledProgramsCount) {
const char* hlo_text = R"(
HloModule test
ENTRY main {
p = f32[10]{0} parameter(0)
ROOT neg = f32[10]{0} negate(p)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_text).value();
ResetCompiledProgramsCountForTesting();
std::unique_ptr<Executable> executable =
backend()
.compiler()
->RunBackend(std::move(module), backend().default_stream_executor(),
{nullptr,
nullptr,
{},
false})
.value();
EXPECT_EQ(GetCompiledProgramsCount(), 1);
}
TEST_F(GpuCompilerTest, GenerateDebugInfoForNonAutotuningCompilations) {
const char* hlo_text = R"(
HloModule test
ENTRY main {
p = f32[10]{0} parameter(0)
ROOT neg = f32[10]{0} negate(p)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_text).value();
std::unique_ptr<Executable> executable =
backend()
.compiler()
->RunBackend(std::move(module), backend().default_stream_executor(),
{nullptr,
nullptr,
{},
false})
.value();
EXPECT_TRUE(XlaDebugInfoManager::Get()->TracksModule(
executable->module().unique_id()));
}
TEST_F(GpuCompilerTest, DoesNotGenerateDebugInfoForAutotuningCompilations) {
const char* hlo_text = R"(
HloModule test
ENTRY main {
p = f32[10]{0} parameter(0)
ROOT neg = f32[10]{0} negate(p)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_text).value();
int module_id = module->unique_id();
std::unique_ptr<Executable> executable =
backend()
.compiler()
->RunBackend(std::move(module), backend().default_stream_executor(),
{nullptr,
nullptr,
{},
true})
.value();
EXPECT_FALSE(XlaDebugInfoManager::Get()->TracksModule(module_id));
}
TEST_F(GpuCompilerTest, CopyInsertionFusion) {
const char* hlo_text = R"(
HloModule cluster
ENTRY main {
cst = f32[1]{0} constant({0})
ROOT tuple_out = (f32[1]{0}, f32[1]{0}, f32[1]{0}, f32[1]{0}) tuple(cst, cst, cst, cst)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{0, 0}));
auto module = ParseAndReturnVerifiedModule(hlo_text).value();
std::unique_ptr<HloModule> compiled_module =
backend()
.compiler()
->RunHloPasses(module->Clone(), backend().default_stream_executor(),
nullptr)
.value();
VLOG(2) << compiled_module->ToString();
size_t total_fusion_instrs = 0;
for (const HloInstruction* instr :
compiled_module->entry_computation()->instructions()) {
if (instr->opcode() == HloOpcode::kFusion) {
++total_fusion_instrs;
}
}
EXPECT_EQ(total_fusion_instrs, 1);
const HloInstruction* entry_root =
compiled_module->entry_computation()->root_instruction();
EXPECT_THAT(
entry_root,
GmockMatch(m::Tuple(
m::GetTupleElement(m::Fusion()), m::GetTupleElement(m::Fusion()),
m::GetTupleElement(m::Fusion()), m::GetTupleElement(m::Fusion()))));
}
TEST_F(GpuCompilerTest, CanRunScheduledModules) {
HloModuleConfig config;
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_disable_all_hlo_passes(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule m, is_scheduled=true
w {
p = s8[] parameter(0)
ROOT n = s8[] negate(p)
}
ENTRY e {
p = s8[] parameter(0)
ROOT _ = s8[] fusion(p), kind=kLoop, calls=w
})",
config));
EXPECT_TRUE(Run(std::move(module), true));
}
class PersistedAutotuningTest : public HloTestBase {
protected:
static constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
p0 = f16[1,16,17,3] parameter(0)
p1 = s8[16,17,3] parameter(1)
cp1 = f16[16,17,3] convert(p1)
ROOT _ = f16[1,16,16] dot(p0, cp1),
lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2}
})";
std::string GetUniqueTempFilePath(absl::string_view suffix) {
std::string filename = TempDir();
CHECK(tsl::Env::Default()->CreateUniqueFileName(&filename,
std::string(suffix)));
return filename;
}
std::string ExpectToReadNonEmptyFile(absl::string_view file_path) {
std::string str;
tsl::Env* env = tsl::Env::Default();
TF_EXPECT_OK(tsl::ReadFileToString(env, std::string(file_path), &str));
EXPECT_THAT(str, Not(IsEmpty()));
return str;
}
DebugOptions GetDebugOptionsForTest() override {
DebugOptions options = HloTestBase::GetDebugOptionsForTest();
options.set_xla_gpu_dump_autotune_results_to(
xla_gpu_dump_autotune_results_to_);
options.set_xla_gpu_load_autotune_results_from(
xla_gpu_load_autotune_results_from_);
return options;
}
std::string xla_gpu_dump_autotune_results_to_;
std::string xla_gpu_load_autotune_results_from_;
};
TEST_F(PersistedAutotuningTest, WriteResultsOnEachCompilation) {
constexpr absl::string_view kInvalidTextProto = "Invalid!";
xla_gpu_dump_autotune_results_to_ = GetUniqueTempFilePath(".txt");
TF_EXPECT_OK(GetOptimizedModule(kHloText).status());
{
std::string autotune_results_str =
ExpectToReadNonEmptyFile(xla_gpu_dump_autotune_results_to_);
AutotuneResults results;
EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(autotune_results_str,
&results));
}
tsl::Env* env = tsl::Env::Default();
TF_EXPECT_OK(tsl::WriteStringToFile(env, xla_gpu_dump_autotune_results_to_,
kInvalidTextProto));
TF_EXPECT_OK(GetOptimizedModule(kHloText).status());
{
std::string autotune_results_str =
ExpectToReadNonEmptyFile(xla_gpu_dump_autotune_results_to_);
AutotuneResults results;
EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(autotune_results_str,
&results));
}
}
int64_t CountCopies(const HloComputation& computation) {
int64_t count = 0;
for (const auto& instruction : computation.instructions()) {
if (instruction->opcode() == HloOpcode::kCopy) {
count++;
}
}
return count;
}
int64_t CountCopies(const HloModule& module) {
int64_t count = 0;
for (const auto& computation : module.computations()) {
count += CountCopies(*computation);
}
return count;
}
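// Usage sketch for the helpers above: given any optimized module,
//   int64_t copies = CountCopies(*module);
// sums the kCopy instructions across all computations; the next test uses
// it to verify that scheduling removes one redundant copy.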
TEST_F(GpuCompilerTest, RemovesUnnecessaryCopyAfterScheduling) {
const absl::string_view hlo_string = R"(
HloModule all_gather_overlapping
condition {
input_tuple = (f32[1,128], f32[2,128], pred[]) parameter(0)
ROOT cond = pred[] get-tuple-element(input_tuple), index=2
}
body {
input_tuple = (f32[1,128], f32[2,128], pred[]) parameter(0)
param_0 = f32[1,128] get-tuple-element(input_tuple), index=0
param_1 = f32[2,128] get-tuple-element(input_tuple), index=1
cond = pred[] get-tuple-element(input_tuple), index=2
c0 = f32[] constant(0)
splat_c0 = f32[1,128] broadcast(c0), dimensions={}
add = f32[1,128] add(splat_c0, param_0)
all-gather-start = (f32[1,128], f32[2,128]) all-gather-start(add), channel_id=1337, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true
c1_s32 = s32[] constant(1)
c0_s32 = s32[] constant(0)
dynamic-slice = f32[1,128] dynamic-slice(param_1, c1_s32, c0_s32), dynamic_slice_sizes={1,128}
all-gather-done = f32[2,128] all-gather-done(all-gather-start)
ROOT output_tuple = (f32[1,128], f32[2,128], pred[]) tuple(dynamic-slice, all-gather-done, cond)
}
ENTRY main {
param_0 = f32[1,128] parameter(0)
param_1 = f32[2,128] parameter(1)
param_2 = pred[] parameter(2)
tuple = (f32[1,128], f32[2,128], pred[]) tuple(param_0, param_1, param_2)
ROOT while = (f32[1,128], f32[2,128], pred[]) while(tuple), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
GetOptimizedModule(hlo_string));
EXPECT_EQ(CountCopies(*module), 5);
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* while_op = root->operand(0)->operand(0);
EXPECT_EQ(while_op->while_body()->root_instruction()->operand(1)->opcode(),
HloOpcode::kCopy);
TF_ASSERT_OK(Schedule(module.get()));
EXPECT_EQ(CountCopies(*module), 4);
  root = module->entry_computation()->root_instruction();
while_op = root->operand(0)->operand(0);
EXPECT_EQ(while_op->while_body()->root_instruction()->operand(1)->opcode(),
HloOpcode::kAllGatherDone);
}
TEST_F(GpuCompilerTest,
GemmFusionIsNoOpWhenGemmFusionAutotunerFallsBackToCublas) {
  GTEST_SKIP() << "TODO(b/344573710): this test is flaky, disable it "
               << "until flakiness is fixed.";
auto cc = backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
if (!cc.IsAtLeastAmpere()) {
GTEST_SKIP() << "Autotuning results have only been generated for Ampere "
<< "and Hopper GPUs";
}
const absl::string_view hlo_string = R"(
HloModule test
ENTRY main {
param_0 = bf16[3,32,1024,4,1024]{4,3,2,1,0} parameter(0)
param_1 = bf16[4,3,32,1024]{3,2,1,0} parameter(1)
param_2 = s32[] parameter(2)
constant_0 = s32[] constant(0)
dynamic-slice_0 = bf16[1,3,32,1024]{3,2,1,0} dynamic-slice(param_1, param_2, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,3,32,1024}
reshape_0 = bf16[3,32,1024]{2,1,0} reshape(dynamic-slice_0)
broadcast_0 = bf16[3,32,1024,4,1024]{2,1,4,3,0} broadcast(reshape_0), dimensions={0,1,2}
add_0 = bf16[3,32,1024,4,1024]{4,3,2,1,0} add(param_0, broadcast_0)
transpose_0 = bf16[3,4,1024,32,1024]{2,1,4,3,0} transpose(add_0), dimensions={0,3,4,1,2}
slice_0 = bf16[1,4,1024,32,1024]{4,3,2,1,0} slice(transpose_0), slice={[0:1], [0:4], [0:1024], [0:32], [0:1024]}
reshape_1 = bf16[4,1024,32,1024]{3,2,1,0} reshape(slice_0)
copy_0 = bf16[4,1024,32,1024]{3,2,1,0} copy(reshape_1)
constant_1 = bf16[] constant(0.08838)
broadcast_1 = bf16[4,1024,32,1024]{3,2,1,0} broadcast(constant_1), dimensions={}
multiply_0 = bf16[4,1024,32,1024]{3,2,1,0} multiply(copy_0, broadcast_1)
slice_1 = bf16[1,4,1024,32,1024]{4,3,2,1,0} slice(transpose_0), slice={[1:2], [0:4], [0:1024], [0:32], [0:1024]}
reshape_2 = bf16[4,1024,32,1024]{3,2,1,0} reshape(slice_1)
copy_1 = bf16[4,1024,32,1024]{3,2,1,0} copy(reshape_2)
ROOT dot_0 = bf16[4,32,1024,1024]{3,2,1,0} dot(multiply_0, copy_1), lhs_batch_dims={0,2}, lhs_contracting_dims={3}, rhs_batch_dims={0,2}, rhs_contracting_dims={3}
}
)";
HloModuleConfig config;
DebugOptions triton_enabled_debug_options = GetDebugOptionsForTest();
triton_enabled_debug_options.set_xla_gpu_enable_address_computation_fusion(
false);
triton_enabled_debug_options
.set_xla_gpu_require_complete_aot_autotune_results(true);
config.set_debug_options(triton_enabled_debug_options);
config.set_replica_count(1);
config.set_num_partitions(1);
std::string path =
tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "service", "gpu",
"gpu_compiler_test_autotune_db.textproto");
TF_EXPECT_OK(AutotunerUtil::LoadAutotuneResultsFromFile(path));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string, config));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> triton_enabled_module,
GetOptimizedModule(std::move(module)));
AutotunerUtil::ClearAutotuneResults();
DebugOptions triton_disabled_debug_options = GetDebugOptionsForTest();
triton_disabled_debug_options.set_xla_gpu_enable_address_computation_fusion(
false);
triton_disabled_debug_options.set_xla_gpu_enable_triton_gemm(false);
config.set_debug_options(triton_disabled_debug_options);
TF_ASSERT_OK_AND_ASSIGN(module,
ParseAndReturnVerifiedModule(hlo_string, config));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> triton_disabled_module,
GetOptimizedModule(std::move(module)));
const HloInstruction* root =
triton_enabled_module->entry_computation()->root_instruction();
const HloInstruction* custom_op = root->operand(0)->operand(0);
EXPECT_TRUE(custom_op->IsCustomCall("__cublas$gemm"));
EXPECT_EQ(triton_enabled_module->computation_count(),
triton_disabled_module->computation_count());
}
TEST_F(GpuCompilerTest, CollectivePermuteDecompositionAndPipelining) {
const char* kModuleStr = R"(
HloModule cp
cond {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(11)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
send-data = get-tuple-element(%param), index=1
recv-data = f32[1, 1024, 1024] collective-permute(send-data),
source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}, channel_id=1
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
replica = u32[] replica-id()
c10 = u32[] constant(10)
sum = u32[] add(replica, c10)
sum2 = u32[] add(sum, count)
conv = f32[] convert(sum2)
p = f32[1, 1024, 1024] broadcast(conv), dimensions={}
b = f32[1, 1024, 1024] add(p, recv-data)
c = f32[1, 1024, 1024] multiply(b, b)
d = f32[1, 1024, 1024] tan(c)
s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
ROOT result = (u32[], f32[1, 1024, 1024]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
while_init = (u32[], f32[1, 1024, 1024]) tuple(c0, init)
while_result = (u32[], f32[1, 1024, 1024]) while(while_init), body=body, condition=cond
ROOT result = f32[1, 1024, 1024] get-tuple-element(while_result), index=1
}
)";
const char* kExpected = R"(
CHECK: recv-done
CHECK-SAME: channel_id=[[CHANNEL_ID:[0-9]+]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: send-done
CHECK-SAME: channel_id=[[CHANNEL_ID]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: %[[CUSTOM_CALL:.*]] = custom-call
CHECK: %[[AFTER_ALL:.*]] = after-all
CHECK: %[[RESULT_RECV:.*]] = recv(%[[AFTER_ALL]])
CHECK-SAME: channel_id=[[CHANNEL_ID]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0",
CHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3},{3,4}}"},
CHECK-SAME: control-predecessors={%[[CUSTOM_CALL]]}
CHECK: %[[RESULT_SEND:.*]] = send(%[[SOME_SEND_ARG:.*]], %[[AFTER_ALL]])
CHECK-SAME: channel_id=1
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0",
CHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3},{3,4}}"},
CHECK-SAME: control-predecessors={%[[RESULT_RECV]]}
CHECK: ROOT
CHECK-SAME: %[[RESULT_RECV]]
CHECK: ENTRY
CHECK: %[[ENTRY_AFTER_ALL:.*]] = after-all
CHECK: %[[ENTRY_RECV:.*]] = recv(%[[ENTRY_AFTER_ALL]])
CHECK-SAME: channel_id=[[CHANNEL_ID]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0",
CHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3},{3,4}}"}
CHECK: %[[ENTRY_SEND:.*]] = send(%[[SOME_SEND_ARG:.*]], %[[ENTRY_AFTER_ALL]])
CHECK-SAME: channel_id=1
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0",
CHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3},{3,4}}"},
CHECK-SAME: control-predecessors={%[[ENTRY_RECV]]}
CHECK: %[[WHILE_INIT:.*]] = tuple
CHECK-SAME: %[[ENTRY_SEND]]
CHECK: while(%[[WHILE_INIT]])
CHECK: recv-done
CHECK-SAME: channel_id=[[CHANNEL_ID]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: send-done
CHECK-SAME: channel_id=[[CHANNEL_ID]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0"}
)";
HloModuleConfig config;
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_latency_hiding_scheduler(true);
debug_options.set_xla_gpu_collective_permute_decomposer_threshold(1);
debug_options.set_xla_gpu_enable_pipelined_p2p(true);
debug_options.set_xla_gpu_enable_triton_gemm(false);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr, config));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(std::move(module)));
TF_ASSERT_OK(Schedule(optimized_module.get()));
HloPrintOptions options;
options.set_print_operand_shape(false);
options.set_print_result_shape(false);
TF_ASSERT_OK_AND_ASSIGN(
bool filecheck_matched,
RunFileCheck(optimized_module->ToString(options), kExpected));
EXPECT_TRUE(filecheck_matched);
}
class KernelCacheTest : public HloTestBase {
public:
void SetUp() override {
CHECK(tsl::Env::Default()->LocalTempFilename(&cache_file_name_));
HloModuleConfig config;
config.set_debug_options(GetDebugOptionsForTest());
TF_ASSERT_OK_AND_ASSIGN(bool can_use_link_modules,
dynamic_cast<GpuCompiler*>(backend().compiler())
->CanUseLinkModules(config));
if (!can_use_link_modules) {
      GTEST_SKIP() << "Caching compiled kernels requires support for linking.";
}
}
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_gpu_kernel_cache_file(cache_file_name_);
debug_options.set_xla_gpu_enable_llvm_module_compilation_parallelism(true);
return debug_options;
}
bool CacheFileExists() {
if (!tsl::Env::Default()->FileExists(cache_file_name_).ok()) {
return false;
}
return true;
}
int CacheEntryCount() {
if (!CacheFileExists()) {
return 0;
}
std::string serialized;
TF_EXPECT_OK(tsl::ReadFileToString(tsl::Env::Default(), cache_file_name_,
&serialized));
CompilationCacheProto proto;
EXPECT_TRUE(proto.ParseFromString(std::string(serialized)));
return proto.entries_size();
}
std::string cache_file_name_;
static constexpr absl::string_view kHloText = R"(
ENTRY e {
p = s8[] parameter(0)
c = s8[] constant(8)
ROOT _ = s8[] add(p, c)
})";
};
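// The fixture above stores the kernel cache as a serialized
// CompilationCacheProto at cache_file_name_, and CacheEntryCount() parses
// it to count cached kernels. Typical assertion sequence:
//
//   EXPECT_FALSE(CacheFileExists());
//   EXPECT_TRUE(Run(kHloText, /*run_hlo_passes=*/false));
//   EXPECT_EQ(CacheEntryCount(), 1);  // one compiled kernel cached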
TEST_F(KernelCacheTest, CacheIsGenerated) {
EXPECT_FALSE(CacheFileExists());
EXPECT_TRUE(Run(kHloText, false));
EXPECT_EQ(CacheEntryCount(), 1);
EXPECT_TRUE(Run(kHloText, false));
EXPECT_EQ(CacheEntryCount(), 1);
}
TEST_F(KernelCacheTest, NoCacheIsGeneratedWithoutCompiledKernels) {
EXPECT_FALSE(CacheFileExists());
EXPECT_TRUE(Run(R"(
ENTRY e {
a = f32[5,5] parameter(0)
ROOT _ = f32[5,5] custom-call(a, a), custom_call_target="__cublas$gemm",
backend_config="{ \"gemm_backend_config\": {\"alpha_real\":1,\"beta\":0,\"dot_dimension_numbers\":{\"lhs_contracting_dimensions\":[\"1\"],\"rhs_contracting_dimensions\":[\"0\"],\"lhs_batch_dimensions\":[],\"rhs_batch_dimensions\":[]},\"alpha_imag\":0,\"precision_config\":{\"operand_precision\":[\"DEFAULT\",\"DEFAULT\"]},\"epilogue\":\"DEFAULT\"}}"
})",
false));
EXPECT_FALSE(CacheFileExists());
}
TEST_F(KernelCacheTest, CacheGrowsWithNewKernels) {
EXPECT_FALSE(CacheFileExists());
EXPECT_TRUE(Run(kHloText, false));
EXPECT_EQ(CacheEntryCount(), 1);
EXPECT_TRUE(Run(R"(
ENTRY e {
p = s8[] parameter(0)
ROOT _ = s8[] multiply(p, p)
})",
false));
EXPECT_EQ(CacheEntryCount(), 2);
}
class KernelCacheTestSingleThreaded : public KernelCacheTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = KernelCacheTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_force_compilation_parallelism(1);
return debug_options;
}
};
TEST_F(KernelCacheTestSingleThreaded, CacheIsGenerated) {
EXPECT_FALSE(CacheFileExists());
EXPECT_TRUE(Run(kHloText, false));
EXPECT_EQ(CacheEntryCount(), 1);
EXPECT_TRUE(Run(kHloText, false));
EXPECT_EQ(CacheEntryCount(), 1);
}
class NoKernelCacheTest : public KernelCacheTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = KernelCacheTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_llvm_module_compilation_parallelism(false);
return debug_options;
}
};
TEST_F(NoKernelCacheTest, NoCacheWithoutCompilationParallelism) {
EXPECT_TRUE(Run(kHloText, false));
EXPECT_FALSE(CacheFileExists());
}
}
}
} |
2,078 | cpp | tensorflow/tensorflow | hlo_fusion_stats | third_party/xla/xla/service/gpu/hlo_fusion_stats.cc | third_party/xla/xla/service/gpu/hlo_fusion_stats_test.cc | #ifndef XLA_SERVICE_GPU_HLO_FUSION_STATS_H_
#define XLA_SERVICE_GPU_HLO_FUSION_STATS_H_
#include <cstdint>
#include <map>
#include <set>
#include <string>
#include "absl/status/status.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
namespace xla {
namespace gpu {
class HloOpcodeHistogram : public std::map<std::set<std::string>, int64_t> {
public:
std::string ToString();
};
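// Each histogram key is the set of distinct opcode labels found inside one
// fusion computation, so two kLoop fusions that both contain only
// {broadcast, compare, select} produce a single entry with count 2.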
class HloFusionStatsVisitor : public ConstDfsHloVisitorWithDefault {
public:
absl::Status RunOnModule(HloModule* module);
std::string ToString();
protected:
absl::Status DefaultAction(const xla::HloInstruction* instr) final;
absl::Status HandleFusion(const HloInstruction* fusion) override;
private:
int64_t num_fusions_ = 0;
int64_t num_loop_fusions_ = 0;
int64_t num_input_fusions_ = 0;
HloOpcodeHistogram loop_fusion_opcode_histogram_;
HloOpcodeHistogram input_fusion_opcode_histogram_;
};
}
}
#endif
#include "xla/service/gpu/hlo_fusion_stats.h"
#include <set>
#include <string>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace gpu {
namespace {
class OpcodeCollector : public ConstDfsHloVisitorWithDefault {
public:
std::set<std::string> GetUniqueOpcodes() { return opcodes_; }
protected:
absl::Status DefaultAction(const xla::HloInstruction* instr) final {
switch (instr->opcode()) {
case HloOpcode::kConstant:
break;
case HloOpcode::kParameter:
break;
case HloOpcode::kAbs:
case HloOpcode::kCbrt:
case HloOpcode::kCeil:
case HloOpcode::kCos:
case HloOpcode::kErf:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kFloor:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kLogistic:
case HloOpcode::kNegate:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kRsqrt:
case HloOpcode::kSign:
case HloOpcode::kSin:
case HloOpcode::kSqrt:
case HloOpcode::kTan:
case HloOpcode::kTanh:
case HloOpcode::kAdd:
case HloOpcode::kAtan2:
case HloOpcode::kDivide:
case HloOpcode::kMultiply:
case HloOpcode::kSubtract:
opcodes_.insert("cwise");
break;
default:
opcodes_.insert(std::string(HloOpcodeString(instr->opcode())));
}
return absl::OkStatus();
}
private:
std::set<std::string> opcodes_;
};
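// The collector above folds simple elementwise ops into the single label
// "cwise" and skips constants and parameters: a fusion body of
// {multiply, exponential} collapses to {"cwise"}, while a reduction fusion
// yields a set such as {"cwise", "reduce", "tuple"}.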
std::set<std::string> GetUniqueOpcodes(HloComputation* computation) {
OpcodeCollector collector;
if (!computation->Accept(&collector).ok()) {
return {};
}
return collector.GetUniqueOpcodes();
}
}
std::string HloOpcodeHistogram::ToString() {
std::string result;
for (const auto& entry : *this) {
absl::StrAppend(&result, "{", absl::StrJoin(entry.first, ", "),
"}: ", entry.second, "\n");
}
return result;
}
absl::Status HloFusionStatsVisitor::RunOnModule(HloModule* module) {
TF_RETURN_IF_ERROR(module->entry_computation()->Accept(this));
return absl::OkStatus();
}
std::string HloFusionStatsVisitor::ToString() {
return absl::StrCat("HLO Fusion Stats:\n",
"Number of fusion ops: ", num_fusions_, "\n",
"Number of kLoop fusions: ", num_loop_fusions_, "\n",
loop_fusion_opcode_histogram_.ToString(), "\n",
"Number of kInput fusions: ", num_input_fusions_, "\n",
input_fusion_opcode_histogram_.ToString());
}
absl::Status HloFusionStatsVisitor::DefaultAction(
const xla::HloInstruction* instr) {
return absl::OkStatus();
}
absl::Status HloFusionStatsVisitor::HandleFusion(const HloInstruction* fusion) {
num_fusions_++;
std::set<std::string> opcodes =
GetUniqueOpcodes(fusion->fused_instructions_computation());
if (fusion->fusion_kind() == HloInstruction::FusionKind::kLoop) {
num_loop_fusions_++;
loop_fusion_opcode_histogram_[opcodes]++;
} else if (fusion->fusion_kind() == HloInstruction::FusionKind::kInput) {
num_input_fusions_++;
input_fusion_opcode_histogram_[opcodes]++;
}
return absl::OkStatus();
}
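// Minimal driver sketch for the visitor above (assumes a parsed HloModule
// held in a unique_ptr named `module`):
//
//   HloFusionStatsVisitor stats;
//   TF_RETURN_IF_ERROR(stats.RunOnModule(module.get()));
//   LOG(INFO) << stats.ToString();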
}
} | #include "xla/service/gpu/hlo_fusion_stats.h"
#include <string>
#include <gtest/gtest.h>
#include "absl/strings/match.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace gpu {
namespace {
using HloFusionStatsTest = HloTestBase;
TEST_F(HloFusionStatsTest, LoopFusionAndReduceFusion) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
scalar_add_computation {
scalar_lhs.0 = f32[] parameter(0)
scalar_rhs.0 = f32[] parameter(1)
ROOT add.0 = f32[] add(scalar_lhs.0, scalar_rhs.0)
}
fused_select {
p1.1 = f32[32,32,32]{2,1,0} parameter(1)
c0 = f32[] constant(0)
broadcast = f32[32,32,32]{2,1,0} broadcast(f32[] c0), dimensions={}
greater-than = pred[32,32,32]{2,1,0} compare(f32[32,32,32]{2,1,0} p1.1,
f32[32,32,32]{2,1,0} broadcast), direction=GT
p0.1 = f32[32,32,32]{2,1,0} parameter(0)
ROOT select = f32[32,32,32]{2,1,0} select(pred[32,32,32]{2,1,0}
greater-than, f32[32,32,32]{2,1,0} p0.1, f32[32,32,32]{2,1,0} broadcast)
}
another_fused_select {
p1.1 = f32[32,32,32]{2,1,0} parameter(1)
c0 = f32[] constant(0)
broadcast = f32[32,32,32]{2,1,0} broadcast(f32[] c0), dimensions={}
greater-than = pred[32,32,32]{2,1,0} compare(f32[32,32,32]{2,1,0} p1.1,
f32[32,32,32]{2,1,0} broadcast), direction=GT
p0.1 = f32[32,32,32]{2,1,0} parameter(0)
ROOT select = f32[32,32,32]{2,1,0} select(pred[32,32,32]{2,1,0}
greater-than, f32[32,32,32]{2,1,0} p0.1, f32[32,32,32]{2,1,0} broadcast)
}
fused_reduce {
p0.2 = f32[32,32,32]{2,1,0} parameter(0)
c1 = f32[] constant(0)
r1 = f32[32,32]{1,0} reduce(p0.2, c1), dimensions={2},
to_apply=scalar_add_computation
mul = f32[32,32,32]{2,1,0} multiply(p0.2, p0.2)
r2 = f32[32,32]{1,0} reduce(mul, c1), dimensions={2},
to_apply=scalar_add_computation
ROOT tuple = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(r1, r2)
}
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
p1 = f32[32,32,32]{2,1,0} parameter(1)
select = f32[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_select
select_2 = f32[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop, calls=another_fused_select
fusion = (f32[32,32]{1,0}, f32[32,32]{1,0}) fusion(select), kind=kInput,
calls=fused_reduce
gte0 = f32[32,32]{1,0} get-tuple-element(fusion), index=0
gte1 = f32[32,32]{1,0} get-tuple-element(fusion), index=1
ROOT root = (f32[32,32]{1,0}, f32[32,32]{1,0}, f32[32,32,32]{2,1,0}, f32[32,32,32]{2,1,0})
tuple(gte1, gte1, select, select_2)
})")
.value();
HloFusionStatsVisitor fusion_stats_visitor;
TF_ASSERT_OK(
module.get()->entry_computation()->Accept(&fusion_stats_visitor));
SCOPED_TRACE(module->ToString());
std::string stats = fusion_stats_visitor.ToString();
ASSERT_TRUE(absl::StrContains(stats, "Number of fusion ops: 3"));
ASSERT_TRUE(absl::StrContains(stats, "Number of kLoop fusions: 2"));
ASSERT_TRUE(absl::StrContains(stats, "{broadcast, compare, select}: 2"));
ASSERT_TRUE(absl::StrContains(stats, "Number of kInput fusions: 1"));
ASSERT_TRUE(absl::StrContains(stats, "{cwise, reduce, tuple}: 1"));
}
TEST_F(HloFusionStatsTest, AggregateCwiseOps) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fused_computation {
p0.1 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0)
mul = f32[8,1,5,16,1,2]{5,4,3,2,1,0} multiply(p0.1, p0.1)
ROOT exp = f32[8,1,5,16,1,2]{5,4,3,2,1,0} exponential(mul)
}
ENTRY entry {
p0 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0)
ROOT fusion = f32[8,1,5,16,1,2]{5,4,3,2,1,0} fusion(p0), kind=kLoop,
calls=fused_computation
})")
.value();
HloFusionStatsVisitor fusion_stats_visitor;
TF_ASSERT_OK(
module.get()->entry_computation()->Accept(&fusion_stats_visitor));
SCOPED_TRACE(module->ToString());
std::string stats = fusion_stats_visitor.ToString();
ASSERT_TRUE(absl::StrContains(stats, "{cwise}: 1")) << stats;
}
}
}
} |
2,079 | cpp | tensorflow/tensorflow | hlo_algorithm_denylist | third_party/xla/xla/service/gpu/hlo_algorithm_denylist.cc | third_party/xla/xla/service/gpu/hlo_algorithm_denylist_test.cc | #ifndef XLA_SERVICE_GPU_HLO_ALGORITHM_DENYLIST_H_
#define XLA_SERVICE_GPU_HLO_ALGORITHM_DENYLIST_H_
#include <string>
#include <vector>
#include "xla/autotuning.pb.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/stream_executor/dnn.h"
namespace xla {
namespace gpu {
std::vector<stream_executor::dnn::AlgorithmDesc> GetDisabledConvAlgorithms(
ComputeCapability cc, CudnnVersion cudnn_version,
const std::string& blas_version, const std::string& hlo);
std::string HloStringWithGpuBackendConfig(const std::string& hlo,
GpuBackendConfig config);
}
}
#endif
#include "xla/service/gpu/hlo_algorithm_denylist.h"
#include <optional>
#include <string>
#include <tuple>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/backend_config.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_autotuning.pb.h"
#include "xla/stream_executor/dnn.h"
#include "tsl/platform/env.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"
namespace xla {
namespace gpu {
constexpr char kDefaultDenylist[] = R"pb(
entries {
hlo: "(f32[512,512,7,7]{3,2,1,0}, u8[0]{0}) custom-call(f32[512,512,7,7]{3,2,1,0}, f32[512,512,3,3]{3,2,1,0}, f32[512]{0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
backend_config {
operation_queue_id: 0
wait_on_operation_queues: []
cudnn_conv_backend_config: {
activation_mode: kNone
conv_result_scale: 1
side_input_scale: 0
leakyrelu_alpha: 0
},
force_earliest_schedule: false
}
cc { major: 7 }
cudnn_version { major: 9 }
algos { id: 14 }
}
entries {
hlo: "(f32[512,512,7,7]{3,2,1,0}, u8[0]{0}) custom-call(f32[512,512,7,7]{3,2,1,0}, f32[512,512,3,3]{3,2,1,0}, f32[512]{0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
backend_config {
operation_queue_id: 0
wait_on_operation_queues: []
cudnn_conv_backend_config: {
activation_mode: kNone
conv_result_scale: 1
side_input_scale: 0
leakyrelu_alpha: 0
},
force_earliest_schedule: false
}
cc { major: 7 }
cudnn_version { major: 9 minor: 1 patch: 1 }
algos { id: 14 }
}
entries {
hlo: "(f32[27,256,32,32]{3,2,1,0}, u8[0]{0}) custom-call(f32[27,256,32,32]{3,2,1,0}, f32[256,256,3,3]{3,2,1,0}, f32[256]{0}, f32[27,256,32,32]{3,2,1,0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
backend_config {
operation_queue_id: 0
wait_on_operation_queues: []
cudnn_conv_backend_config: {
activation_mode: kNone
conv_result_scale: 1
side_input_scale: 1,
leakyrelu_alpha: 0
},
force_earliest_schedule: false
}
cc { major: 7 }
cudnn_version { major: 9 }
algos { id: 14 }
}
entries {
hlo: "(f32[27,256,32,32]{3,2,1,0}, u8[0]{0}) custom-call(f32[27,256,32,32]{3,2,1,0}, f32[256,256,3,3]{3,2,1,0}, f32[256]{0}, f32[27,256,32,32]{3,2,1,0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
backend_config {
operation_queue_id: 0
wait_on_operation_queues: []
cudnn_conv_backend_config: {
activation_mode: kNone
conv_result_scale: 1
side_input_scale: 1
leakyrelu_alpha: 0
},
force_earliest_schedule: false
}
cc { major: 7 minor: 5 }
cudnn_version { major: 9 }
algos { id: 14 }
}
entries {
hlo: "(f32[27,256,32,32]{3,2,1,0}, u8[0]{0}) custom-call(f32[27,256,32,32]{3,2,1,0}, f32[256,256,3,3]{3,2,1,0}, f32[256]{0}, f32[27,256,32,32]{3,2,1,0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
backend_config {
operation_queue_id: 0
wait_on_operation_queues: []
cudnn_conv_backend_config: {
activation_mode: kNone
conv_result_scale: 1
side_input_scale: 1
leakyrelu_alpha: 0
},
force_earliest_schedule: false
}
cc { major: 7 }
cudnn_version { major: 9 minor: 1 patch: 1 }
algos { id: 14 }
}
entries {
hlo: "(f32[27,256,32,32]{3,2,1,0}, u8[0]{0}) custom-call(f32[27,256,32,32]{3,2,1,0}, f32[256,256,3,3]{3,2,1,0}, f32[256]{0}, f32[27,256,32,32]{3,2,1,0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
backend_config {
operation_queue_id: 0
wait_on_operation_queues: []
cudnn_conv_backend_config: {
activation_mode: kNone
conv_result_scale: 1
side_input_scale: 1
leakyrelu_alpha: 0
},
force_earliest_schedule: false
}
cc { major: 7 minor: 5 }
cudnn_version { major: 9 minor: 1 patch: 1 }
algos { id: 14 }
}
)pb";
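// Each entry above keys a denylisted algorithm on four coordinates: the
// textual HLO custom call (with its backend config appended), the CUDA
// compute capability, the cuDNN version, and an optional BLAS version.
// Entries that omit blas_version, like all of the defaults here, match any
// BLAS version during lookup.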
std::vector<stream_executor::dnn::AlgorithmDesc> GetDisabledConvAlgorithms(
ComputeCapability cc, CudnnVersion cudnn_version,
const std::string& blas_version, const std::string& hlo) {
using MapType = absl::flat_hash_map<
std::tuple<std::string, int, int, int, int, int, std::string>,
std::vector<stream_executor::dnn::AlgorithmDesc>>;
static MapType* denylist = [] {
auto* list = new MapType();
AlgorithmDenylist proto;
auto process_denylist = [list](const AlgorithmDenylist& proto) {
for (const auto& entry : proto.entries()) {
for (const auto& algo : entry.algos()) {
(*list)[std::make_tuple(HloStringWithGpuBackendConfig(
entry.hlo(), entry.backend_config()),
entry.cc().major(), entry.cc().minor(),
entry.cudnn_version().major(),
entry.cudnn_version().minor(),
entry.cudnn_version().patch(),
entry.blas_version())]
.emplace_back(algo.id(), algo.tensor_ops(), std::nullopt);
}
}
};
std::string file_path =
GetDebugOptionsFromFlags().xla_gpu_algorithm_denylist_path();
if (!file_path.empty()) {
TF_CHECK_OK(tsl::ReadTextProto(tsl::Env::Default(), file_path, &proto));
process_denylist(proto);
}
CHECK(tsl::protobuf::TextFormat::ParseFromString(
std::string(kDefaultDenylist), &proto));
process_denylist(proto);
return list;
}();
std::vector<stream_executor::dnn::AlgorithmDesc> algorithms;
auto add_matching_disabled_algorithms_to_result = [&](const auto& key) {
auto iter = denylist->find(key);
if (iter != denylist->end()) {
algorithms.insert(algorithms.end(), iter->second.begin(),
iter->second.end());
}
};
auto key = std::make_tuple(hlo, cc.major(), cc.minor(), cudnn_version.major(),
cudnn_version.minor(), cudnn_version.patch(),
blas_version);
add_matching_disabled_algorithms_to_result(key);
std::get<6>(key) = std::string{};
add_matching_disabled_algorithms_to_result(key);
return algorithms;
}
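// Note the two lookups above: the first uses the caller's exact BLAS
// version and the second clears it (std::get<6>), which is how entries
// without a blas_version apply to every BLAS version.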
std::string HloStringWithGpuBackendConfig(const std::string& hlo,
GpuBackendConfig config) {
BackendConfigWrapper backend_config(config);
return absl::StrCat(hlo, ", backend_config=", backend_config.GetRawString());
}
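// Illustrative call against the denylist; conv_hlo_string and
// gpu_backend_config are placeholders, and the version values are
// hypothetical:
//
//   ComputeCapability cc;
//   cc.set_major(7);
//   cc.set_minor(0);
//   CudnnVersion cudnn_version;
//   cudnn_version.set_major(9);
//   auto disabled = GetDisabledConvAlgorithms(
//       cc, cudnn_version, /*blas_version=*/"9000",
//       HloStringWithGpuBackendConfig(conv_hlo_string, gpu_backend_config));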
}
} | #include "xla/service/gpu/hlo_algorithm_denylist.h"
#include <cstdlib>
#include <string>
#include "absl/strings/str_cat.h"
#include "xla/stream_executor/dnn.h"
#include "xla/tests/test_utils.h"
#include "tsl/platform/env.h"
#include "tsl/platform/path.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
class DenylistTest : public testing::Test {
protected:
DenylistTest() {
std::string existing_xla_flags;
const char* env = std::getenv("XLA_FLAGS");
if (env != nullptr) {
existing_xla_flags = absl::StrCat(env, " ");
}
tsl::setenv(
"XLA_FLAGS",
absl::StrCat(
existing_xla_flags, "--xla_gpu_algorithm_denylist_path=",
tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "service", "gpu",
"data", "hlo_algorithm_denylist.pbtxt"))
.data(),
1);
config_ =
ParseTextProto<GpuBackendConfig>(
"operation_queue_id: 0 wait_on_operation_queues: [] "
"cudnn_conv_backend_config: { activation_mode: kNone "
"conv_result_scale: 1 side_input_scale: 0 leakyrelu_alpha: 0} "
"force_earliest_schedule: false")
.value();
}
GpuBackendConfig config_;
};
TEST_F(DenylistTest, DefaultTest) {
ComputeCapability cc;
cc.set_major(7);
cc.set_minor(0);
CudnnVersion cudnn_version;
cudnn_version.set_major(7);
cudnn_version.set_minor(6);
cudnn_version.set_patch(2);
auto list = GetDisabledConvAlgorithms(
cc, cudnn_version, "9000",
HloStringWithGpuBackendConfig(
R"((f16[256,112,112,64]{3,2,1,0}, u8[0]{0}) custom-call(f16[256,224,224,4]{3,2,1,0}, f16[7,7,4,64]{2,1,0,3}), window={size=7x7 stride=2x2 pad=3_3x3_3}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward")",
config_));
EXPECT_THAT(list, testing::UnorderedElementsAre(
stream_executor::dnn::AlgorithmDesc{0, true},
stream_executor::dnn::AlgorithmDesc{0, false},
stream_executor::dnn::AlgorithmDesc{1, true},
stream_executor::dnn::AlgorithmDesc{1, false},
stream_executor::dnn::AlgorithmDesc{42, true},
stream_executor::dnn::AlgorithmDesc{42, false}));
}
TEST_F(DenylistTest, NegativeTest) {
ComputeCapability cc;
cc.set_major(7);
cc.set_minor(0);
CudnnVersion cudnn_version;
cudnn_version.set_major(7);
cudnn_version.set_minor(6);
  cudnn_version.set_patch(2);
auto list =
GetDisabledConvAlgorithms(cc, cudnn_version, "9000", R"(invalid hlo)");
EXPECT_THAT(list, testing::IsEmpty());
}
TEST_F(DenylistTest, NoBlasVersionSet) {
ComputeCapability cc;
cc.set_major(7);
cc.set_minor(0);
CudnnVersion cudnn_version;
cudnn_version.set_major(7);
cudnn_version.set_minor(6);
cudnn_version.set_patch(2);
auto list = GetDisabledConvAlgorithms(
cc, cudnn_version, "120301",
HloStringWithGpuBackendConfig(
R"((f16[256,112,112,64]{3,2,1,0}, u8[0]{0}) custom-call(f16[256,224,224,4]{3,2,1,0}, f16[7,7,4,64]{2,1,0,3}), window={size=7x7 stride=2x2 pad=3_3x3_3}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward")",
config_));
EXPECT_THAT(list, testing::UnorderedElementsAre(
stream_executor::dnn::AlgorithmDesc{42, true},
stream_executor::dnn::AlgorithmDesc{42, false}));
}
TEST_F(DenylistTest, EntryFromHardcodedList) {
ComputeCapability cc;
cc.set_major(7);
cc.set_minor(0);
CudnnVersion cudnn_version;
cudnn_version.set_major(9);
cudnn_version.set_minor(0);
cudnn_version.set_patch(0);
auto list = GetDisabledConvAlgorithms(
cc, cudnn_version, "9000",
HloStringWithGpuBackendConfig(
R"((f32[512,512,7,7]{3,2,1,0}, u8[0]{0}) custom-call(f32[512,512,7,7]{3,2,1,0}, f32[512,512,3,3]{3,2,1,0}, f32[512]{0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target="__cudnn$convBiasActivationForward")",
config_));
EXPECT_THAT(list, testing::ElementsAre(
stream_executor::dnn::AlgorithmDesc{14, false}));
}
}
}
} |
2,080 | cpp | tensorflow/tensorflow | reduction_utils | third_party/xla/xla/service/gpu/reduction_utils.cc | third_party/xla/xla/service/gpu/reduction_utils_test.cc | #ifndef XLA_SERVICE_GPU_REDUCTION_UTILS_H_
#define XLA_SERVICE_GPU_REDUCTION_UTILS_H_
#include <cstdint>
#include <ostream>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_module_config.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
int64_t MinThreadsXRowReduction(const HloModuleConfig& hlo_module_config);
inline constexpr int64_t BatchedReductionRaceFreeBound() { return 8; }
struct ReductionDimensions {
constexpr static int kRowMajorReducedDimension = 0;
constexpr static int kRowKeptDimension = 1;
constexpr static int kRowMinorReducedDimension = 2;
constexpr static int kColMajorKeptDimension = 0;
constexpr static int kColReducedDimension = 1;
constexpr static int kColMinorKeptDimension = 2;
bool is_row_reduction;
Vector3 dimensions;
bool operator==(const ReductionDimensions& other) const {
return is_row_reduction == other.is_row_reduction &&
dimensions == other.dimensions;
}
};
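// The vector above is always a rank-3 view of the reduced operand: for row
// reductions {major reduced (batch), kept, minor reduced}; for column
// reductions {major kept, reduced, minor kept}. The named constants index
// into `dimensions` accordingly.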
std::ostream& operator<<(std::ostream& os,
const ReductionDimensions& reduction_dimensions);
bool IsUnnestedReductionFasterThanElemental(
const ReductionDimensions& reduction_dimensions);
bool IsReductionFromOrToContiguousDimensions(const HloInstruction& reduce);
ReductionDimensions GetReductionKindAndContiguousComponents(
const HloInstruction& reduce);
Vector3 GetReductionTiling(const ReductionDimensions& reduction_dimensions);
int64_t ReductionDimensionRaceFreeBound(
const HloModuleConfig& hlo_module_config,
const ReductionDimensions& reduction_dimensions);
bool ReductionIsRaceFree(const HloModuleConfig& hlo_module_config,
const ReductionDimensions& reduction_dimensions);
bool IsRealReductionHero(const HloInstruction& root,
const HloInstruction& hero);
bool AreReductionsMultiOutputFusionCompatible(
const HloInstruction* reduce_hero, const HloInstruction* first_reduce);
}
}
#endif
#include "xla/service/gpu/reduction_utils.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <ostream>
#include "absl/algorithm/container.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/logging.h"
#ifdef GOOGLE_CUDA
#include "xla/service/gpu/gpu_asm_opts_util.h"
#include "xla/stream_executor/cuda/cuda_asm_compiler.h"
#endif
namespace xla {
namespace gpu {
namespace {
Vector3 PartitionShapeByMiddleDimensions(
const Shape& shape, absl::Span<const int64_t> dims_middle) {
CHECK(LayoutUtil::AreDimensionsConsecutive(shape.layout(), dims_middle));
Vector3 values = {1, 1, 1};
enum Segment { kMajor = 0, kMiddle = 1, kMinor = 2 };
Segment cur_segment = kMinor;
for (int64_t cur_dim : LayoutUtil::MinorToMajor(shape)) {
if (cur_segment != kMajor) {
bool cur_dim_in_middle = absl::c_linear_search(dims_middle, cur_dim);
if (cur_segment == kMinor) {
if (cur_dim_in_middle) {
cur_segment = kMiddle;
}
} else if (cur_segment == kMiddle) {
if (!cur_dim_in_middle) {
cur_segment = kMajor;
}
}
}
values[cur_segment] *= shape.dimensions(cur_dim);
}
return values;
}
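// Worked example for the helper above: with the default {1,0} layout,
// partitioning f32[100,200] around dims_middle={0} walks minor-to-major
// through dim 1 (size 200, minor segment) and dim 0 (size 100, middle
// segment), yielding {major=1, middle=100, minor=200}.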
}
int64_t MinThreadsXRowReduction(const HloModuleConfig& hlo_module_config) {
#ifdef GOOGLE_CUDA
auto ptxas_config =
PtxOptsFromDebugOptions(hlo_module_config.debug_options());
auto ptxas_version_tuple =
se::GetAsmCompilerVersion(ptxas_config.preferred_cuda_dir);
if (!ptxas_version_tuple.ok() ||
ptxas_version_tuple.value() < std::array<int64_t, 3>{12, 2, 0}) {
return 512;
}
#endif
return 1024;
}
Vector3 GetReductionTiling(const ReductionDimensions& reduction_dimensions) {
if (reduction_dimensions.is_row_reduction) {
int64_t tile_z = std::min(reduction_dimensions.dimensions[0],
BatchedReductionRaceFreeBound());
return {tile_z, 1, 16};
}
return {1, 128, 1};
}
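// The tiles above follow the ReductionDimensions layout: row reductions get
// {min(batch, BatchedReductionRaceFreeBound()), 1, 16} and column
// reductions get {1, 128, 1}.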
int64_t ReductionDimensionRaceFreeBound(
const HloModuleConfig& hlo_module_config,
const ReductionDimensions& reduction_dimensions) {
Vector3 reduction_tiling = GetReductionTiling(reduction_dimensions);
if (reduction_dimensions.is_row_reduction) {
return MinThreadsXRowReduction(hlo_module_config) * reduction_tiling[2];
}
return WarpSize() * reduction_tiling[1];
}
bool IsUnnestedReductionFasterThanElemental(
const ReductionDimensions& reduction_dimensions) {
if (reduction_dimensions.is_row_reduction) {
return (reduction_dimensions.dimensions[2] >= WarpSize()) ||
((WarpSize() % reduction_dimensions.dimensions[2]) == 0);
}
int64_t major_size = reduction_dimensions.dimensions[1];
int64_t minor_size = reduction_dimensions.dimensions[2];
bool prefer_elemental_emitter =
(major_size < WarpSize()) ||
(major_size < 2 * WarpSize() && minor_size < WarpSize()) ||
(major_size < 4 * WarpSize() && minor_size < 8) ||
(major_size < 8 * WarpSize() && minor_size < 3);
return !prefer_elemental_emitter;
}
bool IsReductionFromOrToContiguousDimensions(const HloInstruction& reduce) {
if (reduce.opcode() != HloOpcode::kReduce) {
return false;
}
const Shape& operand_shape = reduce.operand(0)->shape();
absl::Span<const int64_t> dims_to_reduce = reduce.dimensions();
DimensionVector dims_to_keep;
for (int64_t dim = 0; dim < operand_shape.dimensions().size(); ++dim) {
if (!absl::c_linear_search(dims_to_reduce, dim)) {
dims_to_keep.push_back(dim);
}
}
return (LayoutUtil::AreDimensionsConsecutive(operand_shape.layout(),
dims_to_keep) ||
LayoutUtil::AreDimensionsConsecutive(operand_shape.layout(),
dims_to_reduce)) &&
IsUnnestedReductionFasterThanElemental(
GetReductionKindAndContiguousComponents(reduce));
}
bool ReductionIsRaceFree(const HloModuleConfig& hlo_module_config,
const ReductionDimensions& reduction_dimensions) {
if (reduction_dimensions.is_row_reduction) {
return reduction_dimensions.dimensions[2] <=
ReductionDimensionRaceFreeBound(hlo_module_config,
reduction_dimensions) &&
reduction_dimensions.dimensions[0] <=
BatchedReductionRaceFreeBound();
}
return reduction_dimensions.dimensions[1] <=
ReductionDimensionRaceFreeBound(hlo_module_config,
reduction_dimensions);
}
std::ostream& operator<<(std::ostream& os,
const ReductionDimensions& reduction_dimensions) {
bool is_row_reduction = reduction_dimensions.is_row_reduction;
os << (is_row_reduction ? "row " : "column ") << "reduction ["
<< absl::StrJoin(reduction_dimensions.dimensions, ",") << "] -> ["
<< reduction_dimensions.dimensions[0] << ", "
<< reduction_dimensions
.dimensions[is_row_reduction
? ReductionDimensions::kRowKeptDimension
: ReductionDimensions::kColMinorKeptDimension]
<< "]";
return os;
}
ReductionDimensions GetReductionKindAndContiguousComponents(
const HloInstruction& reduce) {
Shape input_shape = reduce.operand(0)->shape();
absl::Span<const int64_t> dims_to_reduce = reduce.dimensions();
DimensionVector dims_to_keep;
for (int64_t dim = 0; dim < input_shape.rank(); ++dim) {
if (!absl::c_linear_search(dims_to_reduce, dim)) {
dims_to_keep.push_back(dim);
}
}
if (dims_to_keep.empty()) {
return {true,
{1, 1, ShapeUtil::ElementsIn(input_shape)}};
}
if (LayoutUtil::AreDimensionsConsecutive(input_shape.layout(),
dims_to_keep)) {
Vector3 shape_partition =
PartitionShapeByMiddleDimensions(input_shape, dims_to_keep);
if (shape_partition[1] == 1) {
return {true,
{1, 1, shape_partition[0] * shape_partition[2]}};
}
if (shape_partition[2] == 1) {
return {false,
{1, shape_partition[0], shape_partition[1]}};
}
return {true, shape_partition};
}
Vector3 shape_partition =
PartitionShapeByMiddleDimensions(input_shape, dims_to_reduce);
if (shape_partition[2] == 1) {
return {true,
{1, shape_partition[0], shape_partition[1]}};
}
return {false, shape_partition};
}
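// Worked example: reducing f32[100,200] (layout {1,0}) over dimension 1
// keeps dimension 0, which is layout-consecutive; the shape partitions
// into {1, 100, 200} with neither middle nor minor component equal to 1,
// so the result is a row reduction with dimensions {1, 100, 200}.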
bool IsRealReductionHero(const HloInstruction& root,
const HloInstruction& hero) {
if (!IsReductionFromOrToContiguousDimensions(hero)) {
return false;
}
return &root == &hero ||
ReductionIsRaceFree(hero.GetModule()->config(),
GetReductionKindAndContiguousComponents(hero));
}
bool AreReductionsMultiOutputFusionCompatible(
const HloInstruction* reduce_hero, const HloInstruction* first_reduce) {
return GetReductionKindAndContiguousComponents(*reduce_hero) ==
GetReductionKindAndContiguousComponents(*first_reduce);
}
}
} | #include "xla/service/gpu/reduction_utils.h"
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
using ReductionUtilsTest = HloTestBase;
const char kModulePrefix[] = R"(
HloModule test_module
scalar_add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
})";
TEST_F(ReductionUtilsTest, ReductionsAreMultioutputFusionCompatible) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_sibling1 {
p_0 = f32[32,64]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add
}
fused_sibling2 {
p_0 = f32[32,64]{1,0} parameter(0)
neg = f32[32,64]{1,0} negate(p_0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(neg, constant), dimensions={1}, to_apply=scalar_add
}
ENTRY entry {
p_0 = f32[32,64]{1,0} parameter(0)
fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1
fusion2 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling2
ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion1 = root->operand(0);
const HloInstruction* fusion2 = root->operand(1);
EXPECT_TRUE(AreReductionsMultiOutputFusionCompatible(
fusion1->fused_expression_root(), fusion2->fused_expression_root()));
}
TEST_F(ReductionUtilsTest,
ReductionsWithSameCanonicalizedDimsAreMultioutputFusionCompatible) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_sibling1 {
p_0 = f32[32,64]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add
}
fused_sibling2 {
p_0 = f32[32,64]{1,0} parameter(0)
bitcast = f32[32,8,8]{2,1,0} bitcast(p_0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(bitcast, constant), dimensions={1,2}, to_apply=scalar_add
}
ENTRY entry {
p_0 = f32[32,64]{1,0} parameter(0)
fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1
fusion2 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling2
ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion1 = root->operand(0);
const HloInstruction* fusion2 = root->operand(1);
EXPECT_TRUE(AreReductionsMultiOutputFusionCompatible(
fusion1->fused_expression_root(), fusion2->fused_expression_root()));
}
TEST_F(ReductionUtilsTest,
ReductionsAreNotMultioutputFusionCompatible_DifferentOperandShapes) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_sibling1 {
p_0 = f32[32,64]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add
}
fused_sibling2 {
p_0 = f32[64,32]{1,0} parameter(0)
neg = f32[64,32]{1,0} negate(p_0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(neg, constant), dimensions={0}, to_apply=scalar_add
}
ENTRY entry {
p_0 = f32[32,64]{1,0} parameter(0)
p_1 = f32[64,32]{1,0} parameter(1)
fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1
fusion2 = f32[32]{0} fusion(p_1), kind=kInput, calls=fused_sibling2
ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion1 = root->operand(0);
const HloInstruction* fusion2 = root->operand(1);
EXPECT_FALSE(AreReductionsMultiOutputFusionCompatible(
fusion1->fused_expression_root(), fusion2->fused_expression_root()));
}
TEST_F(ReductionUtilsTest,
ReductionsAreNotMultioutputFusionCompatible_DifferentOutputShapes) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_sibling1 {
p_0 = f32[32,64]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add
}
fused_sibling2 {
p_0 = f32[64,32]{1,0} parameter(0)
neg = f32[64,32]{1,0} negate(p_0)
constant = f32[] constant(0)
ROOT reduce = f32[64]{0} reduce(neg, constant), dimensions={1}, to_apply=scalar_add
}
ENTRY entry {
p_0 = f32[32,64]{1,0} parameter(0)
p_1 = f32[64,32]{1,0} parameter(1)
fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1
fusion2 = f32[64]{0} fusion(p_1), kind=kInput, calls=fused_sibling2
ROOT root = (f32[32]{0}, f32[64]{0}) tuple(fusion1, fusion2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion1 = root->operand(0);
const HloInstruction* fusion2 = root->operand(1);
EXPECT_FALSE(AreReductionsMultiOutputFusionCompatible(
fusion1->fused_expression_root(), fusion2->fused_expression_root()));
}
TEST_F(ReductionUtilsTest,
ReductionsAreNotMultioutputFusionCompatible_DifferentReduceDimensions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_sibling1 {
p_0 = f32[32,32]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={0}, to_apply=scalar_add
}
fused_sibling2 {
p_0 = f32[32,32]{1,0} parameter(0)
neg = f32[32,32]{1,0} negate(p_0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(neg, constant), dimensions={1}, to_apply=scalar_add
}
ENTRY entry {
p_0 = f32[32,32]{1,0} parameter(0)
fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1
fusion2 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling2
ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion1 = root->operand(0);
const HloInstruction* fusion2 = root->operand(1);
EXPECT_FALSE(AreReductionsMultiOutputFusionCompatible(
fusion1->fused_expression_root(), fusion2->fused_expression_root()));
}
}
}
} |
2,081 | cpp | tensorflow/tensorflow | gpu_p2p_pipeliner | third_party/xla/xla/service/gpu/gpu_p2p_pipeliner.cc | third_party/xla/xla/service/gpu/gpu_p2p_pipeliner_test.cc | #ifndef XLA_SERVICE_GPU_GPU_P2P_PIPELINER_H_
#define XLA_SERVICE_GPU_GPU_P2P_PIPELINER_H_
#include "xla/service/hlo_pass_pipeline.h"
namespace xla {
namespace gpu {
void AddP2PPipeliner(HloPassPipeline& pipeline);
}
}
#endif
#include "xla/service/gpu/gpu_p2p_pipeliner.h"
#include <cstdint>
#include <functional>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/collective_pipeliner.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
namespace {
bool ShouldPipeline(const HloInstruction* instr) {
if (!HloPredicateIsOp<HloOpcode::kRecvDone, HloOpcode::kSendDone>(instr)) {
return false;
}
auto it = instr->frontend_attributes().map().find(kSendRecvPipelineAttr);
if (it == instr->frontend_attributes().map().end()) {
return false;
}
auto allowed_predecessor = [&]() {
return instr->opcode() == HloOpcode::kRecvDone &&
instr->control_predecessors().size() == 1 &&
instr->control_predecessors()[0]->opcode() == HloOpcode::kSend;
};
if (!instr->control_successors().empty() ||
(!instr->control_predecessors().empty() && !allowed_predecessor())) {
return false;
}
bool is_pipelined =
(instr->user_count() == 1 && instr->parent() != nullptr &&
instr->users()[0] == instr->parent()->root_instruction());
return !is_pipelined;
}
bool ShouldAllowLoopVariantParameterInChain(const HloInstruction* instr) {
CHECK(instr->opcode() == HloOpcode::kGetTupleElement &&
instr->operand(0)->opcode() == HloOpcode::kParameter);
return true;
}
absl::Status PostprocessP2PImpl(
HloInstruction* instr,
std::function<std::string(std::vector<ReplicaGroup>&)> transformer) {
if (!HloPredicateIsOp<HloOpcode::kRecvDone, HloOpcode::kSendDone>(instr)) {
return Internal("Expected SendDone/RecvDone as the pipelined collective");
}
instr = instr->mutable_operand(0);
if (!HloPredicateIsOp<HloOpcode::kRecv, HloOpcode::kSend>(instr)) {
return Internal("Expected Send/Recv as the SendDone/RecvDone operand");
}
auto validation_it =
instr->frontend_attributes().map().find(kSendRecvValidationAttr);
if (validation_it == instr->frontend_attributes().map().end() ||
validation_it->second == "invalid") {
return absl::OkStatus();
}
auto statusor_bounds = ParseReplicaGroupsOnly(validation_it->second);
if (!statusor_bounds.ok()) {
return statusor_bounds.status();
}
std::string validation_attr = transformer(statusor_bounds.value());
xla::FrontendAttributes attributes = instr->frontend_attributes();
(*attributes.mutable_map())[kSendRecvValidationAttr] = validation_attr;
instr->set_frontend_attributes(attributes);
return absl::OkStatus();
}
absl::Status PostprocessPeeledP2P(HloInstruction* instr) {
auto transform_bounds = [&](std::vector<ReplicaGroup>& replica_groups) {
std::vector<std::pair<int64_t, int64_t>> bounds;
bounds.reserve(replica_groups.size());
bool all_invalid = true;
for (const auto& replica_group : replica_groups) {
int64_t lower_bound = replica_group.replica_ids(0);
int64_t upper_bound = replica_group.replica_ids(1);
if (lower_bound <= 0 && upper_bound >= 0) {
all_invalid = false;
bounds.push_back({0, 0});
} else {
bounds.push_back({1, 0});
}
}
std::string validation_attr;
if (all_invalid) {
validation_attr = "invalid";
} else {
validation_attr = "{" +
absl::StrJoin(bounds, ",",
absl::PairFormatter(
[](std::string* out, int64_t value) {
absl::StrAppend(out, "{", value);
},
",",
[](std::string* out, int64_t value) {
absl::StrAppend(out, value, "}");
})) +
"}";
}
return validation_attr;
};
return PostprocessP2PImpl(instr, transform_bounds);
};
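// Example of the peeled transform above: the validation bound {0,7} covers
// iteration 0 and becomes {0,0}, while {1,7} does not and becomes the empty
// range {1,0}; if every bound excludes iteration 0, the whole attribute
// collapses to "invalid".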
absl::Status PostprocessRotatedP2P(HloInstruction* instr) {
auto transform_bounds = [&](std::vector<ReplicaGroup>& replica_groups) {
std::vector<std::pair<int64_t, int64_t>> bounds;
bounds.reserve(replica_groups.size());
bool all_invalid = true;
for (const auto& replica_group : replica_groups) {
int64_t lower_bound = replica_group.replica_ids(0);
int64_t upper_bound = replica_group.replica_ids(1);
if (lower_bound <= upper_bound) {
if (lower_bound >= 1) {
--lower_bound;
}
if (upper_bound >= 1) {
--upper_bound;
}
if (lower_bound <= upper_bound) {
all_invalid = false;
bounds.push_back({lower_bound, upper_bound});
} else {
bounds.push_back({1, 0});
}
} else {
bounds.push_back({lower_bound, upper_bound});
}
}
std::string validation_attr;
if (all_invalid) {
validation_attr = "invalid";
} else {
validation_attr = "{" +
absl::StrJoin(bounds, ",",
absl::PairFormatter(
[](std::string* out, int64_t value) {
absl::StrAppend(out, "{", value);
},
",",
[](std::string* out, int64_t value) {
absl::StrAppend(out, value, "}");
})) +
"}";
}
return validation_attr;
};
return PostprocessP2PImpl(instr, transform_bounds);
}
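// Example of the rotated transform above (mirrored by the pipeliner unit
// test): "{{1,7}}" becomes "{{0,6}}", since after peeling the first
// iteration, rotated-loop iteration i corresponds to original iteration
// i+1, so both bounds shift down by one.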
}
void AddP2PPipeliner(HloPassPipeline& pipeline) {
CollectivePipeliner::Config config{
0,
INT64_MAX,
true,
false,
true,
CollectivePipeliner::PipeliningDirection::kBackward,
ShouldPipeline,
HloPredicateTrue,
HloPredicateTrue,
ShouldAllowLoopVariantParameterInChain,
true,
PostprocessPeeledP2P,
PostprocessRotatedP2P};
pipeline.AddPass<CollectivePipeliner>(config);
}
}
} | #include "xla/service/gpu/gpu_p2p_pipeliner.h"
#include <cstdint>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
namespace {
class GpuP2PPipelinerTest : public HloTestBase {
public:
GpuP2PPipelinerTest() {
const int64_t kNumReplicas = 1;
const int64_t kNumComputations = 4;
config_ = GetModuleConfigForTest(kNumReplicas,
kNumComputations);
}
absl::StatusOr<bool> RunOptimizer(HloModule* module) {
HloPassPipeline pipeline("optimizer");
pipeline.AddPass<HloVerifier>(false,
false);
AddP2PPipeliner(pipeline);
pipeline.AddPass<HloVerifier>(false,
false);
return pipeline.Run(module);
}
protected:
HloModuleConfig config_;
};
TEST_F(GpuP2PPipelinerTest,
TransformRecvSendBackwardsWithMetaDataPostProcessing) {
const char* kHloStr = R"(
HloModule module
cond {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(10)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(param), index=0
send-data = get-tuple-element(param), index=1
after-all.0 = token[] after-all()
recv.0 = (u32[2], u32[], token[]) recv(after-all.0), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{1,0}}",
_xla_send_recv_pipeline="0",
_xla_send_recv_validation="{{1,7}}"
}
after-all.0.s = token[] after-all()
send.0 = (u32[2], u32[], token[]) send(send-data, after-all.0.s),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{1,0}}",
_xla_send_recv_pipeline="0",
_xla_send_recv_validation="{{1,7}}"
}
recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}, control-predecessors={send.0}
recv-data = u32[2] get-tuple-element(recv-done.0), index=0
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
r = u32[2] broadcast(c1), dimensions={}
s = u32[2] add(r, recv-data)
send-done.0 = token[] send-done(send.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
ROOT result = (u32[], u32[2]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
c1 = u32[] constant(1)
r = u32[] replica-id()
a = u32[] add(c1, r)
init = u32[2] broadcast(a), dimensions={}
while_init = (u32[], u32[2]) tuple(c0, init)
while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond
ROOT result = u32[2] get-tuple-element(while_result), index=1
})";
auto module = ParseAndReturnUnverifiedModule(kHloStr, config_).value();
EXPECT_TRUE(RunOptimizer(module.get()).value());
XLA_VLOG_LINES(10, module->ToString());
auto while_op = FindInstruction(module.get(), "while");
EXPECT_EQ(while_op->opcode(), HloOpcode::kWhile);
EXPECT_EQ(while_op->shape().tuple_shapes().size(), 5);
auto recv1 =
DynCast<HloRecvInstruction>(FindInstruction(module.get(), "recv.1"));
EXPECT_NE(recv1, nullptr);
auto recv2 =
DynCast<HloRecvInstruction>(FindInstruction(module.get(), "recv.2"));
EXPECT_NE(recv2, nullptr);
EXPECT_EQ(recv1->channel_id(), recv2->channel_id());
auto send1 =
DynCast<HloSendInstruction>(FindInstruction(module.get(), "send.1"));
EXPECT_NE(send1, nullptr);
auto send2 =
DynCast<HloSendInstruction>(FindInstruction(module.get(), "send.2"));
EXPECT_NE(send2, nullptr);
EXPECT_EQ(send1->channel_id(), send2->channel_id());
const char* kPeeledAttr = "_xla_send_recv_validation=\"invalid\"";
const char* kRotatedAttr = "_xla_send_recv_validation=\"{{0,6}}\"";
EXPECT_THAT(send1->ToString(), ::testing::HasSubstr(kPeeledAttr));
EXPECT_THAT(recv1->ToString(), ::testing::HasSubstr(kPeeledAttr));
EXPECT_THAT(send2->ToString(), ::testing::HasSubstr(kRotatedAttr));
EXPECT_THAT(recv2->ToString(), ::testing::HasSubstr(kRotatedAttr));
}
}
}
} |
2,082 | cpp | tensorflow/tensorflow | cudnn_support_utils | third_party/xla/xla/service/gpu/cudnn_support_utils.cc | third_party/xla/xla/service/gpu/cudnn_support_utils_test.cc | #ifndef XLA_SERVICE_GPU_CUDNN_SUPPORT_UTILS_H_
#define XLA_SERVICE_GPU_CUDNN_SUPPORT_UTILS_H_
#include <cstdint>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
absl::StatusOr<bool> CudnnSupportsOptimizedIntegerConvolution(
const se::CudaComputeCapability& compute_capability,
HloCustomCallInstruction& conv, int vector_size);
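// Shapes and permutation describing how a filter (or bias) is rearranged for
// cuDNN: bitcast the operand to `transpose_shape`, transpose it with
// `permutation`, then bitcast the result to `result_shape`.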
struct CudnnReorderTransposeConfig {
Shape transpose_shape;
Shape result_shape;
std::vector<int64_t> permutation;
};
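// Infers the reshape/transpose needed to reorder a convolution filter (rank 4,
// or rank 5 if already vectorized by 4 or 32) into cuDNN's int8x32 filter
// layout. Returns an error if the shape is not vectorizable.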
absl::StatusOr<CudnnReorderTransposeConfig>
CudnnInferTransposeForFilterReordering(
const Shape& shape, const ConvolutionDimensionNumbers& dimension_numbers);
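// As above, but for the rank-1 bias operand of a fused convolution.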
absl::StatusOr<CudnnReorderTransposeConfig>
CudnnInferTransposeForBiasReordering(const Shape& shape);
inline constexpr absl::string_view kWorkspaceAllocationCustomCallTarget =
"__nop";
bool IsWorkspaceAllocationRoot(const HloInstruction& root);
}
}
#endif
#include "xla/service/gpu/cudnn_support_utils.h"
#include <cstdint>
#include <vector>
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
absl::StatusOr<bool> CudnnSupportsOptimizedIntegerConvolution(
const se::CudaComputeCapability& compute_capability,
HloCustomCallInstruction& conv, int vector_size) {
TF_ASSIGN_OR_RETURN(auto kind, GetCudnnConvKind(&conv));
const Shape& input_shape = conv.operand(0)->shape();
const Shape& kernel_shape = conv.operand(1)->shape();
const Shape& result_shape = conv.shape().tuple_shapes(0);
const auto& dnums = conv.convolution_dimension_numbers();
if (vector_size != 4 && vector_size != 32) {
VLOG(3) << "Unsupported vector size for integer convolution: "
<< vector_size;
return false;
}
if ((vector_size == 32 && !compute_capability.IsAtLeast(7, 5)) ||
!compute_capability.IsAtLeast(6, 1)) {
VLOG(3) << "Compute capability " << compute_capability.ToString()
<< " is not sufficent for int8x" << vector_size
<< " vectorization.";
return false;
}
if (kind != CudnnConvKind::kForward &&
kind != CudnnConvKind::kForwardActivation) {
VLOG(3) << "Convolution kind is not forward or foward-activation: "
<< conv.ToString();
return false;
}
if (!primitive_util::IsIntegralType(input_shape.element_type()) ||
!primitive_util::IsIntegralType(kernel_shape.element_type())) {
VLOG(3) << "Convolution does not accept integer inputs/weights: "
<< conv.ToString();
return false;
}
if (dnums.input_spatial_dimensions().size() != 2 ||
dnums.kernel_spatial_dimensions().size() != 2 ||
dnums.output_spatial_dimensions().size() != 2) {
VLOG(3) << "Convolution is not 2D: " << conv.ToString();
return false;
}
if (vector_size == 32 &&
!primitive_util::IsIntegralType(result_shape.element_type())) {
VLOG(3) << "int8x32 convolutions only support integer output: "
<< conv.ToString();
return false;
}
if (vector_size == 32) {
int64_t W = input_shape.dimensions(dnums.input_spatial_dimensions()[0]);
int64_t H = input_shape.dimensions(dnums.input_spatial_dimensions()[1]);
int64_t R = kernel_shape.dimensions(dnums.kernel_spatial_dimensions()[0]);
int64_t S = kernel_shape.dimensions(dnums.kernel_spatial_dimensions()[1]);
const int64_t dilationW = conv.window().dimensions()[0].base_dilation();
const int64_t dilationH = conv.window().dimensions()[1].base_dilation();
if ((W <= (R - 1) * dilationW) || (H <= (S - 1) * dilationH)) {
VLOG(3) << "Conv spatial filter/input dimensions are too small for "
"vecotrized int8x32 convolution: "
<< conv.ToString();
return false;
}
}
if (window_util::HasDilation(conv.window())) {
VLOG(3) << "Vectorized integer convolutions do not support dilation: "
<< conv.ToString();
return false;
}
return true;
}
absl::StatusOr<CudnnReorderTransposeConfig>
CudnnInferTransposeForFilterReordering(
const Shape& shape, const ConvolutionDimensionNumbers& dimension_numbers) {
if (shape.rank() != 4 && shape.rank() != 5) {
return Internal("Filter shape has unexpected rank.");
}
const int64_t dO = dimension_numbers.kernel_output_feature_dimension();
const int64_t dI = dimension_numbers.kernel_input_feature_dimension();
const int64_t dH = dimension_numbers.kernel_spatial_dimensions().at(0);
const int64_t dW = dimension_numbers.kernel_spatial_dimensions().at(1);
bool revectorize = shape.rank() == 5;
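  // For a rank-5 (pre-vectorized) filter, the vector dimension dZ is the one
  // index in [0, 4] not used by O/I/H/W; the five indices sum to 10.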
const int64_t dZ = revectorize ? 10 - dO - dI - dH - dW : -1;
const int64_t vsize = revectorize ? shape.dimensions(dZ) : 1;
if (shape.dimensions(dO) % 32 != 0 ||
shape.dimensions(dI) % (32 / vsize) != 0 ||
(revectorize && vsize != 4 && vsize != 32)) {
return Internal("Filter shape is not vectorizable.");
}
std::vector<int64_t> output = {
shape.dimensions(dO), shape.dimensions(dI) / (32 / vsize),
shape.dimensions(dH), shape.dimensions(dW), 32};
Shape output_shape = ShapeUtil::MakeShape(shape.element_type(), output);
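  // Maps each logical dimension to its position in the 8-D split shape below,
  // where O is split into (O/8, 4, 2) and I into (I/(32/vsize), 8, 4).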
auto calc_index = [&](int dim) {
bool split_v = vsize == 32;
return (revectorize
? (dI < dim ? 2 - split_v : 0) + (dZ < dim ? 1 + split_v : 0)
: (dI < dim ? 3 : 0)) +
(dO < dim ? 3 : 0) + (dH < dim) + (dW < dim);
};
int idx_O = calc_index(dO);
int idx_I = calc_index(dI);
int idx_H = calc_index(dH);
int idx_W = calc_index(dW);
int idx_Y = vsize == 32 ? calc_index(dZ) : idx_I + 1;
int idx_Z = vsize == 4 ? calc_index(dZ) : vsize == 32 ? idx_Y + 1 : idx_I + 2;
std::vector<int64_t> dims(8);
dims[idx_O] = shape.dimensions(dO) / 8;
dims[idx_O + 1] = 4;
dims[idx_O + 2] = 2;
dims[idx_I] = shape.dimensions(dI) / (32 / vsize);
dims[idx_Y] = 8;
dims[idx_Z] = 4;
dims[idx_H] = shape.dimensions(dH);
dims[idx_W] = shape.dimensions(dW);
Shape split_shape = ShapeUtil::MakeShape(shape.element_type(), dims);
std::vector<int64_t> permutation = {idx_I, idx_H, idx_W, idx_O,
idx_O + 2, idx_Y, idx_O + 1, idx_Z};
return CudnnReorderTransposeConfig{split_shape, output_shape, permutation};
}
absl::StatusOr<CudnnReorderTransposeConfig>
CudnnInferTransposeForBiasReordering(const Shape& shape) {
if (shape.rank() != 1) {
return Internal("Bias shape has unexpected rank.");
}
if (shape.dimensions(0) % 32 != 0) {
return Internal("Bias shape is not vectorizable.");
}
std::vector<int64_t> dims = {shape.dimensions(0) / 32, 4, 2, 4};
Shape split_shape = ShapeUtil::MakeShape(shape.element_type(), dims);
std::vector<int64_t> permutation = {0, 2, 1, 3};
return CudnnReorderTransposeConfig{split_shape, shape, permutation};
}
bool IsWorkspaceAllocationRoot(const HloInstruction& root) {
return root.IsRoot() && root.opcode() == HloOpcode::kTuple &&
root.operand_count() == 2 &&
root.operand(1)->IsCustomCall(kWorkspaceAllocationCustomCallTarget) &&
root.operand(1)->operand_count() == 0;
}
}
} | #include "xla/service/gpu/cudnn_support_utils.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::tsl::testing::IsOkAndHolds;
class CudnnSupportUtilsTest : public HloTestBase {
public:
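  // Finds the unique custom call with `target` in `module`; returns a
  // FailedPrecondition error if there is none or more than one.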
absl::StatusOr<HloCustomCallInstruction*> GetCustomCall(
xla::VerifiedHloModule* module, absl::string_view target) {
HloCustomCallInstruction* call = nullptr;
for (HloComputation* comp : module->MakeNonfusionComputations()) {
for (HloInstruction* inst : comp->instructions()) {
if (inst->IsCustomCall(target)) {
VLOG(1) << inst->ToString();
if (call != nullptr) {
return tsl::errors::FailedPrecondition(
"Found more than one custom call.");
}
call = Cast<HloCustomCallInstruction>(inst);
}
}
}
if (call == nullptr) {
return tsl::errors::FailedPrecondition(
"Did not find any matching custom call.");
}
return call;
}
};
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedIntegerConvolutionCheckVectorSize) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[8,10,10,128] parameter(0)
filter = s8[2,2,128,128] parameter(1)
ROOT result = (s8[8,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(module.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 7),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 1),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedIntegerConvolutionCheckComputeCapability) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[8,10,10,128] parameter(0)
filter = s8[2,2,128,128] parameter(1)
ROOT result = (s8[8,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(module.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({6, 0}, *conv, 4),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({6, 1}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 4}, *conv, 32),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedIntegerConvolutionCheckKind) {
auto moduleFwd = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (s8[32,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleFwd.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
auto moduleBwdFilter = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = f16[10,20,30,41] parameter(0)
output = f16[10,20,30,40] parameter(1)
result = (f16[2,2,41,40], u8[0]) custom-call(input, output),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBackwardFilter"
ROOT gte = f16[2,2,41,40] get-tuple-element(result), index=0
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleBwdFilter.get(), "__cudnn$convBackwardFilter"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
auto moduleBwdInput = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
output = f16[10,20,30,40] parameter(0)
filter = f16[2,2,41,40] parameter(1)
result = (f16[10,20,30,41], u8[0]) custom-call(output, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBackwardInput"
ROOT gte = f16[10,20,30,41] get-tuple-element(result), index=0
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleBwdInput.get(), "__cudnn$convBackwardInput"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckTypes) {
auto moduleS8InOut = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (s8[32,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleS8InOut.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
auto moduleS8InF32Out = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (f32[32,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleS8InF32Out.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
auto moduleF32InF32Out = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = f32[32,10,10,64] parameter(0)
filter = f32[2,2,64,128] parameter(1)
ROOT result = (f32[32,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleF32InF32Out.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckDims) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,10,64] parameter(0)
filter = s8[2,2,2,64,128] parameter(1)
ROOT result = (s8[32,10,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b012f_012io->b012f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(module.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckDilation) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (s8[32,20,20,128], u8[0]) custom-call(input, filter),
window={size=2x2 rhs_dilate=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(module.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckAlgo1Dims) {
auto moduleFilterCoversInput = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,2,2,64] parameter(0)
filter = s8[3,3,64,128] parameter(1)
ROOT result = (s8[32,2,2,128], u8[0]) custom-call(input, filter),
window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv, GetCustomCall(moduleFilterCoversInput.get(),
"__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
auto moduleFilterAlmostCoversInput = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,3,3,64] parameter(0)
filter = s8[3,3,64,128] parameter(1)
ROOT result = (s8[32,3,3,128], u8[0]) custom-call(input, filter),
window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(moduleFilterAlmostCoversInput.get(),
"__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
}
class ReorderFilterRank4Test : public ::testing::TestWithParam<std::string> {};
TEST_P(ReorderFilterRank4Test, InferTransposeRank4) {
auto input_dims = GetParam();
size_t dI = input_dims.find('i');
size_t dO = input_dims.find('o');
size_t dH = input_dims.find('0');
size_t dW = input_dims.find('1');
ConvolutionDimensionNumbers dnums;
dnums.set_kernel_input_feature_dimension(dI);
dnums.set_kernel_output_feature_dimension(dO);
dnums.add_kernel_spatial_dimensions(dH);
dnums.add_kernel_spatial_dimensions(dW);
int64_t shape_dims[4] = {0, 0, 0, 0};
shape_dims[dI] = 224;
shape_dims[dO] = 96;
shape_dims[dH] = 5;
shape_dims[dW] = 3;
Shape shape = ShapeUtil::MakeShape(U8, absl::MakeSpan(shape_dims));
auto input = HloInstruction::CreateParameter(0, shape, "input");
auto filter = HloInstruction::CreateParameter(1, shape, "filter");
TF_ASSERT_OK_AND_ASSIGN(CudnnReorderTransposeConfig inferred_config,
CudnnInferTransposeForFilterReordering(shape, dnums));
EXPECT_THAT(inferred_config.result_shape.dimensions(),
::testing::ElementsAre(96, 7, 5, 3, 32));
Shape reshaped = ShapeUtil::PermuteDimensions(
inferred_config.permutation, inferred_config.transpose_shape);
EXPECT_THAT(reshaped.dimensions(),
::testing::ElementsAre(7, 5, 3, 12, 2, 8, 4, 4));
EXPECT_EQ(inferred_config.permutation[6], inferred_config.permutation[4] - 1);
EXPECT_EQ(inferred_config.permutation[7], inferred_config.permutation[5] + 1);
}
std::vector<std::string> GeneratePermutations(std::string input_dims) {
std::sort(input_dims.begin(), input_dims.end());
std::vector<std::string> permutations;
do {
permutations.push_back(input_dims);
} while (std::next_permutation(input_dims.begin(), input_dims.end()));
return permutations;
}
INSTANTIATE_TEST_SUITE_P(ReorderTestSuite, ReorderFilterRank4Test,
::testing::ValuesIn(GeneratePermutations("01io")));
class ReorderFilterRank5Test
: public ::testing::TestWithParam<std::tuple<std::string, int>> {};
TEST_P(ReorderFilterRank5Test, InferTransposeRank5) {
auto [input_dims, vsize] = GetParam();
size_t dI = input_dims.find('i');
size_t dO = input_dims.find('o');
size_t dH = input_dims.find('0');
size_t dW = input_dims.find('1');
ConvolutionDimensionNumbers dnums;
dnums.set_kernel_input_feature_dimension(dI);
dnums.set_kernel_output_feature_dimension(dO);
dnums.add_kernel_spatial_dimensions(dH);
dnums.add_kernel_spatial_dimensions(dW);
int64_t shape_dims[5] = {vsize, vsize, vsize, vsize, vsize};
shape_dims[dI] = 224 / vsize;
shape_dims[dO] = 96;
shape_dims[dH] = 5;
shape_dims[dW] = 3;
Shape shape = ShapeUtil::MakeShape(U8, absl::MakeSpan(shape_dims));
auto input = HloInstruction::CreateParameter(0, shape, "input");
auto filter = HloInstruction::CreateParameter(1, shape, "filter");
TF_ASSERT_OK_AND_ASSIGN(CudnnReorderTransposeConfig inferred_config,
CudnnInferTransposeForFilterReordering(shape, dnums));
EXPECT_THAT(inferred_config.result_shape.dimensions(),
::testing::ElementsAre(96, 7, 5, 3, 32));
Shape reshaped = ShapeUtil::PermuteDimensions(
inferred_config.permutation, inferred_config.transpose_shape);
EXPECT_THAT(reshaped.dimensions(),
::testing::ElementsAre(7, 5, 3, 12, 2, 8, 4, 4));
EXPECT_EQ(inferred_config.permutation[6], inferred_config.permutation[4] - 1);
}
INSTANTIATE_TEST_SUITE_P(
ReorderTestSuite, ReorderFilterRank5Test,
::testing::Combine(::testing::ValuesIn(GeneratePermutations("01?io")),
::testing::Values(4, 32)));
class ReorderBiasTest : public ::testing::Test {};
TEST_F(ReorderBiasTest, InferTranspose) {
Shape shape = ShapeUtil::MakeShape(U8, {96});
auto bias = HloInstruction::CreateParameter(2, shape, "bias");
Shape unused = ShapeUtil::MakeNil();
auto input = HloInstruction::CreateParameter(0, unused, "input");
auto filter = HloInstruction::CreateParameter(1, unused, "filter");
TF_ASSERT_OK_AND_ASSIGN(CudnnReorderTransposeConfig inferred_config,
CudnnInferTransposeForBiasReordering(shape));
Shape reshaped = ShapeUtil::PermuteDimensions(
inferred_config.permutation, inferred_config.transpose_shape);
EXPECT_THAT(reshaped.dimensions(), ::testing::ElementsAre(3, 2, 4, 4));
EXPECT_EQ(inferred_config.permutation[2], 1);
EXPECT_EQ(inferred_config.permutation[3], 3);
}
}
}
} |
2,083 | cpp | tensorflow/tensorflow | target_util | third_party/xla/xla/service/gpu/target_util.cc | third_party/xla/xla/service/gpu/target_util_test.cc | #ifndef XLA_SERVICE_GPU_TARGET_UTIL_H_
#define XLA_SERVICE_GPU_TARGET_UTIL_H_
#include <string>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Value.h"
#include "llvm/TargetParser/Triple.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
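// Thread/block builtins that require target-specific lowering on NVPTX,
// AMDGPU, and SPIR.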
enum class TargetIntrinsicID {
kThreadIdx = 0,
kThreadIdy,
kThreadIdz,
kBlockIdx,
kBlockIdy,
kBlockIdz,
kBarrierId,
kBlockDimx,
kBlockDimy,
kBlockDimz,
kGroupBarrierId,
};
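// Math functions provided by target runtime libraries: libdevice (__nv_*) on
// NVPTX, OCML (__ocml_*) on AMDGPU, and OpenCL builtins (__spirv_ocl_*) on
// SPIR.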
enum class TargetDeviceFunctionID {
kAtan2 = 0,
kCbrt,
kCos,
kExp,
kExpm1,
kFmod,
kHypot,
kLog,
kLog1p,
kPow,
kRsqrt,
kSin,
kSqrt,
kTan,
kTanh,
kErf,
};
absl::StatusOr<TargetDeviceFunctionID> GetTargetDeviceFunctionID(HloOpcode);
llvm::CallInst* EmitDeviceFunctionCall(
const std::string& callee_name, absl::Span<llvm::Value* const> operands,
absl::Span<const PrimitiveType> input_types, PrimitiveType output_type,
const llvm::AttrBuilder& attributes, llvm::IRBuilder<>* b,
absl::string_view name = "");
llvm::CallInst* EmitCallToTargetIntrinsic(
TargetIntrinsicID intrinsic_id, absl::Span<llvm::Value* const> operands,
absl::Span<llvm::Type* const> overloaded_types, llvm::IRBuilder<>* b);
void AnnotateFunctionAsGpuKernel(llvm::Module* module, llvm::Function* func,
llvm::IRBuilder<>* b);
std::string ObtainDeviceFunctionName(TargetDeviceFunctionID func_id,
PrimitiveType output_type,
llvm::Triple target_triple);
}
}
#endif
#include "xla/service/gpu/target_util.h"
#include <functional>
#include <string>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FPEnv.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/TargetParser/Triple.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/llvm_ir/llvm_type_conversion_util.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/util.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
namespace {
using absl::StrCat;
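// Per-target lowering for a TargetIntrinsicID: always an LLVM intrinsic on
// NVPTX; either an intrinsic or a device-function-call emitter on AMDGPU and
// SPIR.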
struct TargetIntrinsics {
llvm::Intrinsic::ID nvptx_intrinsic;
std::variant<llvm::Intrinsic::ID,
std::function<llvm::CallInst*(llvm::IRBuilder<>*)>>
amdgpu_intrinsic_or_function;
std::variant<llvm::Intrinsic::ID,
std::function<llvm::CallInst*(llvm::IRBuilder<>*)>>
spir_intrinsic_or_function;
};
struct TargetIntrinsics GetIntrinsic(TargetIntrinsicID intrin) {
switch (intrin) {
case TargetIntrinsicID::kThreadIdx: {
return {
llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x,
llvm::Intrinsic::amdgcn_workitem_id_x,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z32__spirv_BuiltInLocalInvocationIdi", {b_->getInt32(0)},
{U32}, U64, {b_->getContext()}, b_);
},
};
}
case TargetIntrinsicID::kThreadIdy: {
return {
llvm::Intrinsic::nvvm_read_ptx_sreg_tid_y,
llvm::Intrinsic::amdgcn_workitem_id_y,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z32__spirv_BuiltInLocalInvocationIdi", {b_->getInt32(1)},
{U32}, U64, {b_->getContext()}, b_);
},
};
}
case TargetIntrinsicID::kThreadIdz: {
return {
llvm::Intrinsic::nvvm_read_ptx_sreg_tid_z,
llvm::Intrinsic::amdgcn_workitem_id_z,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z32__spirv_BuiltInLocalInvocationIdi", {b_->getInt32(2)},
{U32}, U64, {b_->getContext()}, b_);
},
};
}
case TargetIntrinsicID::kBlockIdx: {
return {
llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_x,
llvm::Intrinsic::amdgcn_workgroup_id_x,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall("_Z26__spirv_BuiltInWorkgroupIdi",
{b_->getInt32(0)}, {U32}, U64,
{b_->getContext()}, b_);
},
};
}
case TargetIntrinsicID::kBlockIdy: {
return {
llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_y,
llvm::Intrinsic::amdgcn_workgroup_id_y,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall("_Z26__spirv_BuiltInWorkgroupIdi",
{b_->getInt32(1)}, {U32}, U64,
{b_->getContext()}, b_);
},
};
}
case TargetIntrinsicID::kBlockIdz: {
return {
llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_z,
llvm::Intrinsic::amdgcn_workgroup_id_z,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall("_Z26__spirv_BuiltInWorkgroupIdi",
{b_->getInt32(2)}, {U32}, U64,
{b_->getContext()}, b_);
},
};
}
case TargetIntrinsicID::kBarrierId: {
return {llvm::Intrinsic::nvvm_barrier0, llvm::Intrinsic::amdgcn_s_barrier,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z22__spirv_ControlBarrierjjj",
{b_->getInt32(2), b_->getInt32(2), b_->getInt32(272)},
{U32, U32, U32}, U32,
llvm::AttrBuilder(b_->getContext())
.addAttribute(llvm::Attribute::Convergent),
b_);
}};
}
case TargetIntrinsicID::kBlockDimx: {
return {llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_x,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall("__ockl_get_local_size",
{b_->getInt32(0)}, {U32}, U64,
{b_->getContext()}, b_);
},
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z28__spirv_BuiltInWorkgroupSizei", {b_->getInt32(0)},
{U32}, U64, {b_->getContext()}, b_);
}};
}
case TargetIntrinsicID::kBlockDimy: {
return {llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_y,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall("__ockl_get_local_size",
{b_->getInt32(1)}, {U32}, U64,
{b_->getContext()}, b_);
},
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z28__spirv_BuiltInWorkgroupSizei", {b_->getInt32(1)},
{U32}, U64, {b_->getContext()}, b_);
}};
}
case TargetIntrinsicID::kBlockDimz: {
return {llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_z,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall("__ockl_get_local_size",
{b_->getInt32(2)}, {U32}, U64,
{b_->getContext()}, b_);
},
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z28__spirv_BuiltInWorkgroupSizei", {b_->getInt32(2)},
{U32}, U64, {b_->getContext()}, b_);
}};
}
case TargetIntrinsicID::kGroupBarrierId: {
return {llvm::Intrinsic::nvvm_bar_warp_sync,
llvm::Intrinsic::amdgcn_wave_barrier,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z22__spirv_ControlBarrierjjj",
{b_->getInt32(2), b_->getInt32(2), b_->getInt32(272)},
{U32, U32, U32}, U32,
llvm::AttrBuilder(b_->getContext())
.addAttribute(llvm::Attribute::Convergent),
b_);
}};
}
}
}
struct TargetDeviceFunction {
const std::string nvptx_root;
const std::string amdgpu_root;
const std::string spir_root;
};
struct TargetDeviceFunction GetDeviceFunctionRoot(
TargetDeviceFunctionID func_id) {
switch (func_id) {
case TargetDeviceFunctionID::kAtan2: {
return {"__nv_atan2", "__ocml_atan2", "_Z17__spirv_ocl_atan2"};
}
case TargetDeviceFunctionID::kCos: {
return {"__nv_cos", "__ocml_cos", "_Z15__spirv_ocl_cos"};
}
case TargetDeviceFunctionID::kErf: {
return {"__nv_erf", "__ocml_erf", "_Z15__spirv_ocl_erf"};
}
case TargetDeviceFunctionID::kExp: {
return {"__nv_exp", "__ocml_exp", "_Z15__spirv_ocl_exp"};
}
case TargetDeviceFunctionID::kExpm1: {
return {"__nv_expm1", "__ocml_expm1", "_Z17__spirv_ocl_expm1"};
}
case TargetDeviceFunctionID::kFmod: {
return {"__nv_fmod", "__ocml_fmod", "_Z16__spirv_ocl_fmod"};
}
case TargetDeviceFunctionID::kHypot: {
return {"__nv_hypot", "__ocml_hypot", "_Z17__spirv_ocl_hypot"};
}
case TargetDeviceFunctionID::kLog: {
return {"__nv_log", "__ocml_log", "_Z15__spirv_ocl_log"};
}
case TargetDeviceFunctionID::kLog1p: {
return {"__nv_log1p", "__ocml_log1p", "_Z17__spirv_ocl_log1p"};
}
case TargetDeviceFunctionID::kPow: {
return {"__nv_pow", "__ocml_pow", "_Z15__spirv_ocl_pow"};
}
case TargetDeviceFunctionID::kRsqrt: {
return {"__nv_rsqrt", "__ocml_rsqrt", "_Z17__spirv_ocl_rsqrt"};
}
case TargetDeviceFunctionID::kSin: {
return {"__nv_sin", "__ocml_sin", "_Z15__spirv_ocl_sin"};
}
case TargetDeviceFunctionID::kSqrt: {
return {"__nv_sqrt", "__ocml_sqrt", "_Z16__spirv_ocl_sqrt"};
}
case TargetDeviceFunctionID::kTan: {
return {"__nv_tan", "__ocml_tan", "_Z15__spirv_ocl_tan"};
}
case TargetDeviceFunctionID::kTanh: {
return {"__nv_tanh", "__ocml_tanh", "_Z16__spirv_ocl_tanh"};
}
case TargetDeviceFunctionID::kCbrt: {
return {"__nv_cbrt", "__ocml_cbrt", "_Z16__spirv_ocl_cbrt"};
}
}
}
}
absl::StatusOr<TargetDeviceFunctionID> GetTargetDeviceFunctionID(HloOpcode op) {
switch (op) {
case HloOpcode::kAtan2:
return TargetDeviceFunctionID::kAtan2;
case HloOpcode::kCos:
return TargetDeviceFunctionID::kCos;
case HloOpcode::kExp:
return TargetDeviceFunctionID::kExp;
case HloOpcode::kErf:
return TargetDeviceFunctionID::kErf;
case HloOpcode::kExpm1:
return TargetDeviceFunctionID::kExpm1;
case HloOpcode::kLog:
return TargetDeviceFunctionID::kLog;
case HloOpcode::kLog1p:
return TargetDeviceFunctionID::kLog1p;
case HloOpcode::kPower:
return TargetDeviceFunctionID::kPow;
case HloOpcode::kRemainder:
return TargetDeviceFunctionID::kFmod;
case HloOpcode::kRsqrt:
return TargetDeviceFunctionID::kRsqrt;
case HloOpcode::kSin:
return TargetDeviceFunctionID::kSin;
case HloOpcode::kSqrt:
return TargetDeviceFunctionID::kSqrt;
case HloOpcode::kTan:
return TargetDeviceFunctionID::kTan;
case HloOpcode::kTanh:
return TargetDeviceFunctionID::kTanh;
case HloOpcode::kCbrt:
return TargetDeviceFunctionID::kCbrt;
default:
break;
}
return NotFound("The HLO opcode %s is not mapped to a device function",
HloOpcodeString(op));
}
std::string ObtainDeviceFunctionName(TargetDeviceFunctionID func_id,
PrimitiveType output_type,
llvm::Triple target_triple) {
struct TargetDeviceFunction gpu_root_names = GetDeviceFunctionRoot(func_id);
if (target_triple.isNVPTX()) {
if (output_type == F32) {
return StrCat(gpu_root_names.nvptx_root, "f");
} else if (output_type == F64) {
return gpu_root_names.nvptx_root;
} else {
LOG(FATAL) << "Unexpected type while getting device function name: "
<< primitive_util::LowercasePrimitiveTypeName(output_type);
}
} else if (target_triple.getArch() == llvm::Triple::amdgcn) {
if (output_type == F32) {
return StrCat(gpu_root_names.amdgpu_root, "_f32");
} else if (output_type == F64) {
return StrCat(gpu_root_names.amdgpu_root, "_f64");
} else {
LOG(FATAL) << "Unexpected type while getting device function name.";
}
} else if (target_triple.isSPIR()) {
if (output_type == F32) {
if (gpu_root_names.spir_root == "_Z17__spirv_ocl_hypot" ||
gpu_root_names.spir_root == "_Z15__spirv_ocl_pow" ||
gpu_root_names.spir_root == "_Z17__spirv_ocl_atan2" ||
gpu_root_names.spir_root == "_Z16__spirv_ocl_fmod") {
return StrCat(gpu_root_names.spir_root, "ff");
} else {
return StrCat(gpu_root_names.spir_root, "f");
}
} else if (output_type == F64) {
if (gpu_root_names.spir_root == "_Z17__spirv_ocl_hypot" ||
gpu_root_names.spir_root == "_Z15__spirv_ocl_pow" ||
gpu_root_names.spir_root == "_Z17__spirv_ocl_atan2" ||
gpu_root_names.spir_root == "_Z16__spirv_ocl_fmod") {
return StrCat(gpu_root_names.spir_root, "dd");
} else {
return StrCat(gpu_root_names.spir_root, "d");
}
} else {
LOG(FATAL) << "Unexpected type while getting device function name.";
}
} else {
LOG(FATAL) << "Invalid triple " << target_triple.str();
}
}
llvm::CallInst* EmitDeviceFunctionCall(
const std::string& callee_name, absl::Span<llvm::Value* const> operands,
absl::Span<const PrimitiveType> input_types, PrimitiveType output_type,
const llvm::AttrBuilder& attributes, llvm::IRBuilder<>* b,
absl::string_view name) {
std::vector<llvm::Type*> ir_input_types;
llvm::Module* module = b->GetInsertBlock()->getModule();
llvm::Triple target_triple = llvm::Triple(module->getTargetTriple());
for (PrimitiveType input_type : input_types) {
ir_input_types.push_back(
llvm_ir::PrimitiveTypeToIrType(input_type, module));
}
llvm::FunctionType* callee_type = llvm::FunctionType::get(
llvm_ir::PrimitiveTypeToIrType(output_type, module),
ir_input_types,
false);
llvm::Function* callee = llvm::dyn_cast<llvm::Function>(
b->GetInsertBlock()
->getModule()
->getOrInsertFunction(callee_name, callee_type)
.getCallee());
callee->addFnAttrs(attributes);
if (target_triple.isSPIR())
callee->setCallingConv(llvm::CallingConv::SPIR_FUNC);
return b->CreateCall(callee, llvm_ir::AsArrayRef(operands), name.data());
}
llvm::CallInst* EmitCallToTargetIntrinsic(
TargetIntrinsicID intrinsic_id, absl::Span<llvm::Value* const> operands,
absl::Span<llvm::Type* const> overloaded_types, llvm::IRBuilder<>* b) {
llvm::Module* module = b->GetInsertBlock()->getModule();
struct TargetIntrinsics gpu_intrinsic_id = GetIntrinsic(intrinsic_id);
llvm::Triple target_triple = llvm::Triple(module->getTargetTriple());
llvm::Intrinsic::ID llvm_intrinsic_id = llvm::Intrinsic::not_intrinsic;
if (target_triple.isNVPTX()) {
llvm_intrinsic_id = gpu_intrinsic_id.nvptx_intrinsic;
} else if (target_triple.getArch() == llvm::Triple::amdgcn) {
llvm::Intrinsic::ID* llvm_intrinsic_id_ptr =
std::get_if<llvm::Intrinsic::ID>(
&gpu_intrinsic_id.amdgpu_intrinsic_or_function);
if (llvm_intrinsic_id_ptr) {
llvm_intrinsic_id = *llvm_intrinsic_id_ptr;
} else {
std::function<llvm::CallInst*(llvm::IRBuilder<>*)>* builder_func =
std::get_if<std::function<llvm::CallInst*(llvm::IRBuilder<>*)>>(
&gpu_intrinsic_id.amdgpu_intrinsic_or_function);
return (*builder_func)(b);
}
} else if (target_triple.isSPIR()) {
llvm::Intrinsic::ID* llvm_intrinsic_id_ptr =
std::get_if<llvm::Intrinsic::ID>(
&gpu_intrinsic_id.spir_intrinsic_or_function);
if (llvm_intrinsic_id_ptr) {
llvm_intrinsic_id = *llvm_intrinsic_id_ptr;
} else {
std::function<llvm::CallInst*(llvm::IRBuilder<>*)>* builder_func =
std::get_if<std::function<llvm::CallInst*(llvm::IRBuilder<>*)>>(
&gpu_intrinsic_id.spir_intrinsic_or_function);
return (*builder_func)(b);
}
} else {
LOG(FATAL) << "Invalid triple " << target_triple.str();
}
llvm::Function* intrinsic = llvm::Intrinsic::getDeclaration(
module, llvm_intrinsic_id, llvm_ir::AsArrayRef(overloaded_types));
return b->CreateCall(intrinsic, llvm_ir::AsArrayRef(operands));
}
void AnnotateFunctionAsGpuKernel(llvm::Module* module, llvm::Function* func,
llvm::IRBuilder<>* b) {
llvm::Triple target_triple = llvm::Triple(module->getTargetTriple());
if (target_triple.isNVPTX()) {
llvm::LLVMContext& context = module->getContext();
llvm::NamedMDNode* nvvm_annotations_node =
module->getOrInsertNamedMetadata("nvvm.annotations");
nvvm_annotations_node->addOperand(llvm::MDNode::get(
context, {llvm::ConstantAsMetadata::get(func),
llvm::MDString::get(context, "kernel"),
llvm::ConstantAsMetadata::get(b->getInt32(1))}));
} else if (target_triple.getArch() == llvm::Triple::amdgcn) {
func->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
func->addFnAttr("amdgpu-flat-work-group-size", "1, 1024");
} else if (target_triple.isSPIR()) {
func->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
} else {
LOG(FATAL) << "Invalid triple " << target_triple.str();
}
}
}
} | #include "xla/service/gpu/target_util.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
class TargetUtilTest : public testing::Test {
public:
TargetUtilTest() : module_("test", ctx_), builder_(ctx_) {}
protected:
void SetUp() override {
auto fn = llvm::Function::Create(
llvm::FunctionType::get(llvm::Type::getVoidTy(ctx_), {}),
llvm::Function::LinkageTypes::ExternalLinkage, "fn", module_);
auto block = llvm::BasicBlock::Create(ctx_, "blk", fn);
builder_.SetInsertPoint(block);
}
llvm::LLVMContext ctx_;
llvm::Module module_;
llvm::IRBuilder<> builder_;
};
TEST_F(TargetUtilTest, NVPTXGroupBarrier) {
module_.setTargetTriple("nvptx");
EmitCallToTargetIntrinsic(TargetIntrinsicID::kGroupBarrierId,
{builder_.getInt32(-1)}, {},
&builder_);
builder_.CreateRetVoid();
EXPECT_FALSE(llvm::verifyModule(module_, &llvm::errs()));
}
TEST_F(TargetUtilTest, AMDGCNGroupBarrier) {
module_.setTargetTriple("amdgcn");
EmitCallToTargetIntrinsic(TargetIntrinsicID::kGroupBarrierId, {}, {},
&builder_);
builder_.CreateRetVoid();
EXPECT_FALSE(llvm::verifyModule(module_, &llvm::errs()));
}
}
}
} |
2,084 | cpp | tensorflow/tensorflow | collective_permute_valid_iteration_annotator | third_party/xla/xla/service/gpu/transforms/collective_permute_valid_iteration_annotator.cc | third_party/xla/xla/service/gpu/transforms/collective_permute_valid_iteration_annotator_test.cc | #ifndef XLA_SERVICE_GPU_COLLECTIVE_PERMUTE_VALID_ITERATION_ANNOTATOR_H_
#define XLA_SERVICE_GPU_COLLECTIVE_PERMUTE_VALID_ITERATION_ANNOTATOR_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
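// Annotates cyclic collective-permutes inside pipelined while loops with the
// _xla_send_recv_validation attribute, which records the range of loop
// iterations in which each source-target pair exchanges valid data.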
class CollectivePermuteValidIterationAnnotator : public HloModulePass {
public:
CollectivePermuteValidIterationAnnotator() = default;
absl::string_view name() const override {
return "collective-permute-valid-iteration-annotator";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/gpu/collective_permute_valid_iteration_annotator.h"
#include "xla/literal_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/while_loop_analysis.h"
namespace xla {
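// Returns the unique non-constant operand of `instr`; CHECK-fails if there is
// none, or if two distinct non-constant operands exist.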
static const HloInstruction* NonConstantOperand(const HloInstruction* instr) {
const HloInstruction* result = nullptr;
for (const HloInstruction* operand : instr->operands()) {
if (!operand->IsConstant()) {
if (result != nullptr) {
CHECK_EQ(result, operand);
}
result = operand;
}
}
CHECK_NE(result, nullptr);
return result;
}
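// Returns the constant step of the while loop's induction variable, or
// std::nullopt if the induction variable or its update cannot be
// pattern-matched as `indvar + step`.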
std::optional<int64_t> GetStep(HloInstruction* while_inst) {
std::optional<int64_t> indvar_tuple_idx =
GetLoopInductionVarTupleIdx(while_inst);
if (!indvar_tuple_idx) {
return std::nullopt;
};
auto* while_body_indvar_update =
while_inst->while_body()->root_instruction()->mutable_operand(
*indvar_tuple_idx);
auto* while_body_indvar = NonConstantOperand(while_body_indvar_update);
HloInstruction* trip_count_increase_step_instr = nullptr;
if (!Match(while_body_indvar_update,
match::AddAnyOrder(match::Op().Is(while_body_indvar),
match::Op(&trip_count_increase_step_instr)))) {
return std::nullopt;
}
return LiteralUtil::LiteralAsScalarInt64(
trip_count_increase_step_instr->literal());
}
absl::StatusOr<bool> CollectivePermuteValidIterationAnnotator::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp : module->computations(execution_threads)) {
for (HloInstruction* inst : comp->instructions()) {
if (inst->opcode() != HloOpcode::kCollectivePermute) {
continue;
}
if (inst->frontend_attributes().map().find(kSendRecvValidationAttr) !=
inst->frontend_attributes().map().end()) {
continue;
}
auto sourceTargetPairs = inst->source_target_pairs();
if (!IsForwardCycle(sourceTargetPairs) &&
!IsBackwardCycle(sourceTargetPairs)) {
continue;
}
VLOG(2) << "Collective permute with cycle: " << inst->ToString();
int64_t max_device_num = -1;
for (auto [source, target] : sourceTargetPairs) {
max_device_num = std::max(std::max(source, target), max_device_num);
}
int64_t num_devices = max_device_num + 1;
HloInstruction* whileOp = inst->parent()->WhileCallInstruction();
if (whileOp == nullptr) {
VLOG(2) << "No surrounding while op found. Ignoring " << inst->name();
continue;
}
if (!whileOp->frontend_attributes().map().contains(
"is_pipelined_while_loop"))
continue;
TF_ASSIGN_OR_RETURN(WhileLoopBackendConfig config,
whileOp->backend_config<WhileLoopBackendConfig>());
if (!config.has_known_trip_count()) {
VLOG(2) << "Trip count for while loop (" << whileOp->name()
<< "): unknown";
continue;
}
int64_t trip_count = config.known_trip_count().n();
std::optional<int64_t> step = GetStep(whileOp);
VLOG(2) << "Trip count for while loop (" << whileOp->name()
<< "): " << trip_count;
if (!step) {
VLOG(2) << "Could not find step for while operation";
continue;
}
VLOG(2) << "Step for while loop (" << whileOp->name() << "): " << *step;
if (*step != 1) {
VLOG(2) << "Step is not 1. Skipping...";
continue;
}
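      // With step 1, pair i exchanges valid data during iterations
      // [i, i + trip_count - num_devices].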
int64_t offset = trip_count - num_devices;
std::vector<std::pair<int64_t, int64_t>> sendRecvValidation(
sourceTargetPairs.size());
for (size_t currIdx = 0; currIdx < sourceTargetPairs.size(); currIdx++) {
sendRecvValidation[currIdx] = {currIdx, currIdx + offset};
}
if (IsBackwardCycle(sourceTargetPairs)) {
std::reverse(sendRecvValidation.begin(), sendRecvValidation.end());
}
xla::FrontendAttributes attributes;
std::string iteration_instances =
"{" +
absl::StrJoin(sendRecvValidation, ",",
[](std::string* out, std::pair<int64_t, int64_t> item) {
absl::StrAppend(out, "{", item.first, ",",
item.second, "}");
}) +
"}";
(*attributes.mutable_map())[kSendRecvValidationAttr] =
iteration_instances;
inst->add_frontend_attributes(attributes);
VLOG(1) << "Adding " << kSendRecvValidationAttr << " to " << inst->name()
<< ": " << iteration_instances;
changed = true;
}
}
return changed;
}
} | #include "xla/service/gpu/collective_permute_valid_iteration_annotator.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/while_loop_trip_count_annotator.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
using CollectivePermuteValidIterationAnnotatorTest = HloTestBase;
TEST_F(CollectivePermuteValidIterationAnnotatorTest, NoChange) {
absl::string_view hlo_string = R"(
HloModule test, entry_computation_layout={()->(s32[], s32[])}
%Body (param: (s32[], s32[])) -> (s32[], s32[]) {
%param = (s32[], s32[]) parameter(0)
%i = s32[] get-tuple-element((s32[], s32[]) %param), index=1
%one = s32[] constant(1)
%i_plus_one = s32[] add(s32[] %i, s32[] %one)
%permute = s32[] collective-permute(%i_plus_one), channel_id=1, source_target_pairs={{0,1},{1,2},{2,3},{3,0}}
ROOT %tuple = (s32[], s32[]) tuple(s32[] %permute, s32[] %permute)
}
%Cond (param.1: (s32[], s32[])) -> pred[] {
%param.1 = (s32[], s32[]) parameter(0)
%i.1 = s32[] get-tuple-element((s32[], s32[]) %param.1), index=1
%trip_count = s32[] constant(10)
ROOT %done = pred[] compare(s32[] %i.1, s32[] %trip_count), direction=LT
}
ENTRY %test () -> (s32[], s32[]) {
%i_start = s32[] constant(0)
%p_start = s32[] constant(0)
%initial_tuple = (s32[], s32[]) tuple(s32[] %i_start, s32[] %p_start)
ROOT %while = (s32[], s32[]) while((s32[], s32[]) %initial_tuple), condition=%Cond, body=%Body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string, 1, 4));
HloPassPipeline pipeline("my-pass-pipeline");
pipeline.AddPass<WhileLoopTripCountAnnotator>();
pipeline.AddPass<CollectivePermuteValidIterationAnnotator>();
TF_ASSERT_OK_AND_ASSIGN(bool changed, pipeline.Run(module.get()));
EXPECT_FALSE(changed);
HloCollectivePermuteInstruction* cp =
DynCastOrNull<HloCollectivePermuteInstruction>(
FindInstruction(module.get(), HloOpcode::kCollectivePermute));
ASSERT_NE(cp, nullptr);
auto sendRecvValidationIt =
cp->frontend_attributes().map().find(kSendRecvValidationAttr);
ASSERT_EQ(sendRecvValidationIt, cp->frontend_attributes().map().end());
}
TEST_F(CollectivePermuteValidIterationAnnotatorTest, ForwardCycle) {
absl::string_view hlo_string = R"(
HloModule test, entry_computation_layout={()->(s32[], s32[])}
%Body (param: (s32[], s32[])) -> (s32[], s32[]) {
%param = (s32[], s32[]) parameter(0)
%i = s32[] get-tuple-element((s32[], s32[]) %param), index=1
%one = s32[] constant(1)
%i_plus_one = s32[] add(s32[] %i, s32[] %one)
%permute = s32[] collective-permute(%i_plus_one), channel_id=1, source_target_pairs={{0,1},{1,2},{2,3},{3,0}}
ROOT %tuple = (s32[], s32[]) tuple(s32[] %permute, s32[] %i_plus_one)
}
%Cond (param.1: (s32[], s32[])) -> pred[] {
%param.1 = (s32[], s32[]) parameter(0)
%i.1 = s32[] get-tuple-element((s32[], s32[]) %param.1), index=1
%trip_count = s32[] constant(10)
ROOT %done = pred[] compare(s32[] %i.1, s32[] %trip_count), direction=LT
}
ENTRY %test () -> (s32[], s32[]) {
%i_start = s32[] constant(0)
%p_start = s32[] constant(0)
%initial_tuple = (s32[], s32[]) tuple(s32[] %i_start, s32[] %p_start)
ROOT %while = (s32[], s32[]) while((s32[], s32[]) %initial_tuple), condition=%Cond, body=%Body, frontend_attributes={is_pipelined_while_loop="true"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string, 1, 4));
HloPassPipeline pipeline("my-pass-pipeline");
pipeline.AddPass<WhileLoopTripCountAnnotator>();
pipeline.AddPass<CollectivePermuteValidIterationAnnotator>();
TF_ASSERT_OK_AND_ASSIGN(bool changed, pipeline.Run(module.get()));
EXPECT_TRUE(changed);
HloCollectivePermuteInstruction* cp =
DynCastOrNull<HloCollectivePermuteInstruction>(
FindInstruction(module.get(), HloOpcode::kCollectivePermute));
ASSERT_NE(cp, nullptr);
auto sendRecvValidationIt =
cp->frontend_attributes().map().find(kSendRecvValidationAttr);
ASSERT_NE(sendRecvValidationIt, cp->frontend_attributes().map().end());
std::string sendRecvValidationAttr = sendRecvValidationIt->second;
EXPECT_EQ(sendRecvValidationAttr, "{{0,6},{1,7},{2,8},{3,9}}");
}
TEST_F(CollectivePermuteValidIterationAnnotatorTest, BackwardCycle) {
absl::string_view hlo_string = R"(
HloModule test, entry_computation_layout={()->(s32[], s32[])}
%Body (param: (s32[], s32[])) -> (s32[], s32[]) {
%param = (s32[], s32[]) parameter(0)
%i = s32[] get-tuple-element((s32[], s32[]) %param), index=1
%one = s32[] constant(1)
%i_plus_one = s32[] add(s32[] %i, s32[] %one)
%permute = s32[] collective-permute(%i_plus_one), channel_id=1, source_target_pairs={{0,3},{1,0},{2,1},{3,2}}
ROOT %tuple = (s32[], s32[]) tuple(s32[] %permute, s32[] %i_plus_one)
}
%Cond (param.1: (s32[], s32[])) -> pred[] {
%param.1 = (s32[], s32[]) parameter(0)
%i.1 = s32[] get-tuple-element((s32[], s32[]) %param.1), index=1
%trip_count = s32[] constant(10)
ROOT %done = pred[] compare(s32[] %i.1, s32[] %trip_count), direction=LT
}
ENTRY %test () -> (s32[], s32[]) {
%i_start = s32[] constant(0)
%p_start = s32[] constant(0)
%initial_tuple = (s32[], s32[]) tuple(s32[] %i_start, s32[] %p_start)
ROOT %while = (s32[], s32[]) while((s32[], s32[]) %initial_tuple), condition=%Cond, body=%Body, frontend_attributes={is_pipelined_while_loop="true"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string, 1, 4));
HloPassPipeline pipeline("my-pass-pipeline");
pipeline.AddPass<WhileLoopTripCountAnnotator>();
pipeline.AddPass<CollectivePermuteValidIterationAnnotator>();
TF_ASSERT_OK_AND_ASSIGN(bool changed, pipeline.Run(module.get()));
EXPECT_TRUE(changed);
HloCollectivePermuteInstruction* cp =
DynCastOrNull<HloCollectivePermuteInstruction>(
FindInstruction(module.get(), HloOpcode::kCollectivePermute));
ASSERT_NE(cp, nullptr);
auto sendRecvValidationIt =
cp->frontend_attributes().map().find(kSendRecvValidationAttr);
ASSERT_NE(sendRecvValidationIt, cp->frontend_attributes().map().end());
std::string sendRecvValidationAttr = sendRecvValidationIt->second;
EXPECT_EQ(sendRecvValidationAttr, "{{3,9},{2,8},{1,7},{0,6}}");
}
}
} |
2,085 | cpp | tensorflow/tensorflow | gemv_rewriter | third_party/xla/xla/service/gpu/transforms/gemv_rewriter.cc | third_party/xla/xla/service/gpu/transforms/gemv_rewriter_test.cc | #ifndef XLA_SERVICE_GPU_GEMV_REWRITER_H_
#define XLA_SERVICE_GPU_GEMV_REWRITER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
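// Rewrites a matrix-vector dot (GEMV) into a matrix-matrix dot (GEMM) by
// bitcasting a degenerate unit dimension onto the vector operand and the
// result, so the dot can use GEMM code paths.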
class GemvRewriter : public HloModulePass {
public:
absl::string_view name() const override { return "gemv-rewriter"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/gemv_rewriter.h"
#include <cstdint>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/shape.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
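// Extends a normalized (descending) layout with one extra minor-most
// dimension; returns an error for non-normalized layouts.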
absl::StatusOr<Layout> GetLayoutWithNewMinorMostDimension(
const Layout& layout) {
if (!LayoutUtil::IsMonotonicWithDim0Major(layout)) {
return absl::InvalidArgumentError("Layout is not normalized.");
}
return LayoutUtil::MakeDescendingLayout(layout.minor_to_major_size() + 1);
}
class GemvRewriterVisitor : public DfsHloRewriteVisitor {
public:
absl::Status HandleDot(HloInstruction* instr) override {
HloDotInstruction* dot = Cast<HloDotInstruction>(instr);
const DotDimensionNumbers& dim_numbers = dot->dot_dimension_numbers();
HloInstruction* lhs = dot->mutable_operand(0);
HloInstruction* rhs = dot->mutable_operand(1);
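    // The dot is a GEMV exactly when one side has a non-contracting dimension
    // and the other does not; only that case is rewritten below.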
bool lhs_has_non_contracting_dim =
lhs->shape().rank() ==
dim_numbers.lhs_batch_dimensions_size() +
dim_numbers.lhs_contracting_dimensions_size() + 1;
bool rhs_has_non_contracting_dim =
rhs->shape().rank() ==
dim_numbers.rhs_batch_dimensions_size() +
dim_numbers.rhs_contracting_dimensions_size() + 1;
if (lhs_has_non_contracting_dim && rhs_has_non_contracting_dim) {
return absl::OkStatus();
}
if (!lhs_has_non_contracting_dim && !rhs_has_non_contracting_dim) {
return absl::OkStatus();
}
if (dot->shape().is_dynamic()) {
return absl::OkStatus();
}
changed_ = true;
HloComputation* computation = dot->parent();
HloInstruction* new_lhs = lhs;
if (!lhs_has_non_contracting_dim) {
const Shape& lhs_shape = lhs->shape();
absl::Span<const int64_t> lhs_dimensions = lhs_shape.dimensions();
std::vector<int64_t> new_lhs_dimensions(lhs_dimensions.begin(),
lhs_dimensions.end());
new_lhs_dimensions.push_back(1);
Shape new_lhs_shape(
lhs_shape.element_type(), new_lhs_dimensions,
absl::InlinedVector<bool, 4>(new_lhs_dimensions.size(), false),
{});
TF_ASSIGN_OR_RETURN(
*new_lhs_shape.mutable_layout(),
GetLayoutWithNewMinorMostDimension(lhs_shape.layout()));
new_lhs = computation->AddInstruction(
HloInstruction::CreateBitcast(new_lhs_shape, lhs));
}
HloInstruction* new_rhs = rhs;
if (!rhs_has_non_contracting_dim) {
const Shape& rhs_shape = rhs->shape();
absl::Span<const int64_t> rhs_dimensions = rhs_shape.dimensions();
std::vector<int64_t> new_rhs_dimensions(rhs_dimensions.begin(),
rhs_dimensions.end());
new_rhs_dimensions.push_back(1);
Shape new_rhs_shape(
rhs_shape.element_type(), new_rhs_dimensions,
absl::InlinedVector<bool, 4>(new_rhs_dimensions.size(), false),
{});
TF_ASSIGN_OR_RETURN(
*new_rhs_shape.mutable_layout(),
GetLayoutWithNewMinorMostDimension(rhs_shape.layout()));
new_rhs = computation->AddInstruction(
HloInstruction::CreateBitcast(new_rhs_shape, rhs));
}
std::vector<int64_t> new_out_dimensions;
new_out_dimensions.reserve(dot->shape().dimensions().size() + 1);
for (int64_t dim_size : dot->shape().dimensions()) {
new_out_dimensions.push_back(dim_size);
}
if (!lhs_has_non_contracting_dim) {
int non_contracting_dim_size = new_out_dimensions.back();
new_out_dimensions[new_out_dimensions.size() - 1] = 1;
new_out_dimensions.push_back(non_contracting_dim_size);
} else {
new_out_dimensions.push_back(1);
}
Shape new_out_shape(
dot->shape().element_type(), new_out_dimensions,
absl::InlinedVector<bool, 4>(new_out_dimensions.size(), false),
{});
TF_ASSIGN_OR_RETURN(
*new_out_shape.mutable_layout(),
GetLayoutWithNewMinorMostDimension(dot->shape().layout()));
HloInstruction* new_dot =
computation->AddInstruction(HloInstruction::CreateDot(
new_out_shape, new_lhs, new_rhs, dot->dot_dimension_numbers(),
dot->precision_config()));
HloInstruction* bitcast = computation->AddInstruction(
HloInstruction::CreateBitcast(dot->shape(), new_dot));
return computation->ReplaceInstruction(dot, bitcast);
}
bool changed() const { return changed_; }
private:
bool changed_ = false;
};
}
absl::StatusOr<bool> GemvRewriter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
GemvRewriterVisitor gemv_rewriter;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_RETURN_IF_ERROR(computation->Accept(&gemv_rewriter));
}
return gemv_rewriter.changed();
}
}
} | #include "xla/service/gpu/gemv_rewriter.h"
#include <memory>
#include <optional>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
class GemvRewriterTest : public HloTestBase {};
TEST_F(GemvRewriterTest, RewriteMatrixVectorMultiplicationToGemm) {
const char* hlo = R"(
HloModule m
ENTRY e {
p0 = f32[32,7] parameter(0)
p1 = f32[7] parameter(1)
ROOT d = f32[32] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
const char* expected = R"()
})";
RunAndFilecheckHloRewrite(hlo, GemvRewriter(), expected);
}
TEST_F(GemvRewriterTest, RewriteVectorMatrixMultiplicationToGemm) {
const char* hlo = R"(
HloModule m
ENTRY e {
p0 = f32[7] parameter(0)
p1 = f32[7,32] parameter(1)
ROOT d = f32[32] dot(p0, p1),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
})";
const char* expected = R"()
})";
RunAndFilecheckHloRewrite(hlo, GemvRewriter(), expected);
}
TEST_F(GemvRewriterTest, RewriteMatrixVectorMultiplicationWithBatch) {
const char* hlo = R"(
HloModule m
ENTRY e {
p0 = f32[2,5,32,7] parameter(0)
p1 = f32[2,5,7] parameter(1)
ROOT d = f32[2,5,32] dot(p0, p1),
lhs_batch_dims={0,1}, rhs_batch_dims={0,1},
lhs_contracting_dims={3}, rhs_contracting_dims={2}
})";
const char* expected = R"()
})";
RunAndFilecheckHloRewrite(hlo, GemvRewriter(), expected);
}
TEST_F(GemvRewriterTest, DotNotRewriteVectorVectorMultiplication) {
const char* hlo = R"(
HloModule m
ENTRY e {
p0 = f32[7] parameter(0)
p1 = f32[7] parameter(1)
ROOT d = f32[] dot(p0, p1),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
})";
RunAndFilecheckHloRewrite(hlo, GemvRewriter(), std::nullopt);
}
TEST_F(GemvRewriterTest, DotNotRewriteMatrixMatrixMultiplication) {
const char* hlo = R"(
HloModule m
ENTRY e {
p0 = f32[5,7] parameter(0)
p1 = f32[7,32] parameter(1)
ROOT d = f32[5,32] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
RunAndFilecheckHloRewrite(hlo, GemvRewriter(), std::nullopt);
}
TEST_F(GemvRewriterTest, DoNotRewriteDotsWithNonNormalizedLayout) {
const char* hlo = R"(
HloModule m
ENTRY e {
p0 = f32[5,32,7]{2,1,0} parameter(0)
p1 = f32[5,7]{0,1} parameter(1)
ROOT d = f32[5,32]{0,1} dot(p0, p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
GemvRewriter rewriter;
absl::StatusOr<bool> result = this->RunHloPass(&rewriter, module.get());
EXPECT_FALSE(result.ok());
EXPECT_EQ(result.status().message(), "Layout is not normalized.");
}
}
} |
2,086 | cpp | tensorflow/tensorflow | stream_attribute_async_wrapper | third_party/xla/xla/service/gpu/transforms/stream_attribute_async_wrapper.cc | third_party/xla/xla/service/gpu/transforms/stream_attribute_async_wrapper_test.cc | #ifndef XLA_SERVICE_GPU_STREAM_ATTRIBUTE_ASYNC_WRAPPER_H_
#define XLA_SERVICE_GPU_STREAM_ATTRIBUTE_ASYNC_WRAPPER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla::gpu {
class StreamAttributeAsyncWrapper : public HloModulePass {
public:
inline static constexpr char kParallelExecutionThread[] = "parallel";
absl::string_view name() const override {
return "async-stream-attribute-wrapper";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/gpu/stream_attribute_async_wrapper.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
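// Wraps `instr` in an async-start/async-done pair on the "parallel"
// execution thread when its GpuBackendConfig assigns a non-default
// operation queue id. `force_earliest_schedule` is cleared on the done op
// so the scheduling constraint stays attached to the start op only.
//
// Rough sketch of the rewrite (inferred from the unit test below, not an
// exhaustive specification):
//   add = f32[1] add(p0, p1), backend_config={"operation_queue_id":"1", ...}
// becomes
//   start = (...) async-start(p0, p1), async_execution_thread="parallel"
//   done  = f32[1] async-done(start)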
static absl::StatusOr<bool> AsynchronizeInstruction(HloInstruction* instr) {
auto instr_gpu_config = instr->backend_config<GpuBackendConfig>();
if (!instr_gpu_config.ok() || instr_gpu_config->operation_queue_id() ==
Thunk::kDefaultExecutionStreamId.value()) {
return false;
}
HloComputation* computation = instr->parent();
TF_ASSIGN_OR_RETURN(
HloInstruction * done,
computation->CreateAsyncInstructions(
instr, {}, StreamAttributeAsyncWrapper::kParallelExecutionThread,
          /*replace=*/true));
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
done->backend_config<GpuBackendConfig>());
gpu_config.set_force_earliest_schedule(false);
TF_RETURN_IF_ERROR(done->set_backend_config(gpu_config));
VLOG(5) << "Created async instruction: " << done->ToString();
return true;
}
}
absl::StatusOr<bool> StreamAttributeAsyncWrapper::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
2, "StreamAttributeAsyncWrapper::Run(), before:\n" + module->ToString());
bool changed = false;
for (const HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instr : comp->instructions()) {
TF_ASSIGN_OR_RETURN(bool result, AsynchronizeInstruction(instr));
changed |= result;
}
}
XLA_VLOG_LINES(
2, "StreamAttributeAsyncWrapper::Run(), after:\n" + module->ToString());
return changed;
}
} | #include "xla/service/gpu/stream_attribute_async_wrapper.h"
#include <memory>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
using StreamAttributeAsyncWrapperTest = HloTestBase;
TEST_F(StreamAttributeAsyncWrapperTest, NonDefaultOpIsWrapped) {
constexpr absl::string_view kHloString = R"(
HloModule ModuleWithAsync
ENTRY entry {
p1_32 = f32[1] parameter(0)
p2_32 = f32[1] parameter(1)
add_32 = f32[1] add(p1_32, p2_32), backend_config={"operation_queue_id":"1", "wait_on_operation_queues":[], "force_earliest_schedule":true}
ROOT exp_32 = f32[1] exponential(add_32), backend_config={"operation_queue_id":"0", "wait_on_operation_queues":[1]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
StreamAttributeAsyncWrapper async_wrapper;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, async_wrapper.Run(module.get()));
EXPECT_TRUE(changed);
const HloInstruction* producer =
module->entry_computation()->root_instruction()->operand(0);
EXPECT_EQ(producer->opcode(), HloOpcode::kAsyncDone);
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig done_gpu_config,
producer->backend_config<GpuBackendConfig>());
EXPECT_EQ(done_gpu_config.force_earliest_schedule(), false);
const HloInstruction* producer_start = producer->operand(0);
EXPECT_EQ(producer_start->opcode(), HloOpcode::kAsyncStart);
const xla::HloAsyncInstruction* async =
Cast<HloAsyncInstruction>(producer_start);
EXPECT_EQ(async->async_wrapped_opcode(), HloOpcode::kAdd);
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
async->backend_config<GpuBackendConfig>());
EXPECT_EQ(gpu_config.operation_queue_id(), 1);
EXPECT_EQ(gpu_config.force_earliest_schedule(), true);
EXPECT_EQ(async->async_execution_thread(), "parallel");
}
}
} |
2,087 | cpp | tensorflow/tensorflow | fusion_process_dump | third_party/xla/xla/service/gpu/fusion_process_dump.cc | third_party/xla/xla/service/gpu/fusion_process_dump_test.cc | #ifndef XLA_SERVICE_GPU_FUSION_PROCESS_DUMP_H_
#define XLA_SERVICE_GPU_FUSION_PROCESS_DUMP_H_
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/fusion_process_dump.pb.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
class FusionProcessDump {
public:
static absl::StatusOr<FusionProcessDump> LoadFromFile(
const std::string& path);
static absl::StatusOr<FusionProcessDump> LoadFromData(
const std::string& data, absl::string_view format);
static absl::StatusOr<FusionProcessDump> LoadFromProto(
const FusionProcessDumpProto& fusion_process_dump_proto);
const FusionProcessDumpProto& proto() { return fusion_process_dump_proto_; }
HloModule* module() { return hlo_module_.get(); }
const se::DeviceDescription& device_info() { return device_info_; }
int64_t current_step_idx() { return current_step_idx_; }
HloComputation* GetCurrentComputation();
HloInstruction* GetInstructionWithName(absl::string_view name);
HloInstruction* GetProducer();
absl::InlinedVector<HloInstruction*, 2> GetConsumers();
HloInstruction* GetLastFusion() { return last_fusion_; }
const FusionStep& CurrentStep();
bool HasNext();
void Advance();
private:
FusionProcessDump(FusionProcessDumpProto fusion_process_dump_proto,
std::unique_ptr<HloModule> hlo_module,
se::DeviceDescription device_info,
absl::flat_hash_map<std::string, HloComputation*>
instruction_name_to_computation_map)
: fusion_process_dump_proto_(std::move(fusion_process_dump_proto)),
hlo_module_(std::move(hlo_module)),
device_info_(std::move(device_info)),
instruction_name_to_computation_map_(
std::move(instruction_name_to_computation_map)) {}
FusionProcessDumpProto fusion_process_dump_proto_;
std::unique_ptr<HloModule> hlo_module_;
se::DeviceDescription device_info_;
absl::flat_hash_map<std::string, HloComputation*>
instruction_name_to_computation_map_;
int64_t current_step_idx_ = 0;
HloInstruction* last_fusion_ = nullptr;
};
}
}
#endif
#include "xla/service/gpu/fusion_process_dump.h"
#include <string>
#include <string_view>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/fusion_process_dump.pb.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tools/hlo_module_loader.h"
#include "xla/util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
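// Returns `consumer` unchanged if it is already a fusion; otherwise wraps
// it in a fresh kLoop fusion named `fusion_name` and replaces it inside
// `computation`.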
HloInstruction* AddFusionInstruction(HloInstruction* producer,
HloInstruction* consumer,
HloComputation* computation,
std::string_view fusion_name) {
if (consumer->opcode() == HloOpcode::kFusion) {
return consumer;
}
auto kind = HloInstruction::FusionKind::kLoop;
auto fusion_instruction = computation->AddInstruction(
HloInstruction::CreateFusion(consumer->shape(), kind, consumer),
fusion_name);
TF_CHECK_OK(computation->ReplaceInstruction(consumer, fusion_instruction));
return fusion_instruction;
}
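// Replays one recorded fusion step: fuses `producer` into the fusion that
// wraps `consumer` (merging when the producer is itself a fusion), then
// removes the producer once it has no remaining users.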
HloInstruction* Fuse(HloInstruction* producer, HloInstruction* consumer,
HloComputation* computation,
std::string_view fusion_name) {
HloInstruction* fusion_instruction =
AddFusionInstruction(producer, consumer, computation, fusion_name);
if (producer->opcode() == HloOpcode::kFusion) {
fusion_instruction->MergeFusionInstruction(producer);
} else {
fusion_instruction->FuseInstruction(producer);
}
if (producer->user_count() == 0) {
TF_CHECK_OK(computation->RemoveInstruction(producer));
}
return fusion_instruction;
}
absl::string_view GetProducerName(const FusionStep& step) {
if (step.has_fusion()) {
return step.fusion().producer_name();
}
if (step.has_update_priority()) {
return step.update_priority().producer_name();
}
if (step.has_producer_ineligible()) {
return step.producer_ineligible().producer_name();
}
LOG(FATAL) << "Producer name not found in the current step.";
}
}
absl::StatusOr<FusionProcessDump> FusionProcessDump::LoadFromFile(
const std::string& path) {
std::string format = std::string(tsl::io::Extension(path));
std::string data;
TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(), path, &data));
return FusionProcessDump::LoadFromData(data, format);
}
absl::StatusOr<FusionProcessDump> FusionProcessDump::LoadFromData(
const std::string& data, absl::string_view format) {
FusionProcessDumpProto fusion_process_dump_proto;
if (format == "txt" || format == "pbtxt") {
if (!tsl::protobuf::TextFormat::ParseFromString(
data, &fusion_process_dump_proto)) {
      return InvalidArgument(
          "Failed to parse input as FusionProcessDump protobuf text");
}
} else if (format == "pb") {
if (!fusion_process_dump_proto.ParseFromString(data)) {
      return InvalidArgument(
          "Failed to parse input as FusionProcessDump protobuf binary");
}
} else {
return InvalidArgument(
"Invalid format from file extension: '%s'. Expected: txt, pb, or pbtxt",
format);
}
return FusionProcessDump::LoadFromProto(fusion_process_dump_proto);
}
absl::StatusOr<FusionProcessDump> FusionProcessDump::LoadFromProto(
const FusionProcessDumpProto& fusion_process_dump_proto) {
TF_ASSIGN_OR_RETURN(
auto module,
LoadModuleFromData(fusion_process_dump_proto.hlo_module_before_fusion(),
"txt"));
se::DeviceDescription gpu_device_info(
fusion_process_dump_proto.gpu_device_info());
absl::flat_hash_map<std::string, HloComputation*>
instruction_name_to_computation_map;
for (HloComputation* computation : module->MakeNonfusionComputations()) {
for (HloInstruction* instr : computation->instructions()) {
instruction_name_to_computation_map[instr->name()] = computation;
}
}
return FusionProcessDump(std::move(fusion_process_dump_proto),
std::move(module), std::move(gpu_device_info),
std::move(instruction_name_to_computation_map));
}
HloComputation* FusionProcessDump::GetCurrentComputation() {
return instruction_name_to_computation_map_.at(
GetProducerName(CurrentStep()));
}
HloInstruction* FusionProcessDump::GetInstructionWithName(
absl::string_view name) {
return instruction_name_to_computation_map_[name]->GetInstructionWithName(
name);
}
HloInstruction* FusionProcessDump::GetProducer() {
return GetInstructionWithName(GetProducerName(CurrentStep()));
}
absl::InlinedVector<HloInstruction*, 2> FusionProcessDump::GetConsumers() {
auto& step = CurrentStep();
if (step.has_fusion()) {
return {GetInstructionWithName(step.fusion().consumer_name())};
}
if (step.has_update_priority()) {
absl::InlinedVector<HloInstruction*, 2> consumers;
for (const auto& consumer_name : step.update_priority().consumer_names()) {
consumers.push_back(GetInstructionWithName(consumer_name));
}
return consumers;
}
return {};
}
const FusionStep& FusionProcessDump::CurrentStep() {
CHECK(HasNext());
return fusion_process_dump_proto_.fusion_steps(current_step_idx_);
}
bool FusionProcessDump::HasNext() {
return current_step_idx_ < fusion_process_dump_proto_.fusion_steps_size();
}
void FusionProcessDump::Advance() {
  const FusionStep& step = CurrentStep();
if (step.has_fusion()) {
const auto& fusion_step = step.fusion();
auto* computation = GetCurrentComputation();
HloInstruction* producer =
computation->GetInstructionWithName(fusion_step.producer_name());
HloInstruction* consumer =
computation->GetInstructionWithName(fusion_step.consumer_name());
HloInstruction* fusion =
Fuse(producer, consumer, computation, fusion_step.fusion_name());
instruction_name_to_computation_map_[fusion->name()] = computation;
last_fusion_ = fusion;
}
++current_step_idx_;
}
}
} | #include "xla/service/gpu/fusion_process_dump.h"
#include <string>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/fusion_process_dump.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace m = ::xla::match;
namespace xla {
namespace gpu {
namespace {
using FusionProcessDumpTest = HloTestBase;
void AddFusion(FusionProcessDumpProto& dump_proto,
const std::string& fusion_name, const std::string& producer_name,
const std::string& consumer_name) {
auto step = dump_proto.add_fusion_steps();
auto fusion_step = step->mutable_fusion();
fusion_step->set_fusion_name(fusion_name);
fusion_step->set_producer_name(producer_name);
fusion_step->set_consumer_name(consumer_name);
}
TEST_F(FusionProcessDumpTest, MultipleFusionSteps) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY main {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
add = f32[] add(p0, p1)
subtract = f32[] subtract(p0, p1)
abs = f32[] abs(subtract)
ROOT multiply = f32[] multiply(add, abs)
})"));
FusionProcessDumpProto dump_proto;
*dump_proto.mutable_gpu_device_info() =
TestGpuDeviceInfo::RTXA6000DeviceInfo().ToGpuProto();
dump_proto.set_hlo_module_before_fusion(
module->ToString(HloPrintOptions::ShortParsable()));
AddFusion(dump_proto, "fusion.1", "subtract", "abs");
AddFusion(dump_proto, "fusion.2", "fusion.1", "multiply");
AddFusion(dump_proto, "fusion.2", "add", "fusion.2");
TF_ASSERT_OK_AND_ASSIGN(auto fusion_process_dump,
FusionProcessDump::LoadFromProto(dump_proto));
fusion_process_dump.Advance();
fusion_process_dump.Advance();
fusion_process_dump.Advance();
EXPECT_FALSE(fusion_process_dump.HasNext());
auto root =
fusion_process_dump.module()->entry_computation()->root_instruction();
EXPECT_EQ(root->name(), "fusion.2");
ASSERT_THAT(root, GmockMatch(m::Fusion(m::Parameter(), m::Parameter())));
EXPECT_THAT(root->fused_expression_root(),
GmockMatch(m::Multiply(
m::Add(m::Parameter(), m::Parameter()),
m::Abs(m::Subtract(m::Parameter(), m::Parameter())))));
}
}
}
} |
2,088 | cpp | tensorflow/tensorflow | rename_fusions | third_party/xla/xla/service/gpu/transforms/rename_fusions.cc | third_party/xla/xla/service/gpu/transforms/rename_fusions_test.cc | #ifndef XLA_SERVICE_GPU_RENAME_FUSIONS_H_
#define XLA_SERVICE_GPU_RENAME_FUSIONS_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
class RenameFusions : public HloModulePass {
absl::string_view name() const override { return "rename_fusions"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/rename_fusions.h"
#include <memory>
#include <string>
#include "absl/container/btree_set.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
namespace xla {
namespace gpu {
namespace {
constexpr absl::string_view FusionKindToString(
HloInstruction::FusionKind kind) {
switch (kind) {
case HloInstruction::FusionKind::kCustom:
return "custom";
case HloInstruction::FusionKind::kLoop:
return "loop";
case HloInstruction::FusionKind::kInput:
return "input";
case HloInstruction::FusionKind::kOutput:
return "output";
}
}
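// Joins the deduplicated, alphabetically ordered hero opcode names of all
// fusion roots with '_', replacing '-' so the result stays a valid HLO
// identifier fragment.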
std::string MakeFusionHeroNames(const HloInstruction* instruction) {
std::unique_ptr<HloFusionAdaptor> fusion_adaptor =
HloFusionAdaptor::ForInstruction(instruction);
absl::btree_set<absl::string_view> heroes;
for (auto root : fusion_adaptor->GetRoots()) {
heroes.insert(HloOpcodeString(FindNonTrivialHero(root).opcode()));
}
return absl::StrReplaceAll(absl::StrJoin(heroes, "_"), {{"-", "_"}});
}
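// Renames the fusion instruction to "<kind>_<heroes>_fusion" and its called
// computation to "fused_<heroes>"; e.g. a loop fusion whose hero is a
// multiply becomes "loop_multiply_fusion" calling "fused_multiply".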
void RenameFusion(HloModule* module, HloInstruction* instruction) {
std::string hero_names = MakeFusionHeroNames(instruction);
module->SetAndUniquifyInstrName(
instruction, absl::StrCat(FusionKindToString(instruction->fusion_kind()),
"_", hero_names, "_fusion"));
module->SetAndUniquifyComputationName(
instruction->fused_instructions_computation(),
absl::StrCat("fused_", hero_names));
}
}
absl::StatusOr<bool> RenameFusions::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
for (HloComputation* computation : module->MakeNonfusionComputations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() != HloOpcode::kFusion ||
instruction->fusion_kind() == HloInstruction::FusionKind::kCustom) {
continue;
}
RenameFusion(module, instruction);
}
}
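  // Conservatively report a change: every non-custom fusion encountered
  // above was renamed in place.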
return true;
}
}
} | #include "xla/service/gpu/rename_fusions.h"
#include <utility>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
class RenameFusionsTest : public HloTestBase {
protected:
RenameFusions rename_fusions_;
};
TEST_F(RenameFusionsTest, FusionInstructionNames) {
absl::string_view kHlo = R"(
HloModule test_module
square {
p = f32[16384] parameter(0)
ROOT m = f32[16384] multiply(p, p)
}
exp {
p = f32[16384] parameter(0)
ROOT e = f32[16384] exponential(p)
}
log {
p = f32[16384] parameter(0)
ROOT l = f32[16384] log(p)
}
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
ENTRY main {
p0 = bf16[1024,8192] parameter(0)
p1 = f32[8192] parameter(1)
p2 = f32[16384] parameter(2)
convert = f32[1024,8192] convert(p0)
broadcast = f32[1024,8192] broadcast(p1), dimensions={1}
c0 = f32[] constant(0)
multiply = f32[1024,8192] multiply(broadcast, convert)
reduce = f32[1024] reduce(multiply, c0), dimensions={1}, to_apply=add
convert.1 = bf16[1024] convert(reduce)
s = f32[16384] fusion(p2), kind=kLoop, calls=square
e = f32[16384] fusion(s), kind=kLoop, calls=exp
l = f32[16384] fusion(s), kind=kInput, calls=log
ROOT result = (bf16[1024]{0}, f32[16384]{0}, f32[16384]{0}) tuple(convert.1, l, e)
})";
RunAndFilecheckHloRewrite(kHlo, std::move(rename_fusions_), R"(
CHECK: ENTRY %main
CHECK: %loop_multiply_fusion{{.*}} calls=%fused_multiply
CHECK: %input_log_fusion{{.*}} calls=%fused_log
CHECK: %loop_exponential_fusion{{.*}} calls=%fused_exponential
CHECK: ROOT %result
)");
}
}
} |
2,089 | cpp | tensorflow/tensorflow | gpu_conv_rewriter | null | null | #ifndef XLA_SERVICE_GPU_GPU_CONV_REWRITER_H_
#define XLA_SERVICE_GPU_GPU_CONV_REWRITER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
class GpuConvRewriter : public HloModulePass {
public:
explicit GpuConvRewriter(const se::GpuComputeCapability& compute_capability)
      : compute_capability_(compute_capability) {}
absl::string_view name() const override { return "gpu-conv-rewriter"; }
static bool ConvIsLowerable(HloInstruction* conv);
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
se::GpuComputeCapability compute_capability_;
};
}
}
#endif
#include "xla/service/gpu/gpu_conv_rewriter.h"
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/permutation_util.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
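// Rejects convolutions the cuDNN rewrite cannot handle: operands and
// results must be floating-point or integral, and FP8 is limited to
// f8e4m3fn/f8e5m2 on CUDA GPUs of compute capability 9.0 (Hopper) or newer.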
absl::Status CheckTypes(HloInstruction* conv,
const se::GpuComputeCapability cc) {
auto valid_shape = [conv, &cc](const Shape& shape) -> absl::Status {
PrimitiveType type = shape.element_type();
if (!primitive_util::IsFloatingPointType(type) &&
!primitive_util::IsIntegralType(type)) {
return Unimplemented(
"Convolutions must have floating-point or integral operands/outputs, "
"but got convolution with type %s: %s",
primitive_util::LowercasePrimitiveTypeName(type), conv->ToString());
}
if (primitive_util::IsF8Type(type)) {
if (type != F8E4M3FN && type != F8E5M2) {
return Unimplemented(
"The only FP8 types supported in convolutions are f8e5m2 and "
"f8e4m3, "
"but got convolution with FP8 type %s: %s",
primitive_util::LowercasePrimitiveTypeName(type), conv->ToString());
}
if (!std::holds_alternative<se::CudaComputeCapability>(cc)) {
return Unimplemented(
"FP8 convolutions are only supported on CUDA GPUs, but got "
"FP8 convolution on ROCm GPU: %s",
conv->ToString());
} else if (!std::get<se::CudaComputeCapability>(cc).IsAtLeastHopper()) {
return Unimplemented(
"FP8 convolutions are only supported on CUDA GPUs with compute "
"capability at least 9.0, but got "
"FP8 convolution on GPU with compute capability %s: %s",
std::get<se::CudaComputeCapability>(cc).ToString(),
conv->ToString());
}
}
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(valid_shape(conv->shape()));
TF_RETURN_IF_ERROR(valid_shape(conv->operand(0)->shape()));
TF_RETURN_IF_ERROR(valid_shape(conv->operand(1)->shape()));
return absl::OkStatus();
}
using ConvolutionMatch = std::optional<
std::tuple<Window, ConvolutionDimensionNumbers, HloInstruction*>>;
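// Heuristically detects a 1D convolution expressed as a 2D one: the window
// has two dimensions and the filter is a reshape that merely inserts a
// single degenerate dimension at one of the kernel spatial dimensions.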
bool MaybeConv1dToConv2d(HloInstruction* conv) {
if (conv->window().dimensions().size() != 2) {
return false;
}
if (conv->operand(1)->opcode() != HloOpcode::kReshape) {
return false;
}
auto filter = conv->operand(1);
std::optional<ShapeUtil::ShapeEqualityDescriptor> reshape_degenerate =
filter->ReshapeMerelyInsertsOrDeletes1SizedDimensions();
if (reshape_degenerate.has_value() &&
reshape_degenerate->deleted_dimensions.empty() &&
reshape_degenerate->inserted_dimensions.size() == 1) {
const auto& dnums = conv->convolution_dimension_numbers();
for (auto dim : dnums.kernel_spatial_dimensions()) {
if (dim == reshape_degenerate->inserted_dimensions[0]) {
return true;
}
}
}
return false;
}
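// A forward convolution is lowerable to cuDNN only if it has at most three
// spatial dimensions, no zero-element operands, and window reversal that is
// either absent or, in the 2D case, applied uniformly to all dimensions.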
bool CanImplementAsGpuForwardConv(HloInstruction* conv) {
const ConvolutionDimensionNumbers& dnums =
conv->convolution_dimension_numbers();
if (dnums.input_spatial_dimensions_size() > 3) {
return false;
}
if (ShapeUtil::IsZeroElementArray(conv->operand(0)->shape()) ||
ShapeUtil::IsZeroElementArray(conv->operand(1)->shape())) {
return false;
}
if (dnums.input_spatial_dimensions_size() == 2
? !window_util::AllOrNoneReversed(conv->window())
: window_util::HasWindowReversal(conv->window())) {
return false;
}
return true;
}
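// Tries to re-interpret `conv` as a gradient computation w.r.t. the filter.
// On success returns the window, dimension numbers and LHS for the
// equivalent cuDNN backward-filter convolution; otherwise returns
// std::nullopt and the op is left as a forward convolution.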
ConvolutionMatch MatchBackwardFilter(HloInstruction* conv) {
VLOG(2) << "Trying to match convolution backward filter.";
if (conv->feature_group_count() > 1) {
VLOG(1) << conv->ToString()
<< " is a forward convolution. All grouped backward filters are "
"mapped to batch grouped convolutions in tf2xla bridge. Hence "
"backward filter "
"convolutions cannot have feature groups greater than 1 at this "
"point. No need to fold to backward filter.";
return std::nullopt;
}
CHECK_EQ(HloOpcode::kConvolution, conv->opcode());
const ConvolutionDimensionNumbers& conv_dnums =
conv->convolution_dimension_numbers();
auto input_batch_dim = conv_dnums.input_batch_dimension();
auto input_feature_dim = conv_dnums.input_feature_dimension();
auto input_spatial_dims = conv_dnums.input_spatial_dimensions();
auto kernel_input_feature_dim = conv_dnums.kernel_input_feature_dimension();
auto kernel_output_feature_dim = conv_dnums.kernel_output_feature_dimension();
auto kernel_spatial_dims = conv_dnums.kernel_spatial_dimensions();
auto output_batch_dim = conv_dnums.output_batch_dimension();
auto output_feature_dim = conv_dnums.output_feature_dimension();
auto output_spatial_dims = conv_dnums.output_spatial_dimensions();
for (const WindowDimension& window_dim : conv->window().dimensions()) {
if (window_dim.stride() != 1) {
VLOG(1) << "Forward convolution's window "
<< conv->window().ShortDebugString()
<< " should have stride of 1.";
return std::nullopt;
}
if (window_dim.base_dilation() != 1) {
VLOG(1) << "Forward convolution's window "
<< conv->window().ShortDebugString()
<< " should have no base (LHS) dilation.";
return std::nullopt;
}
if (window_dim.padding_low() < 0) {
VLOG(1) << "Padding low should be non-negative.";
return std::nullopt;
}
if (window_dim.window_reversal()) {
VLOG(1) << "Window reversal field not supported";
return std::nullopt;
}
}
int small_kernel_dimension_num = 0;
for (int i = 0; i < kernel_spatial_dims.size(); ++i) {
if (conv->operand(1)->shape().dimensions(kernel_spatial_dims[i]) <=
conv->shape().dimensions(output_spatial_dims[i])) {
small_kernel_dimension_num += 1;
}
}
if ((kernel_spatial_dims.empty() || small_kernel_dimension_num > 1 ||
(!MaybeConv1dToConv2d(conv) && small_kernel_dimension_num == 1)) &&
!window_util::HasWindowDilation(conv->window())) {
VLOG(1) << conv->ToString()
            << " is a regular forward convolution. No need "
               "to fold it to a backward filter convolution.";
return std::nullopt;
}
Window backward_conv_window;
for (int i = 0; i < input_spatial_dims.size(); ++i) {
WindowDimension* dim = backward_conv_window.add_dimensions();
int64_t filter_size = conv->shape().dimensions(output_spatial_dims[i]);
dim->set_size(filter_size);
dim->set_stride(conv->window().dimensions(i).window_dilation());
dim->set_padding_low(conv->window().dimensions(i).padding_low());
dim->set_base_dilation(1);
dim->set_window_dilation(1);
int64_t input_size =
conv->operand(0)->shape().dimensions(input_spatial_dims[i]);
int64_t output_size = conv->window().dimensions(i).size();
int64_t padded_input_size = filter_size + (output_size - 1) * dim->stride();
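    // Choose padding_high so that
    //   input_size + padding_low + padding_high
    //       = padded_input_size + k   for some k in [0, stride - 1],
    // preferring padding_high == padding_low whenever that value lies in
    // the admissible range, and clamping to the nearest bound otherwise.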
int64_t min_padding_high =
padded_input_size - input_size - dim->padding_low();
int64_t max_padding_high = min_padding_high + dim->stride() - 1;
CHECK_GE(dim->padding_low(), 0);
if (dim->padding_low() >= min_padding_high &&
dim->padding_low() <= max_padding_high) {
dim->set_padding_high(dim->padding_low());
} else {
if (dim->padding_low() < min_padding_high) {
dim->set_padding_high(min_padding_high);
} else {
dim->set_padding_high(max_padding_high);
}
}
if (dim->padding_high() < 0) {
LOG(WARNING)
<< "Fusing this pattern to backward filter convolution would cause "
"negative padding ("
<< dim->padding_high()
<< ") on right/bottom of the weight gradients, which is not "
"supported by GpuConvPaddingLegalization (b/32744257). "
"Falling back to "
"unfused convolution for instruction: "
<< conv->ToString();
return std::nullopt;
}
}
ConvolutionDimensionNumbers backward_conv_dnums;
backward_conv_dnums.set_input_batch_dimension(input_feature_dim);
backward_conv_dnums.set_input_feature_dimension(input_batch_dim);
for (int i = 0; i < input_spatial_dims.size(); ++i) {
backward_conv_dnums.add_input_spatial_dimensions(input_spatial_dims[i]);
}
backward_conv_dnums.set_output_batch_dimension(kernel_input_feature_dim);
backward_conv_dnums.set_output_feature_dimension(kernel_output_feature_dim);
for (int i = 0; i < kernel_spatial_dims.size(); ++i) {
backward_conv_dnums.add_output_spatial_dimensions(kernel_spatial_dims[i]);
}
backward_conv_dnums.set_kernel_input_feature_dimension(output_batch_dim);
backward_conv_dnums.set_kernel_output_feature_dimension(output_feature_dim);
for (int i = 0; i < output_spatial_dims.size(); ++i) {
backward_conv_dnums.add_kernel_spatial_dimensions(output_spatial_dims[i]);
}
HloInstruction* lhs = conv->mutable_operand(0);
return std::make_tuple(backward_conv_window, backward_conv_dnums, lhs);
}
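// Tries to re-interpret `conv` as a gradient computation w.r.t. the input:
// typically a base-dilated convolution against a spatially reversed (or
// constant, or 1x1) filter. On success returns the window, dimension
// numbers and RHS for the equivalent cuDNN backward-input convolution.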
ConvolutionMatch MatchBackwardInput(HloInstruction* conv) {
VLOG(2) << "Trying to match convolution backward input.";
if (conv->feature_group_count() > 1) {
return std::nullopt;
}
CHECK_EQ(HloOpcode::kConvolution, conv->opcode());
HloInstruction* reverse_filter = conv->mutable_operand(1);
ConvolutionDimensionNumbers dnums = conv->convolution_dimension_numbers();
auto kernel_out_feature_dim = dnums.kernel_output_feature_dimension();
auto kernel_out_features =
reverse_filter->shape().dimensions(kernel_out_feature_dim);
if (conv->feature_group_count() > 1 &&
kernel_out_features == conv->feature_group_count()) {
return std::nullopt;
}
bool is_reversed_filter =
reverse_filter->opcode() == HloOpcode::kReverse &&
absl::c_is_permutation(dnums.kernel_spatial_dimensions(),
reverse_filter->dimensions());
bool is_reversed_conv1d_filter =
MaybeConv1dToConv2d(conv) &&
reverse_filter->operand(0)->opcode() == HloOpcode::kReverse;
bool is_1x1_filter =
absl::c_all_of(conv->window().dimensions(),
[](const WindowDimension& d) { return d.size() == 1; });
if (!is_reversed_filter && !is_reversed_conv1d_filter &&
!(window_util::HasBaseDilation(conv->window()) &&
(reverse_filter->IsConstant() || is_1x1_filter))) {
VLOG(1) << "Can't match to backwards convolution. Either filter is not "
"kReverse, or it's not a base-dilated conv with a 1x1 or "
"constant filter.";
return std::nullopt;
}
for (const WindowDimension& window_dim : conv->window().dimensions()) {
if (window_dim.stride() != 1) {
VLOG(1) << "Forward convolution's window "
<< conv->window().ShortDebugString()
<< " should have stride of 1.";
return std::nullopt;
}
if (window_dim.window_dilation() != 1) {
VLOG(1) << "Forward convolution's window "
<< conv->window().ShortDebugString()
<< " should have no window dilation.";
return std::nullopt;
}
if (window_dim.window_reversal()) {
VLOG(1) << "Window reversal field not supported";
return std::nullopt;
}
}
const auto& input_spatial_dims = dnums.input_spatial_dimensions();
const auto& output_spatial_dims = dnums.output_spatial_dimensions();
CHECK_EQ(conv->window().dimensions().size(), input_spatial_dims.size());
CHECK_EQ(output_spatial_dims.size(), input_spatial_dims.size());
const Window& old_window = conv->window();
Window new_window = old_window;
for (size_t i = 0; i < input_spatial_dims.size(); ++i) {
auto dim = new_window.mutable_dimensions(i);
dim->set_stride(old_window.dimensions(i).base_dilation());
dim->set_base_dilation(1);
auto kernel_size = old_window.dimensions(i).size();
auto backward_padding_low =
kernel_size - 1 - old_window.dimensions(i).padding_low();
if (backward_padding_low < 0) {
LOG(WARNING)
<< "The low padding of the backward convolution would be negative ("
<< backward_padding_low
<< "), which isn't supported by GpuConvPaddingLegalization "
"for now (b/32744257).";
return std::nullopt;
}
dim->set_padding_low(backward_padding_low);
auto unpadded_input_size = conv->shape().dimensions(output_spatial_dims[i]);
auto output_size =
conv->operand(0)->shape().dimensions(input_spatial_dims[i]);
auto padded_input_size = kernel_size + dim->stride() * (output_size - 1);
auto total_pad_size = padded_input_size - unpadded_input_size;
auto min_padding_high = total_pad_size - backward_padding_low;
auto max_padding_high = min_padding_high + dim->stride() - 1;
if (backward_padding_low >= min_padding_high &&
backward_padding_low <= max_padding_high) {
dim->set_padding_high(backward_padding_low);
} else {
if (backward_padding_low < min_padding_high) {
dim->set_padding_high(min_padding_high);
} else {
dim->set_padding_high(max_padding_high);
}
}
if (dim->padding_high() < 0) {
LOG(WARNING) << "Fusing this pattern to backward convolution would cause "
"negative padding ("
<< dim->padding_high()
<< ") on right/bottom of the activations, which is not "
"supported by GpuConvPaddingLegalization (b/32744257). "
"Falling back to unfused convolution for instruction: "
<< conv->ToString();
return std::nullopt;
}
}
auto conv_dnums = conv->convolution_dimension_numbers();
dnums.set_kernel_input_feature_dimension(
conv_dnums.kernel_output_feature_dimension());
dnums.set_kernel_output_feature_dimension(
conv_dnums.kernel_input_feature_dimension());
for (int i = 0; i < input_spatial_dims.size(); ++i) {
dnums.set_input_spatial_dimensions(i,
conv_dnums.output_spatial_dimensions(i));
dnums.set_output_spatial_dimensions(i,
conv_dnums.input_spatial_dimensions(i));
}
dnums.set_input_feature_dimension(conv_dnums.output_feature_dimension());
dnums.set_input_batch_dimension(conv_dnums.output_batch_dimension());
dnums.set_output_feature_dimension(conv_dnums.input_feature_dimension());
dnums.set_output_batch_dimension(conv_dnums.input_batch_dimension());
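  // A constant filter may have had its explicit kReverse constant-folded
  // away, so the constant already holds the reversed weights. Inserting a
  // reverse-of-reverse is a no-op for this convolution, and the code below
  // then strips the outer kReverse, leaving reverse(constant) as the RHS.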
if (reverse_filter->opcode() != HloOpcode::kReverse &&
reverse_filter->IsConstant()) {
HloComputation* c = conv->parent();
reverse_filter = c->AddInstruction(
HloInstruction::CreateReverse(reverse_filter->shape(), reverse_filter,
dnums.kernel_spatial_dimensions()));
reverse_filter = c->AddInstruction(
HloInstruction::CreateReverse(reverse_filter->shape(), reverse_filter,
dnums.kernel_spatial_dimensions()));
TF_CHECK_OK(conv->ReplaceOperandWith(1, reverse_filter));
}
HloInstruction* rhs = reverse_filter;
if (rhs->opcode() == HloOpcode::kReverse) {
rhs = rhs->mutable_operand(0);
} else if (is_reversed_conv1d_filter) {
auto src = rhs->mutable_operand(0)->mutable_operand(0);
rhs = conv->parent()->AddInstruction(
HloInstruction::CreateReshape(rhs->shape(), src));
}
if (conv->feature_group_count() == 1) {
return std::make_tuple(new_window, dnums, rhs);
}
int64_t input_feature_dimension = dnums.kernel_input_feature_dimension();
int64_t output_feature_dimension = dnums.kernel_output_feature_dimension();
if (std::abs(input_feature_dimension - output_feature_dimension) != 1) {
return std::nullopt;
}
int64_t input_features = rhs->shape().dimensions(input_feature_dimension);
int64_t output_features = rhs->shape().dimensions(output_feature_dimension);
std::vector<int64_t> reshape_dims = SpanToVector(rhs->shape().dimensions());
auto num_groups = conv->feature_group_count();
CHECK_EQ(input_features % num_groups, 0)
<< "Input feature count should be an exact multiple of feature group "
"count";
reshape_dims[input_feature_dimension] =
reshape_dims[input_feature_dimension] / num_groups;
reshape_dims.insert(reshape_dims.begin() + input_feature_dimension,
num_groups);
HloComputation* c = conv->parent();
rhs = c->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(rhs->shape().element_type(), reshape_dims), rhs));
std::vector<int64_t> transpose_dims(rhs->shape().dimensions_size());
std::iota(transpose_dims.begin(), transpose_dims.end(), 0);
transpose_dims.erase(transpose_dims.begin() + input_feature_dimension);
transpose_dims.insert(transpose_dims.begin() + output_feature_dimension,
input_feature_dimension);
std::vector<int64_t> transpose_reshape_dims =
SpanToVector(rhs->shape().dimensions());
transpose_reshape_dims.erase(transpose_reshape_dims.begin() +
input_feature_dimension);
transpose_reshape_dims.insert(
transpose_reshape_dims.begin() + output_feature_dimension, num_groups);
rhs = c->AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(rhs->shape().element_type(), transpose_reshape_dims),
rhs, transpose_dims));
Shape new_shape = rhs->shape();
new_shape.DeleteDimension(output_feature_dimension);
new_shape.set_dimensions(output_feature_dimension,
output_features * num_groups);
rhs = c->AddInstruction(HloInstruction::CreateReshape(new_shape, rhs));
return std::make_tuple(new_window, dnums, rhs);
}
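// Builds the custom call to `call_target` (one of the cuDNN convolution
// call targets) carrying the matched window, dimension numbers, feature
// group count, precision config and op metadata.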
HloInstruction* CreateGpuConv(absl::string_view call_target, const Shape& shape,
HloInstruction* lhs, HloInstruction* rhs,
const Window& window,
const ConvolutionDimensionNumbers& dnums,
int64_t feature_group_count,
const PrecisionConfig& precision_config,
const OpMetadata& metadata) {
HloComputation* computation = lhs->parent(); | #include "xla/service/gpu/gpu_conv_rewriter.h"
#include <optional>
#include <string>
#include "absl/log/check.h"
#include "absl/strings/str_format.h"
#include "xla/array4d.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/literal_util.h"
#include "xla/protobuf_util.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/shape_inference.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class GpuConvRewriterTest : public HloTestBase {
public:
GpuConvRewriterTest()
      : HloTestBase(/*verifier_layout_sensitive=*/true,
                    /*allow_mixed_precision_in_hlo_verifier=*/false) {
for (int i = 0; i < 2; ++i) {
WindowDimension* window_dim = default_conv_window_.add_dimensions();
window_dim->set_size(1);
window_dim->set_stride(1);
window_dim->set_padding_low(0);
window_dim->set_padding_high(0);
window_dim->set_window_dilation(1);
window_dim->set_base_dilation(1);
}
tf_default_dnums_for_backward_filter_.set_input_batch_dimension(3);
tf_default_dnums_for_backward_filter_.set_input_feature_dimension(0);
tf_default_dnums_for_backward_filter_.add_input_spatial_dimensions(1);
tf_default_dnums_for_backward_filter_.add_input_spatial_dimensions(2);
tf_default_dnums_for_backward_filter_.set_kernel_input_feature_dimension(0);
tf_default_dnums_for_backward_filter_.set_kernel_output_feature_dimension(
3);
tf_default_dnums_for_backward_filter_.add_kernel_spatial_dimensions(1);
tf_default_dnums_for_backward_filter_.add_kernel_spatial_dimensions(2);
tf_default_dnums_for_backward_filter_.add_output_spatial_dimensions(0);
tf_default_dnums_for_backward_filter_.add_output_spatial_dimensions(1);
tf_default_dnums_for_backward_filter_.set_output_batch_dimension(2);
tf_default_dnums_for_backward_filter_.set_output_feature_dimension(3);
tf_default_dnums_for_backward_input_.set_input_batch_dimension(0);
tf_default_dnums_for_backward_input_.set_output_batch_dimension(0);
tf_default_dnums_for_backward_input_.set_input_feature_dimension(3);
tf_default_dnums_for_backward_input_.set_output_feature_dimension(3);
tf_default_dnums_for_backward_input_.add_input_spatial_dimensions(1);
tf_default_dnums_for_backward_input_.add_output_spatial_dimensions(1);
tf_default_dnums_for_backward_input_.add_input_spatial_dimensions(2);
tf_default_dnums_for_backward_input_.add_output_spatial_dimensions(2);
tf_default_dnums_for_backward_input_.set_kernel_input_feature_dimension(3);
tf_default_dnums_for_backward_input_.set_kernel_output_feature_dimension(2);
tf_default_dnums_for_backward_input_.add_kernel_spatial_dimensions(0);
tf_default_dnums_for_backward_input_.add_kernel_spatial_dimensions(1);
}
protected:
const se::GpuComputeCapability& GetComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability();
}
bool RunPass(HloModule* module) {
return GpuConvRewriter(GetComputeCapability()).Run(module).value();
}
Window default_conv_window_;
ConvolutionDimensionNumbers tf_default_dnums_for_backward_filter_;
ConvolutionDimensionNumbers tf_default_dnums_for_backward_input_;
};
TEST_F(GpuConvRewriterTest, BackwardFilterConvolve) {
HloComputation::Builder builder(TestName());
HloInstruction* activations =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "activations"));
HloInstruction* gradients =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {1, 1, 2, 1}), "gradients"));
Window conv_window = default_conv_window_;
conv_window.mutable_dimensions(1)->set_size(2);
conv_window.mutable_dimensions(1)->set_window_dilation(2);
auto* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeInference::InferConvolveShape(
          activations->shape(), gradients->shape(),
          /*feature_group_count=*/1, /*batch_group_count=*/1, conv_window,
          tf_default_dnums_for_backward_filter_,
          /*preferred_element_type=*/std::nullopt)
          .value(),
      activations, gradients, /*feature_group_count=*/1,
      /*batch_group_count=*/1, conv_window,
      tf_default_dnums_for_backward_filter_, DefaultPrecisionConfig(2)));
OpMetadata metadata;
metadata.set_op_name("foo");
conv->set_metadata(metadata);
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
ASSERT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardFilterCallTarget}), 0)));
const auto& md_after_opt =
entry_computation->root_instruction()->operand(0)->metadata();
EXPECT_TRUE(protobuf_util::ProtobufEquals(md_after_opt, metadata))
<< md_after_opt.DebugString() << " vs " << metadata.DebugString();
}
TEST_F(GpuConvRewriterTest,
BackwardFilterConvolveEquivalentToForwardConvolution) {
HloComputation::Builder builder(TestName());
HloInstruction* activations =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "activations"));
HloInstruction* gradients =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "gradients"));
Window conv_window = default_conv_window_;
conv_window.mutable_dimensions(1)->set_size(3);
builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeInference::InferConvolveShape(
activations->shape(), gradients->shape(), 1,
1, conv_window,
tf_default_dnums_for_backward_filter_,
std::nullopt)
.value(),
activations, gradients, 1,
1, conv_window,
tf_default_dnums_for_backward_filter_, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
EXPECT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvForwardCallTarget}), 0)));
}
TEST_F(GpuConvRewriterTest, BackwardFilterConvolveWithPaddedActivations) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* activations =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {20, 35, 35, 32}), "activations"));
HloInstruction* gradients =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {20, 35, 35, 32}), "gradients"));
Window conv_window = default_conv_window_;
for (int i = 0; i < 2; ++i) {
conv_window.mutable_dimensions(i)->set_size(35);
conv_window.mutable_dimensions(i)->set_padding_low(1);
conv_window.mutable_dimensions(i)->set_padding_high(1);
}
builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeUtil::MakeShape(F32, {32, 3, 3, 32}), activations, gradients,
1, 1, conv_window,
tf_default_dnums_for_backward_filter_, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
EXPECT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardFilterCallTarget}), 0)));
}
TEST_F(GpuConvRewriterTest, BackwardFilterConvolveWithPaddedGradients) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* activations =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {20, 10, 10, 192}), "activations"));
HloInstruction* gradients =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {20, 4, 4, 320}), "gradients"));
Window conv_window = default_conv_window_;
for (int i = 0; i < 2; ++i) {
conv_window.mutable_dimensions(i)->set_size(4);
conv_window.mutable_dimensions(i)->set_padding_high(-1);
conv_window.mutable_dimensions(i)->set_window_dilation(2);
}
builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeUtil::MakeShape(F32, {320, 3, 3, 192}), activations, gradients,
1, 1, conv_window,
tf_default_dnums_for_backward_filter_, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
EXPECT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardFilterCallTarget}), 0)));
}
TEST_F(GpuConvRewriterTest, BackwardFilterConvolveWithUnevenPadding) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* activations =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {20, 35, 35, 32}), "activations"));
HloInstruction* gradients =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {20, 35, 35, 32}), "gradients"));
Window conv_window = default_conv_window_;
for (int i = 0; i < 2; ++i) {
conv_window.mutable_dimensions(i)->set_size(35);
conv_window.mutable_dimensions(i)->set_padding_high(1);
}
builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeUtil::MakeShape(F32, {32, 2, 2, 32}), activations, gradients,
1, 1, conv_window,
tf_default_dnums_for_backward_filter_, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
EXPECT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardFilterCallTarget}), 0)));
}
TEST_F(GpuConvRewriterTest, BackwardInputConvolveEvenPadding) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* output =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {4, 5, 16, 16}), "output"));
HloInstruction* kernel =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {5, 3, 7, 7}), "kernel"));
HloInstruction* reverse_kernel = builder.AddInstruction(
HloInstruction::CreateReverse(kernel->shape(), kernel, {2, 3}));
Window conv_window = default_conv_window_;
for (int i = 0; i < 2; ++i) {
conv_window.mutable_dimensions(i)->set_size(7);
conv_window.mutable_dimensions(i)->set_padding_low(3);
conv_window.mutable_dimensions(i)->set_padding_high(3);
}
ConvolutionDimensionNumbers conv_dnums;
conv_dnums.set_input_batch_dimension(0);
conv_dnums.set_output_batch_dimension(0);
conv_dnums.set_input_feature_dimension(1);
conv_dnums.set_output_feature_dimension(1);
conv_dnums.add_input_spatial_dimensions(2);
conv_dnums.add_output_spatial_dimensions(2);
conv_dnums.add_input_spatial_dimensions(3);
conv_dnums.add_output_spatial_dimensions(3);
conv_dnums.set_kernel_input_feature_dimension(0);
conv_dnums.set_kernel_output_feature_dimension(1);
conv_dnums.add_kernel_spatial_dimensions(2);
conv_dnums.add_kernel_spatial_dimensions(3);
HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeUtil::MakeShape(F32, {4, 3, 16, 16}), output,
reverse_kernel, 1,
1, conv_window, conv_dnums,
DefaultPrecisionConfig(2)));
CHECK(ShapeUtil::Compatible(
conv->shape(),
ShapeInference::InferConvolveShape(
output->shape(), reverse_kernel->shape(),
1, 1, conv_window,
conv_dnums, std::nullopt)
.value()));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
ASSERT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardInputCallTarget}), 0)));
const HloInstruction* custom_call =
entry_computation->root_instruction()->operand(0);
for (int i = 0; i < 2; ++i) {
const WindowDimension& window_dim = custom_call->window().dimensions(i);
EXPECT_EQ(3, window_dim.padding_low());
EXPECT_EQ(3, window_dim.padding_high());
EXPECT_EQ(1, window_dim.stride());
EXPECT_EQ(1, window_dim.base_dilation());
}
}
TEST_F(GpuConvRewriterTest, BackwardInputConvolve1x1Filter) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* output =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "output"));
HloInstruction* kernel =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {1, 1, 1, 1}), "kernel"));
Window conv_window = default_conv_window_;
conv_window.mutable_dimensions(1)->set_base_dilation(2);
builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeInference::InferConvolveShape(
output->shape(), kernel->shape(),
1,
1, conv_window,
tf_default_dnums_for_backward_input_,
std::nullopt)
.value(),
output, kernel, 1,
1, conv_window,
tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
EXPECT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardInputCallTarget}), 0)));
}
TEST_F(GpuConvRewriterTest,
BackwardInputConvolve1x1FilterEquivalentToForwardConvolve) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* output =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "output"));
HloInstruction* kernel =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {1, 1, 1, 1}), "kernel"));
builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeInference::InferConvolveShape(
output->shape(), kernel->shape(), 1,
1, default_conv_window_,
tf_default_dnums_for_backward_input_,
std::nullopt)
.value(),
output, kernel, 1,
1, default_conv_window_,
tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
EXPECT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvForwardCallTarget}), 0)));
}
TEST_F(GpuConvRewriterTest, BackwardInputConvolveUnevenPaddingOnGradients) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* output =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {20, 4, 4, 320}), "output"));
HloInstruction* kernel =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {3, 3, 192, 320}), "kernel"));
HloInstruction* reverse_kernel = builder.AddInstruction(
HloInstruction::CreateReverse(kernel->shape(), kernel, {0, 1}));
Window conv_window = default_conv_window_;
for (int i = 0; i < 2; ++i) {
conv_window.mutable_dimensions(i)->set_size(3);
conv_window.mutable_dimensions(i)->set_padding_low(2);
conv_window.mutable_dimensions(i)->set_padding_high(3);
conv_window.mutable_dimensions(i)->set_base_dilation(2);
}
HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeUtil::MakeShape(F32, {20, 10, 10, 192}), output, reverse_kernel,
1, 1, conv_window,
tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2)));
CHECK(ShapeUtil::Compatible(
conv->shape(), ShapeInference::InferConvolveShape(
output->shape(), reverse_kernel->shape(),
1, 1,
conv_window, tf_default_dnums_for_backward_input_,
std::nullopt)
.value()));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
ASSERT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardInputCallTarget}), 0)));
const HloInstruction* custom_call =
entry_computation->root_instruction()->operand(0);
for (int i = 0; i < 2; ++i) {
const WindowDimension& window_dim = custom_call->window().dimensions(i);
EXPECT_EQ(0, window_dim.padding_low());
EXPECT_EQ(0, window_dim.padding_high());
EXPECT_EQ(2, window_dim.stride());
EXPECT_EQ(1, window_dim.base_dilation());
}
}
TEST_F(GpuConvRewriterTest, BackwardInputConvolveLowPaddingTooLarge) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* output =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {20, 4, 4, 320}), "output"));
HloInstruction* kernel =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {3, 3, 192, 320}), "kernel"));
HloInstruction* reverse_kernel = builder.AddInstruction(
HloInstruction::CreateReverse(kernel->shape(), kernel, {0, 1}));
Window conv_window = default_conv_window_;
for (int i = 0; i < 2; ++i) {
conv_window.mutable_dimensions(i)->set_size(3);
conv_window.mutable_dimensions(i)->set_padding_low(3);
conv_window.mutable_dimensions(i)->set_padding_high(2);
conv_window.mutable_dimensions(i)->set_base_dilation(2);
}
HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeUtil::MakeShape(F32, {20, 10, 10, 192}), output, reverse_kernel,
1, 1, conv_window,
tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2)));
CHECK(ShapeUtil::Compatible(
conv->shape(), ShapeInference::InferConvolveShape(
output->shape(), reverse_kernel->shape(),
1, 1,
conv_window, tf_default_dnums_for_backward_input_,
std::nullopt)
.value()));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
EXPECT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvForwardCallTarget}), 0)));
}
TEST_F(GpuConvRewriterTest, BackwardInputConvolveUnevenPaddingOnActivations) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* output =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {1, 1, 7, 1}), "output"));
HloInstruction* kernel =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {1, 3, 1, 1}), "kernel"));
HloInstruction* reverse_kernel = builder.AddInstruction(
HloInstruction::CreateReverse(kernel->shape(), kernel, {0, 1}));
Window conv_window = default_conv_window_;
WindowDimension* forward_conv_col_dim = conv_window.mutable_dimensions(1);
forward_conv_col_dim->set_size(3);
forward_conv_col_dim->set_padding_low(2);
forward_conv_col_dim->set_padding_high(1);
forward_conv_col_dim->set_base_dilation(2);
HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeUtil::MakeShape(F32, {1, 1, 14, 1}), output, reverse_kernel,
1, 1, conv_window,
tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2)));
CHECK(ShapeUtil::Compatible(
conv->shape(), ShapeInference::InferConvolveShape(
output->shape(), reverse_kernel->shape(),
1, 1,
conv_window, tf_default_dnums_for_backward_input_,
std::nullopt)
.value()));
auto module = CreateNewVerifiedModule();
const HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
ASSERT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardInputCallTarget}), 0)));
const WindowDimension& backward_conv_col_dim =
entry_computation->root_instruction()->operand(0)->window().dimensions(1);
EXPECT_EQ(0, backward_conv_col_dim.padding_low());
EXPECT_EQ(1, backward_conv_col_dim.padding_high());
}
TEST_F(GpuConvRewriterTest,
BackwardInputConvolveNegativePaddingHighOnActivations) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* output =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "output"));
HloInstruction* kernel =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {1, 2, 1, 1}), "kernel"));
HloInstruction* reverse_kernel = builder.AddInstruction(
HloInstruction::CreateReverse(kernel->shape(), kernel, {0, 1}));
Window conv_window = default_conv_window_;
WindowDimension* forward_conv_col_dim = conv_window.mutable_dimensions(1);
forward_conv_col_dim->set_size(2);
forward_conv_col_dim->set_padding_high(2);
  HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      ShapeUtil::MakeShape(F32, {1, 1, 4, 1}), output, reverse_kernel,
      /*feature_group_count=*/1, /*batch_group_count=*/1, conv_window,
      tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2)));
  CHECK(ShapeUtil::Compatible(
      conv->shape(), ShapeInference::InferConvolveShape(
                         output->shape(), reverse_kernel->shape(),
                         /*feature_group_count=*/1, /*batch_group_count=*/1,
                         conv_window, tf_default_dnums_for_backward_input_,
                         /*preferred_element_type=*/std::nullopt)
                         .value()));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
EXPECT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvForwardCallTarget}), 0)));
}
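// A constant filter must not block the backward-input pattern: the rewriter
// is expected to insert an explicit reverse of the constant kernel.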
TEST_F(GpuConvRewriterTest, BackwardInputConvolveConstantFilter) {
Array4D<float> constant_arr(4, 4, 2, 2);
constant_arr.FillIota(0);
std::string constant_str =
LiteralUtil::CreateR4FromArray4D(constant_arr).ToStringWithoutShape();
const std::string module_str = absl::StrFormat(R"(
HloModule test
ENTRY entry_computation {
param0 = f32[128,2,16,16]{3,2,1,0} parameter(0)
constant = f32[4,4,2,2]{3,2,1,0} constant(%s)
ROOT convolution = f32[128,2,32,32]{3,2,1,0} convolution(param0, constant),
window={size=4x4 pad=2_2x2_2 lhs_dilate=2x2},
dim_labels=bf01_01oi->bf01, feature_group_count=1
})",
constant_str);
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
EXPECT_TRUE(RunPass(m.get()));
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardInputCallTarget},
m::Parameter(), m::Reverse(m::Constant())),
0)));
}
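// The remaining tests exercise the backward-filter (and conv1d
// backward-filter) pattern matching on textual HLO modules.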
TEST_F(GpuConvRewriterTest, TestBackwardFilterPatternMatch) {
const std::string module_str = absl::StrFormat(R"(
HloModule Test
ENTRY Test {
input = f32[8,120,256,256] parameter(0)
filter = f32[8,120,256,256] parameter(1)
ROOT conv = f32[120,120,3,3] convolution(input, filter), window={size=256x256 pad=1_1x1_1}, dim_labels=fb01_io01->fb01
})");
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
EXPECT_TRUE(RunPass(m.get()));
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardFilterCallTarget},
m::Parameter(0), m::Parameter(1)),
0)));
}
TEST_F(GpuConvRewriterTest, TestBackwardFilterPatternNoMatch) {
const std::string module_str = absl::StrFormat(R"(
HloModule Test
ENTRY Test {
input = f32[8,128,2,32] parameter(0)
filter = f32[3,3,128,128] parameter(1)
ROOT conv = f32[8,128,2,32] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01
})");
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
EXPECT_TRUE(RunPass(m.get()));
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvForwardCallTarget}, m::Parameter(0),
m::Parameter(1)),
0)));
}
TEST_F(GpuConvRewriterTest, TestConv1dBackwardFilterPatternMatch) {
const std::string module_str = absl::StrFormat(R"(
HloModule Test
ENTRY Test {
input = f32[8,256,128] parameter(0)
filter = f32[8,254,128] parameter(1)
reshape.1 = f32[8,1,256,128] reshape(input)
reshape.2 = f32[8,1,254,128] reshape(filter)
ROOT conv = f32[1,3,128,128] convolution(reshape.1, reshape.2), window={size=1x254}, dim_labels=f01b_i01o->01bf
})");
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
EXPECT_TRUE(RunPass(m.get()));
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardFilterCallTarget},
m::Reshape(), m::Reshape()),
0)));
}
TEST_F(GpuConvRewriterTest, Tes |
2,090 | cpp | tensorflow/tensorflow | hlo_fusion_analysis | third_party/xla/xla/service/gpu/hlo_fusion_analysis.cc | third_party/xla/xla/service/gpu/hlo_fusion_analysis_test.cc | #ifndef XLA_SERVICE_GPU_HLO_FUSION_ANALYSIS_H_
#define XLA_SERVICE_GPU_HLO_FUSION_ANALYSIS_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
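// Analyzes a fusion (or a producer/consumer pair about to be fused) and
// classifies it into the emitter kind the GPU backend should use, exposing
// auxiliary data such as the fusion roots, their hero instructions, and the
// tiled-transpose description when one applies.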
class HloFusionAnalysis {
public:
enum class EmitterFusionKind {
kLoop,
kCustomFusion,
kTriton,
kReduction,
kTranspose,
kConcatenate,
kInputSlices,
kScatter,
kCuDnn,
};
struct InputOutputInfo {
int smallest_input_dtype_bits;
int smallest_output_dtype_bits;
};
static HloFusionAnalysis Create(FusionBackendConfig backend_config,
std::unique_ptr<HloFusionAdaptor> fusion,
const se::DeviceDescription* device_info);
static HloFusionAnalysis Create(const HloFusionInstruction* fusion,
const se::DeviceDescription* device_info);
const HloFusionAdaptor& fusion() const { return *fusion_; }
const absl::InlinedVector<HloInstructionAdaptor, 2>& fusion_roots() const {
return fusion_roots_;
}
HloInstructionAdaptor fusion_root(int64_t i) const {
return fusion_roots_[i];
}
int64_t fusion_root_count() const { return fusion_roots_.size(); }
const absl::InlinedVector<HloInstructionAdaptor, 2>& fusion_heroes() const {
return fusion_heroes_;
}
HloInstructionAdaptor fusion_hero(int64_t i) const {
return fusion_heroes_[i];
}
int64_t fusion_hero_count() const { return fusion_heroes_.size(); }
EmitterFusionKind GetEmitterFusionKind() const;
const HloInstruction* FindHeroReduction() const;
const se::DeviceDescription& device_info() const { return *device_info_; }
const FusionBackendConfig& fusion_backend_config() const {
return fusion_backend_config_;
}
const TransposeDescription& tiled_transpose() const {
CHECK(tiled_transpose_.has_value());
return *tiled_transpose_;
}
const InputOutputInfo& input_output_info() const {
return input_output_info_;
}
private:
HloFusionAnalysis(FusionBackendConfig fusion_backend_config,
std::unique_ptr<HloFusionAdaptor> fusion,
absl::InlinedVector<HloInstructionAdaptor, 2> fusion_roots,
absl::InlinedVector<HloInstructionAdaptor, 2> fusion_heroes,
const se::DeviceDescription* device_info,
std::optional<TransposeDescription> tiled_transpose,
InputOutputInfo input_output_info);
bool HasConsistentTransposeHeros() const;
FusionBackendConfig fusion_backend_config_;
std::unique_ptr<HloFusionAdaptor> fusion_;
absl::InlinedVector<HloInstructionAdaptor, 2> fusion_roots_;
absl::InlinedVector<HloInstructionAdaptor, 2> fusion_heroes_;
const se::DeviceDescription* device_info_;
std::optional<TransposeDescription> tiled_transpose_;
InputOutputInfo input_output_info_;
};
HloFusionAnalysis AnalyzeProducerConsumerFusion(
const HloInstruction& producer, const HloInstruction& consumer,
const se::DeviceDescription& device_info);
HloFusionAnalysis AnalyzeFusion(const HloInstruction& consumer,
const se::DeviceDescription& device_info);
}
}
#endif
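// Usage sketch (illustrative only; `fusion` stands in for an
// HloFusionInstruction* taken from a compiled module and `device_info` for a
// real se::DeviceDescription queried from the target GPU):
//
//   HloFusionAnalysis analysis =
//       HloFusionAnalysis::Create(fusion, &device_info);
//   if (analysis.GetEmitterFusionKind() ==
//       HloFusionAnalysis::EmitterFusionKind::kReduction) {
//     const HloInstruction* hero = analysis.FindHeroReduction();
//     // ... select a reduction emitter based on `hero`.
//   }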
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include <algorithm>
#include <limits>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/reduction_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
namespace {
bool IsInputFusibleNonStridedSlices(
const absl::Span<const HloInstructionAdaptor> fusion_roots) {
return absl::c_all_of(fusion_roots, [&](const HloInstructionAdaptor& root) {
return IsSliceWithUnitStrides(&root.instruction());
});
}
bool AllSliceInputsAreCompatible(
const absl::Span<const HloInstructionAdaptor> fusion_roots) {
const Shape& first_slice_operand_shape =
fusion_roots[0].GetOperand(0).shape();
return absl::c_all_of(fusion_roots, [&](const HloInstructionAdaptor& slice) {
return ShapeUtil::EqualIgnoringElementType(slice.GetOperand(0).shape(),
first_slice_operand_shape);
});
}
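// Returns the tiled-transpose description shared by all roots, or
// std::nullopt if two roots have inequivalent transpose heroes or a
// non-transpose root is not a bitcast-compatible reshape/transpose of the
// hero's input shape.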
std::optional<TransposeDescription> FindConsistentTransposeHero(
const absl::InlinedVector<HloInstructionAdaptor, 2>& hlo_roots,
const absl::InlinedVector<HloInstructionAdaptor, 2>& heroes) {
std::optional<TransposeDescription> tiled_transpose_hero;
std::vector<const HloInstruction*> non_transpose_roots;
for (auto [root, hero] : llvm::zip(hlo_roots, heroes)) {
if (auto tr = GetDescriptionForTiledTransposeEmitter(root.instruction(),
hero.instruction())) {
if (!tiled_transpose_hero) {
tiled_transpose_hero = tr;
} else if (!tiled_transpose_hero->IsEquivalent(*tr)) {
return std::nullopt;
}
} else {
non_transpose_roots.push_back(&root.instruction());
}
}
if (!tiled_transpose_hero) return std::nullopt;
for (auto* root : non_transpose_roots) {
if (!ShapeUtil::IsReshapeOrTransposeBitcast(
root->shape(), tiled_transpose_hero->input_shape(),
true)) {
return std::nullopt;
}
}
return tiled_transpose_hero;
}
const Shape& GetShape(const HloInstructionAdaptor& adaptor) {
return adaptor.shape();
}
const Shape& GetShape(const HloInstruction* instruction) {
return instruction->shape();
}
template <typename Container>
int SmallestBitWidth(const Container& args) {
int bits = std::numeric_limits<int>::max();
for (const auto& operand : args) {
const Shape& shape = GetShape(operand);
if (!shape.IsArray()) continue;
bits = std::min(bits, shape.element_type() == PRED
? 8
: primitive_util::BitWidth(shape.element_type()));
}
return bits;
}
}
HloFusionAnalysis::HloFusionAnalysis(
FusionBackendConfig fusion_backend_config,
std::unique_ptr<HloFusionAdaptor> fusion,
absl::InlinedVector<HloInstructionAdaptor, 2> fusion_roots,
absl::InlinedVector<HloInstructionAdaptor, 2> fusion_heroes,
const se::DeviceDescription* device_info,
std::optional<TransposeDescription> tiled_transpose,
HloFusionAnalysis::InputOutputInfo input_output_info)
: fusion_backend_config_(std::move(fusion_backend_config)),
fusion_(std::move(fusion)),
fusion_roots_(std::move(fusion_roots)),
fusion_heroes_(std::move(fusion_heroes)),
device_info_(device_info),
tiled_transpose_(tiled_transpose),
input_output_info_(std::move(input_output_info)) {}
HloFusionAnalysis HloFusionAnalysis::Create(
FusionBackendConfig backend_config,
std::unique_ptr<HloFusionAdaptor> fusion,
const se::DeviceDescription* device_info) {
absl::InlinedVector<HloInstructionAdaptor, 2> roots = fusion->GetRoots();
absl::InlinedVector<HloInstructionAdaptor, 2> heroes;
for (auto root : roots) {
heroes.push_back(FindNonTrivialHero(root));
}
InputOutputInfo input_output_info{
SmallestBitWidth(fusion->GetParameters()),
SmallestBitWidth(roots),
};
std::optional<TransposeDescription> tiled_transpose_hero =
FindConsistentTransposeHero(roots, heroes);
return HloFusionAnalysis(std::move(backend_config), std::move(fusion),
std::move(roots), std::move(heroes), device_info,
tiled_transpose_hero, std::move(input_output_info));
}
HloFusionAnalysis HloFusionAnalysis::Create(
const HloFusionInstruction* fusion,
const se::DeviceDescription* device_info) {
CHECK(device_info != nullptr);
FusionBackendConfig backend_config =
fusion->has_backend_config()
? fusion->backend_config<GpuBackendConfig>()->fusion_backend_config()
: FusionBackendConfig::default_instance();
return Create(std::move(backend_config),
HloFusionAdaptor::ForInstruction(fusion), device_info);
}
bool HloFusionAnalysis::HasConsistentTransposeHeros() const {
return tiled_transpose_.has_value();
}
static bool UseConcatenateFusion(
absl::Span<const HloInstructionAdaptor> roots,
absl::Span<const HloInstructionAdaptor> heroes) {
if (heroes.size() != 1) return false;
if (heroes.front().opcode() != HloOpcode::kConcatenate) return false;
if (roots.front().shape().IsTuple()) return false;
if (heroes.front().instruction().operand_count() > 4) return false;
return true;
}
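// Dispatches in priority order: explicit backend kinds first (custom fusion,
// Triton, cuDNN), then sub-byte element types force the input-slices or loop
// emitter, followed by the reduction, transpose, input-slices, scatter, and
// concatenate heuristics, with kLoop as the final fallback.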
HloFusionAnalysis::EmitterFusionKind HloFusionAnalysis::GetEmitterFusionKind()
const {
if (fusion_backend_config_.kind() == kCustomFusionKind) {
return EmitterFusionKind::kCustomFusion;
}
if (fusion_backend_config_.kind() == kTritonFusionKind ||
fusion_backend_config_.kind() == kTritonGemmFusionKind) {
return EmitterFusionKind::kTriton;
}
if (fusion_backend_config_.kind() == kCuDnnFusionKind) {
return EmitterFusionKind::kCuDnn;
}
if (input_output_info_.smallest_input_dtype_bits < 8 ||
input_output_info_.smallest_output_dtype_bits < 8) {
if (fusion_roots_.size() > 1 &&
IsInputFusibleNonStridedSlices(fusion_roots_) &&
AllSliceInputsAreCompatible(fusion_roots_)) {
return EmitterFusionKind::kInputSlices;
}
return EmitterFusionKind::kLoop;
}
std::optional<HloInstructionAdaptor> first_reduce_hero;
for (auto [root, hero] : llvm::zip(fusion_roots_, fusion_heroes_)) {
if (IsRealReductionHero(root.instruction(), hero.instruction())) {
first_reduce_hero = hero;
break;
}
}
if (first_reduce_hero.has_value()) {
bool valid_shapes = true;
Shape hero_operand_shape = first_reduce_hero->GetOperand(0).shape();
for (auto [root, hero] : llvm::zip(fusion_roots_, fusion_heroes_)) {
if (root == *first_reduce_hero) {
continue;
}
if (!IsRealReductionHero(root.instruction(), hero.instruction())) {
if (ShapeUtil::ElementsIn(root.shape()) !=
ShapeUtil::ElementsIn(hero_operand_shape)) {
valid_shapes = false;
break;
}
} else if (!AreReductionsMultiOutputFusionCompatible(
&hero.instruction(), &first_reduce_hero->instruction())) {
valid_shapes = false;
break;
}
}
if (valid_shapes) {
return EmitterFusionKind::kReduction;
}
}
if (HasConsistentTransposeHeros() && tiled_transpose_->permutation[2] != 2) {
return EmitterFusionKind::kTranspose;
}
if (fusion_roots_.size() > 1) {
if (IsInputFusibleNonStridedSlices(fusion_roots_) &&
AllSliceInputsAreCompatible(fusion_roots_)) {
return EmitterFusionKind::kInputSlices;
}
return EmitterFusionKind::kLoop;
}
if (fusion_roots_[0].opcode() == HloOpcode::kScatter) {
return EmitterFusionKind::kScatter;
}
if (UseConcatenateFusion(fusion_roots_, fusion_heroes_)) {
return EmitterFusionKind::kConcatenate;
}
return EmitterFusionKind::kLoop;
}
const HloInstruction* HloFusionAnalysis::FindHeroReduction() const {
if (GetEmitterFusionKind() != EmitterFusionKind::kReduction) {
return nullptr;
}
const auto& roots = fusion_roots();
CHECK(!roots.empty());
for (auto [root, hero] : llvm::zip(roots, fusion_heroes_)) {
if (IsRealReductionHero(root.instruction(), hero.instruction())) {
return &hero.instruction();
}
}
LOG(FATAL) << "Did not find a hero reduction";
}
HloFusionAnalysis AnalyzeProducerConsumerFusion(
const HloInstruction& producer, const HloInstruction& consumer,
const se::DeviceDescription& device_info) {
return HloFusionAnalysis::Create(
consumer.has_backend_config()
? consumer.backend_config<GpuBackendConfig>()->fusion_backend_config()
: producer.backend_config<GpuBackendConfig>()
->fusion_backend_config(),
HloFusionAdaptor::ForProducerConsumer(&producer, &consumer),
&device_info);
}
HloFusionAnalysis AnalyzeFusion(const HloInstruction& consumer,
const se::DeviceDescription& device_info) {
return HloFusionAnalysis::Create(
consumer.backend_config<GpuBackendConfig>()->fusion_backend_config(),
HloFusionAdaptor::ForInstruction(&consumer), &device_info);
}
}
} | #include "xla/service/gpu/hlo_fusion_analysis.h"
#include <gtest/gtest.h>
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_description.pb.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
class HloFusionAnalysisTest : public HloTestBase {};
TEST_F(HloFusionAnalysisTest, DoesNotPeekOutsideBoundary) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
ENTRY main {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
%reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add
ROOT %bitcast = s32[] bitcast(%reduce)
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kLoop);
auto analysis_fused =
AnalyzeProducerConsumerFusion(*root->operand(0), *root, device_info);
EXPECT_EQ(analysis_fused.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, ReductionWithMultipleUsers) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fused_computation {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
%reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add
%negate = f32[] negate(%reduce)
%log = f32[] log(%reduce)
ROOT %tuple = (f32[], f32[]) tuple(%negate, %log)
}
ENTRY main {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
ROOT %fusion = (f32[], f32[]) fusion(%p0, %p1), kind=kLoop, calls=fused_computation
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto analysis = HloFusionAnalysis::Create(
FusionBackendConfig::default_instance(),
HloFusionAdaptor::ForInstruction(
module->entry_computation()->root_instruction()),
&device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, ReductionEpilogueFusion) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fused_computation {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
%reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add
ROOT %negate = f32[] negate(%reduce)
}
ENTRY main {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
ROOT %fusion = f32[] fusion(%p0, %p1), kind=kInput, calls=fused_computation
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis = HloFusionAnalysis::Create(
FusionBackendConfig::default_instance(),
HloFusionAdaptor::ForInstruction(root), &device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, ReductionEpilogueFusionPartiallyFused) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
ROOT %reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add
}
ENTRY main {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
%fusion = f32[] fusion(%p0, %p1), kind=kInput, calls=fusion
ROOT %negate = f32[] negate(%fusion)
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis =
AnalyzeProducerConsumerFusion(*root->operand(0), *root, device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, ReductionEpilogueFusionPartiallyFusedInConsumer) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%p0 = f32[] parameter(0)
ROOT %negate = f32[] negate(%p0)
}
ENTRY main {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
%reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add
ROOT %fusion = f32[] fusion(%reduce), kind=kInput, calls=fusion
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis =
AnalyzeProducerConsumerFusion(*root->operand(0), *root, device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, ReductionEpilogueFusionPartiallyFusedInBoth) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion.1 {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
ROOT %reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add
}
fusion.2 {
%p0 = f32[] parameter(0)
ROOT %negate = f32[] negate(%p0)
}
ENTRY main {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
%fusion.1 = f32[] fusion(%p0, %p1), kind=kInput, calls=fusion.1
ROOT %fusion.2 = f32[] fusion(%fusion.1), kind=kInput, calls=fusion.2
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis =
AnalyzeProducerConsumerFusion(*root->operand(0), *root, device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, ReduceMultiOutputFusionWithTransposeBitcast) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%p0 = f32[1024, 512]{1,0} parameter(0)
%p1 = f32[] parameter(1)
%reduce = f32[1024]{0} reduce(%p0, %p1), dimensions={1}, to_apply=add
%bitcast = f32[512, 1024]{0,1} bitcast(%p0)
ROOT res = (f32[1024]{0}, f32[512, 1024]{0,1}) tuple(%reduce, %bitcast)
}
ENTRY main {
%p0 = f32[1024, 512]{1,0} parameter(0)
%p1 = f32[] parameter(1)
ROOT %fusion = (f32[1024]{0}, f32[512, 1024]{0,1}) fusion(%p0, %p1), kind=kInput, calls=fusion
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, InvalidReduceMultiOutputFusion) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%p0 = f32[1024, 1024]{1,0} parameter(0)
%p1 = f32[] parameter(1)
%reduce = f32[1024]{0} reduce(%p0, %p1), dimensions={0}, to_apply=add
%reduce2 = f32[1024]{0} reduce(%p0, %p1), dimensions={1}, to_apply=add
ROOT res = (f32[1024]{0}, f32[1024]{0}) tuple(reduce, reduce2)
}
ENTRY main {
%p0 = f32[1024, 1024]{1,0} parameter(0)
%p1 = f32[] parameter(1)
ROOT %fusion = (f32[1024]{0}, f32[1024]{0}) fusion(%p0, %p1), kind=kInput, calls=fusion
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kLoop);
}
TEST_F(HloFusionAnalysisTest, InvalidDevice) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
ENTRY main {
%p0 = f32[1024,128] parameter(0)
%p1 = f32[] parameter(1)
%reduce = f32[128] reduce(%p0, %p1), dimensions={0}, to_apply=add
ROOT %bitcast = s32[128] bitcast(%reduce)
})"));
stream_executor::GpuDeviceInfoProto device_info_proto;
stream_executor::DeviceDescription device_info(device_info_proto);
auto* root = module->entry_computation()->root_instruction();
auto analysis_fused =
AnalyzeProducerConsumerFusion(*root->operand(0), *root, device_info);
EXPECT_EQ(analysis_fused.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, ConcatFusion) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
fused_computation {
%p0 = f32[128] parameter(0)
%p1 = f32[128] parameter(1)
%add = f32[128] add(p0, p0)
%concat = f32[256] concatenate(%add, %p1), dimensions={0}
ROOT %negate = f32[256] negate(%concat)
}
ENTRY main {
%p0 = f32[128] parameter(0)
%p1 = f32[128] parameter(1)
ROOT %fusion = f32[256] fusion(%p0, %p1), kind=kInput, calls=fused_computation
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis = HloFusionAnalysis::Create(
FusionBackendConfig::default_instance(),
HloFusionAdaptor::ForInstruction(root), &device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kConcatenate);
}
}
} |
2,091 | cpp | tensorflow/tensorflow | softmax_rewriter_triton | third_party/xla/xla/service/gpu/transforms/softmax_rewriter_triton.cc | third_party/xla/xla/service/gpu/transforms/softmax_rewriter_triton_test.cc | #ifndef XLA_SERVICE_GPU_SOFTMAX_REWRITER_TRITON_H_
#define XLA_SERVICE_GPU_SOFTMAX_REWRITER_TRITON_H_
#include <variant>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/service/instruction_fusion.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
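// A "diamond" is a reduce/broadcast pattern feeding an elementwise binary
// root (e.g. softmax's subtract of the row max or divide by the row sum); a
// diamond chain is a maximal run of such diamonds that can be fused into a
// single Triton kernel. `root` is the chain's last instruction and
// `producer` the instruction feeding its first diamond.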
struct DiamondChainDescriptor {
HloInstruction* root = nullptr;
HloInstruction* producer = nullptr;
};
using DiamondMatchingDecision = std::variant<FusionDecision, HloInstruction*>;
class SoftmaxRewriterTriton : public HloModulePass {
public:
explicit SoftmaxRewriterTriton(const se::DeviceDescription& device_info,
HloCostAnalysis::ShapeSizeFunction shape_size)
: device_info_(device_info), shape_size_(shape_size) {}
absl::string_view name() const override { return "triton-softmax-rewriter"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
absl::StatusOr<std::vector<DiamondChainDescriptor>>
FindAllFusibleDiamondChains(
HloModule& module,
const absl::flat_hash_set<absl::string_view>& execution_threads) const;
absl::Status FuseDiamondChain(const DiamondChainDescriptor& diamond_chain);
DiamondMatchingDecision MatchesTritonCompatibleClosedReductionDiamond(
HloInstruction* instr) const;
private:
const se::DeviceDescription& device_info_;
const HloCostAnalysis::ShapeSizeFunction shape_size_;
mlir::MLIRContext mlir_context_;
};
}
}
#endif
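// Usage sketch (illustrative only; `device_info` stands in for a real
// se::DeviceDescription and `shape_size_fn` for any
// HloCostAnalysis::ShapeSizeFunction, both supplied by the surrounding
// pipeline):
//
//   SoftmaxRewriterTriton rewriter(device_info, shape_size_fn);
//   TF_ASSIGN_OR_RETURN(bool changed,
//                       rewriter.Run(module, /*execution_threads=*/{}));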
#include "xla/service/gpu/softmax_rewriter_triton.h"
#include <functional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/layout_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/model/fusion_analysis_cache.h"
#include "xla/service/gpu/model/gpu_indexing_performance_model.h"
#include "xla/service/gpu/model/symbolic_tile_analysis.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/gpu/triton_support.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
using hlo_query::IsBroadcastOfParameter;
using hlo_query::IsBroadcastOfScalarConstant;
bool HasDefaultLayout(const Shape& shape) {
return shape.has_layout() &&
LayoutUtil::IsMonotonicWithDim0Major(shape.layout());
}
bool TrivialEdge(HloInstruction** producer, HloInstruction* consumer,
HloOpcode opcode, const se::GpuComputeCapability& gpu_version);
bool BitcastIsTilingNoop(HloInstruction* bitcast,
const se::GpuComputeCapability& gpu_version) {
CHECK_EQ(bitcast->opcode(), HloOpcode::kBitcast);
if (ShapeUtil::IsEffectiveScalar(bitcast->shape())) {
return true;
}
auto last_dimension = [](const HloInstruction* instr) {
return instr->shape().dimensions().back();
};
HloInstruction* reduce = nullptr;
TrivialEdge(&reduce, bitcast->mutable_operand(0), HloOpcode::kReduce,
gpu_version);
return (HasDefaultLayout(bitcast->shape()) &&
HasDefaultLayout(bitcast->operand(0)->shape()) &&
(reduce != nullptr ||
last_dimension(bitcast->operand(0)) == last_dimension(bitcast)));
}
inline bool HasOneUse(const HloInstruction* instr) {
return instr->user_count() == 1;
}
bool IsBatchOrReductionDimBroadcast(const HloInstruction& hlo) {
CHECK_EQ(hlo.opcode(), HloOpcode::kBroadcast)
<< "Expected broadcast " << hlo.ToShortString();
CHECK_EQ(hlo.operand(0)->opcode(), HloOpcode::kParameter)
<< "Expected parameter " << hlo.operand(0)->ToShortString();
const HloBroadcastInstruction* broadcast =
Cast<HloBroadcastInstruction>(&hlo);
const HloParameterInstruction* parameter =
Cast<HloParameterInstruction>(hlo.operand(0));
if (parameter->shape().dimensions_size() + 1 !=
broadcast->shape().dimensions_size()) {
return false;
}
bool preserve_first_dim = broadcast->dimensions().front() == 0;
bool preserve_last_dim = broadcast->dimensions().back() ==
broadcast->shape().dimensions_size() - 1;
return !(preserve_first_dim && preserve_last_dim);
}
bool IsBroadcastOfAScalar(const HloInstruction& hlo) {
CHECK_EQ(hlo.opcode(), HloOpcode::kBroadcast)
<< "Expected broadcast " << hlo.ToShortString();
return ShapeUtil::IsScalar(hlo.operand(0)->shape());
}
bool IsSingleRowParameterBroadcast(const HloInstruction& hlo) {
CHECK_EQ(hlo.opcode(), HloOpcode::kBroadcast)
<< "Expected broadcast " << hlo.ToShortString();
CHECK_EQ(hlo.operand(0)->opcode(), HloOpcode::kParameter)
<< "Expected parameter " << hlo.operand(0)->ToShortString();
const HloBroadcastInstruction* broadcast =
Cast<HloBroadcastInstruction>(&hlo);
const HloParameterInstruction* parameter =
Cast<HloParameterInstruction>(hlo.operand(0));
if (parameter->shape().dimensions_size() != 1) {
return false;
}
return broadcast->dimensions()[0] == broadcast->shape().dimensions_size() - 1;
}
bool IsSupportedBroadcastOfParameter(const HloInstruction& hlo) {
return IsBroadcastOfParameter(hlo) &&
(IsBatchOrReductionDimBroadcast(hlo) || IsBroadcastOfAScalar(hlo) ||
IsSingleRowParameterBroadcast(hlo));
}
HloInstruction* ChooseOperandForFusionProcessing(HloInstruction* instr) {
CHECK_GT(instr->operand_count(), 0);
CHECK_LE(instr->operand_count(), 2);
if (instr->operand_count() > 1 &&
(IsBroadcastOfScalarConstant(*instr->operand(0)) ||
IsSupportedBroadcastOfParameter(*instr->operand(0)))) {
return instr->mutable_operand(1);
}
return instr->mutable_operand(0);
}
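// An instruction is "trivially fusible" when it has at most
// `num_allowed_users` users and a default layout, and is either a
// tiling-noop bitcast, a Triton-supported unary elementwise op, or a
// Triton-supported binary elementwise op whose operands are identical or
// where exactly one operand is a scalar-constant or supported parameter
// broadcast.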
bool IsTriviallyFusible(HloInstruction* instr,
const se::GpuComputeCapability& gpu_version,
int num_allowed_users = 1) {
if (instr->user_count() > num_allowed_users ||
!HasDefaultLayout(instr->shape())) {
return false;
}
if (instr->opcode() == HloOpcode::kBitcast &&
BitcastIsTilingNoop(instr, gpu_version)) {
return true;
}
if (instr->IsElementwise() && instr->operand_count() == 1) {
return static_cast<bool>(
legacy_triton::IsTritonSupportedInstruction(*instr, gpu_version));
}
if (instr->IsElementwiseBinary()) {
const HloInstruction* operand_0 = instr->operand(0);
const HloInstruction* operand_1 = instr->operand(1);
if (operand_0 == operand_1) {
return static_cast<bool>(
legacy_triton::IsTritonSupportedInstruction(*instr, gpu_version));
}
if ((IsBroadcastOfScalarConstant(*operand_0) ||
IsSupportedBroadcastOfParameter(*operand_0)) ^
(IsBroadcastOfScalarConstant(*operand_1) ||
IsSupportedBroadcastOfParameter(*operand_1))) {
return static_cast<bool>(
legacy_triton::IsTritonSupportedInstruction(*instr, gpu_version));
}
}
return false;
}
bool TrivialEdge(HloInstruction** producer, HloInstruction* consumer,
HloOpcode opcode,
const se::GpuComputeCapability& gpu_version) {
while (consumer->opcode() != opcode) {
if (IsTriviallyFusible(consumer, gpu_version)) {
consumer = ChooseOperandForFusionProcessing(consumer);
} else {
return false;
}
}
*producer = consumer;
return true;
}
bool IsTriviallyConnectedProducerOf(
HloInstruction* producer, HloInstruction* consumer,
const se::GpuComputeCapability& gpu_version) {
if (producer == consumer) {
return true;
}
HloInstruction* found_producer = consumer;
while (
TrivialEdge(&found_producer, consumer, producer->opcode(), gpu_version)) {
if (found_producer == producer) {
return true;
}
if (!IsTriviallyFusible(found_producer, gpu_version)) {
return false;
}
consumer = found_producer->mutable_operand(0);
}
return false;
}
HloInstruction* FindFirstNonFusibleDiamondProducer(
HloInstruction* diamond_producer,
const se::GpuComputeCapability& gpu_version) {
  if (IsTriviallyFusible(diamond_producer, gpu_version,
                         /*num_allowed_users=*/2)) {
diamond_producer = ChooseOperandForFusionProcessing(diamond_producer);
while (IsTriviallyFusible(diamond_producer, gpu_version)) {
diamond_producer = ChooseOperandForFusionProcessing(diamond_producer);
}
}
return diamond_producer;
}
absl::StatusOr<HloFusionInstruction*> MakeFusionForDiamondChain(
const DiamondChainDescriptor& diamond_chain) {
auto [root, producer] = diamond_chain;
std::string suggested_name = "triton_softmax";
HloComputation::Builder builder(absl::StrCat(suggested_name, "_computation"));
absl::flat_hash_map<const HloInstruction*, HloInstruction*>
old_to_new_mapping;
int param = 0;
old_to_new_mapping[producer] =
builder.AddInstruction(HloInstruction::CreateParameter(
param, producer->shape(), absl::StrCat("parameter_", param)));
param++;
std::vector<HloInstruction*> parameters = {producer};
std::function<void(HloInstruction*)> create_computation =
[&](HloInstruction* instr) -> void {
if (old_to_new_mapping.contains(instr)) {
return;
}
std::vector<HloInstruction*> new_operands;
for (HloInstruction* operand : instr->mutable_operands()) {
create_computation(operand);
new_operands.push_back(old_to_new_mapping[operand]);
}
if (instr->opcode() == HloOpcode::kParameter) {
old_to_new_mapping[instr] =
builder.AddInstruction(HloInstruction::CreateParameter(
param, instr->shape(), absl::StrCat("parameter_", param)));
parameters.push_back(instr);
param++;
} else {
old_to_new_mapping[instr] = builder.AddInstruction(
instr->CloneWithNewOperands(instr->shape(), new_operands));
}
};
create_computation(root);
HloComputation* computation =
root->GetModule()->AddComputationAndUnifyNamesAndIds(builder.Build(),
false);
HloInstruction* softmax_fusion =
root->parent()->AddInstruction(HloInstruction::CreateFusion(
root->shape(), HloInstruction::FusionKind::kCustom, parameters,
computation));
softmax_fusion->GetModule()->SetAndUniquifyInstrName(softmax_fusion,
"triton_softmax");
TF_ASSIGN_OR_RETURN(auto gpu_config,
softmax_fusion->backend_config<GpuBackendConfig>());
FusionBackendConfig& backend_config =
*gpu_config.mutable_fusion_backend_config();
backend_config.set_kind(std::string(kTritonFusionKind));
TF_RETURN_IF_ERROR(softmax_fusion->set_backend_config(gpu_config));
return xla::Cast<HloFusionInstruction>(softmax_fusion);
}
absl::Status FuseDiamondChainImpl(
const DiamondChainDescriptor& diamond_chain,
GpuPerformanceModelWithIndexingAnalysis& indexing_performance_model) {
TF_ASSIGN_OR_RETURN(HloFusionInstruction * softmax_fusion,
MakeFusionForDiamondChain(diamond_chain));
HloInstruction* root = diamond_chain.root;
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(softmax_fusion);
TF_ASSIGN_OR_RETURN(
TiledRunTimeDataOrError tiled_runtime_data_or,
indexing_performance_model.TryFindBestTilingForFusion(*fusion_adaptor));
if (const auto* fusion_decision =
std::get_if<FusionDecision>(&tiled_runtime_data_or)) {
return absl::FailedPreconditionError(absl::StrCat(
"SymbolicTileAnalysis failed. ", fusion_decision->Explain()));
}
TiledRunTimeData tiled_runtime_data =
std::get<TiledRunTimeData>(std::move(tiled_runtime_data_or));
TF_ASSIGN_OR_RETURN(auto backend_config,
softmax_fusion->backend_config<GpuBackendConfig>());
*backend_config.mutable_fusion_backend_config()
->mutable_block_level_fusion_config() =
tiled_runtime_data.block_level_parameters.ToBlockLevelFusionConfig();
TF_RETURN_IF_ERROR(softmax_fusion->set_backend_config(backend_config));
if (root->IsRoot()) {
root->parent()->set_root_instruction(softmax_fusion);
TF_RETURN_IF_ERROR(
root->parent()->RemoveInstructionAndUnusedOperands(root));
} else {
TF_RETURN_IF_ERROR(
root->parent()->ReplaceInstruction(root, softmax_fusion));
}
VLOG(5) << softmax_fusion->ToString();
return absl::OkStatus();
}
absl::StatusOr<bool> CanSymbolicTileAnalysisTileDiamondChain(
const DiamondChainDescriptor& diamond_chain) {
TF_ASSIGN_OR_RETURN(HloFusionInstruction * softmax_fusion,
MakeFusionForDiamondChain(diamond_chain));
mlir::MLIRContext context;
SymbolicTileAnalysisOrError symbolic_tile_analysis_or_error =
SymbolicTileAnalysis::AnalyzeComputation(
*softmax_fusion->called_computation(), &context);
bool can_tile = std::holds_alternative<SymbolicTileAnalysis>(
symbolic_tile_analysis_or_error);
TF_RETURN_IF_ERROR(diamond_chain.root->GetModule()->RemoveEmbeddedComputation(
softmax_fusion->called_computation()));
TF_RETURN_IF_ERROR(
diamond_chain.root->parent()->RemoveInstruction(softmax_fusion));
return can_tile;
}
}
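// Matches a closed reduction diamond rooted at `instr`:
//
//   producer ---> reduce ---> broadcast ---+
//       |                                  v
//       +----------------------------->  instr
//
// where `instr` is a Triton-supported elementwise binary op, the broadcast
// restores the reduced last dimension, and all intermediate edges are
// trivially fusible. Returns the diamond's producer on success, or a
// FusionDecision explaining why matching failed.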
DiamondMatchingDecision
SoftmaxRewriterTriton::MatchesTritonCompatibleClosedReductionDiamond(
HloInstruction* instr) const {
if (!instr->IsElementwiseBinary()) {
return "Root is not elementwise binary.";
}
if (!legacy_triton::IsTritonSupportedInstruction(
*instr, device_info_.gpu_compute_capability())) {
return "Root is not supported for Triton instruction.";
}
HloInstruction* producer;
HloInstruction* broadcast;
HloInstruction* reduce;
if (!TrivialEdge(&broadcast, instr->mutable_operand(1), HloOpcode::kBroadcast,
device_info_.gpu_compute_capability())) {
return "Could not find a trivial connection from root to a broadcast.";
}
if (!TrivialEdge(&reduce, broadcast->mutable_operand(0), HloOpcode::kReduce,
device_info_.gpu_compute_capability())) {
return "Could not find a trivial connection from matched broadcast to a "
"reduction.";
}
if (!(HasDefaultLayout(broadcast->shape()) &&
HasDefaultLayout(reduce->shape()))) {
return "Broadcast or reduce have non-default layouts.";
}
if (CodegenDecision is_supported =
legacy_triton::IsTritonSupportedInstruction(
*reduce, device_info_.gpu_compute_capability());
!is_supported) {
VLOG(3) << is_supported.Explain();
return is_supported;
}
if (!HasOneUse(broadcast) || !HasOneUse(reduce)) {
return "More than one use of broadcast or reduce.";
}
producer = reduce->mutable_operand(0);
if (absl::c_linear_search(broadcast->dimensions(),
broadcast->shape().rank() - 1)) {
return "Broadcast is not along the reduction dimension.";
}
while (IsTriviallyFusible(producer, device_info_.gpu_compute_capability())) {
producer = ChooseOperandForFusionProcessing(producer);
}
if (!HasDefaultLayout(producer->shape())) {
return "Producer has non-default layout.";
}
if (!IsTriviallyConnectedProducerOf(producer, instr->mutable_operand(0),
device_info_.gpu_compute_capability())) {
return "Producer is not trivially connected.";
}
if (producer != instr->operand(0) && instr->operand(0)->user_count() != 1) {
return "Unsupported root-producer connection.";
}
VLOG(5) << "Matched Softmax diamond with: ";
VLOG(5) << "root: " << instr->ToString();
VLOG(5) << "producer: " << producer->ToString();
VLOG(5) << "broadcast: " << broadcast->ToString();
VLOG(5) << "reduce: " << reduce->ToString();
return producer;
}
absl::StatusOr<std::vector<DiamondChainDescriptor>>
SoftmaxRewriterTriton::FindAllFusibleDiamondChains(
HloModule& module,
const absl::flat_hash_set<absl::string_view>& execution_threads) const {
std::vector<DiamondChainDescriptor> matched_diamonds;
for (HloComputation* comp :
module.MakeNonfusionComputations(execution_threads)) {
if (comp->IsCustomCallComputation()) {
continue;
}
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
PrimitiveType element_ty = instr->shape().element_type();
if (element_ty != F16 && element_ty != F32 && element_ty != BF16) {
continue;
}
auto producer = MatchesTritonCompatibleClosedReductionDiamond(instr);
if (std::holds_alternative<HloInstruction*>(producer)) {
DiamondChainDescriptor diamond_chain{
instr, std::get<HloInstruction*>(producer)};
TF_ASSIGN_OR_RETURN(
bool can_tile_diamond_chain,
CanSymbolicTileAnalysisTileDiamondChain(diamond_chain));
if (can_tile_diamond_chain) {
matched_diamonds.push_back(diamond_chain);
} else {
VLOG(5) << "Cannot tile the diamond pattern described by "
<< "instructions " << instr->ToString() << " and "
<< std::get<HloInstruction*>(producer)->ToString() << ".";
continue;
}
} else {
VLOG(5) << "Cannot match the diamond pattern for instruction "
<< instr->ToString()
<< ". Reason: " << std::get<FusionDecision>(producer).Explain();
}
}
}
if (matched_diamonds.empty()) {
return std::vector<DiamondChainDescriptor>();
}
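  // Greedily merge adjacent diamonds into chains: a diamond joins the current
  // chain when its first non-fusible producer is exactly the previous
  // diamond's root and both diamonds reduce over the same dimension size.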
auto reduction_dimension_size_from_diamond_root =
[](HloInstruction* diamond_root) {
HloInstruction* instr = diamond_root->mutable_operand(1);
while (instr->opcode() != HloOpcode::kReduce) {
instr = ChooseOperandForFusionProcessing(instr);
}
int operand_rank = instr->operand(0)->shape().rank();
CHECK_EQ(instr->dimensions().size(), 1);
CHECK_EQ(instr->dimensions(0), operand_rank - 1);
return instr->operand(0)->shape().dimensions(operand_rank - 1);
};
auto last_trivially_fusible_user = [&](HloInstruction* instr) {
while (HasOneUse(instr) && !instr->IsRoot() &&
IsTriviallyFusible(instr->users().front(),
device_info_.gpu_compute_capability())) {
instr = instr->users().front();
}
if (HasOneUse(instr) && !instr->IsRoot() &&
IsTriviallyFusible(
instr->users().front(), device_info_.gpu_compute_capability(),
instr->users().front()->user_count())) {
instr = instr->users().front();
}
return instr;
};
std::vector<DiamondChainDescriptor> diamond_chains;
diamond_chains.reserve(matched_diamonds.size());
HloInstruction* current_fusion_producer = FindFirstNonFusibleDiamondProducer(
matched_diamonds.front().producer, device_info_.gpu_compute_capability());
int current_reduce_dimension_size =
reduction_dimension_size_from_diamond_root(matched_diamonds.front().root);
for (int diamond_idx = 1; diamond_idx < matched_diamonds.size();
++diamond_idx) {
auto [diamond_root, diamond_producer] = matched_diamonds[diamond_idx];
HloInstruction* previous_diamond_root =
matched_diamonds[diamond_idx - 1].root;
HloInstruction* first_non_fusible_diamond_producer =
FindFirstNonFusibleDiamondProducer(
diamond_producer, device_info_.gpu_compute_capability());
int diamond_reduce_dimension_size =
reduction_dimension_size_from_diamond_root(diamond_root);
if (first_non_fusible_diamond_producer == previous_diamond_root &&
((first_non_fusible_diamond_producer != diamond_producer &&
HasOneUse(first_non_fusible_diamond_producer)) ||
(first_non_fusible_diamond_producer == diamond_producer &&
first_non_fusible_diamond_producer->user_count() == 2)) &&
diamond_reduce_dimension_size == current_reduce_dimension_size) {
continue;
}
diamond_chains.push_back(DiamondChainDescriptor{
last_trivially_fusible_user(previous_diamond_root),
current_fusion_producer,
});
current_fusion_producer = first_non_fusible_diamond_producer;
current_reduce_dimension_size = diamond_reduce_dimension_size;
}
diamond_chains.push_back(DiamondChainDescriptor{
last_trivially_fusible_user(matched_diamonds.back().root),
current_fusion_producer});
std::vector<DiamondChainDescriptor> filtered_diamond_chains;
for (const DiamondChainDescriptor& diamond_chain : diamond_chains) {
TF_ASSIGN_OR_RETURN(bool can_tile_diamond_chain,
CanSymbolicTileAnalysisTileDiamondChain(diamond_chain));
if (can_tile_diamond_chain) {
filtered_diamond_chains.push_back(diamond_chain);
}
}
return filtered_diamond_chains;
}
absl::Status SoftmaxRewriterTriton::FuseDiamondChain(
const DiamondChainDescriptor& diamond_chain) {
HloFusionAnalysisCache fusion_analysis_cache(device_info_);
GpuPerformanceModelWithIndexingAnalysis indexing_performance_model(
&device_info_, &fusion_analysis_cache, shape_size_, &mlir_context_);
return FuseDiamondChainImpl(diamond_chain, indexing_performance_model);
}
absl::StatusOr<bool> SoftmaxRewriterTriton::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
auto cuda_compute_capability = std::get_if<se::CudaComputeCapability>(
&device_info_.gpu_compute_capability());
if (!cuda_compute_capability) {
return absl::FailedPreconditionError(
"Triton support is only enabled for CUDA GPUs.");
} else if (!cuda_compute_capability->IsAtLeastAmpere()) {
return absl::FailedPreconditionError(
absl::StrCat("Triton support is only enabled for Ampere GPUs (compute ",
"capability 8.0) and up, but got compute capability ",
cuda_compute_capability->major, ".",
cuda_compute_capability->minor, "."));
}
TF_ASSIGN_OR_RETURN(std::vector<DiamondChainDescriptor> diamond_chains,
FindAllFusibleDiamondChains(*module, execution_threads));
if (diamond_chains.empty()) {
return false;
  }
  // Assumed tail (mirrors the reverse-order fusion loop in the test helper
  // below): fuse each chain so that a chain's root is still present when it
  // also serves as the producer of the following chain.
  for (auto diamond_chain = diamond_chains.rbegin();
       diamond_chain != diamond_chains.rend(); ++diamond_chain) {
    TF_RETURN_IF_ERROR(FuseDiamondChain(*diamond_chain));
  }
  return true;
}
} | #include "xla/service/gpu/softmax_rewriter_triton.h"
#include <cstdint>
#include <memory>
#include <string>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/optimization.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/instruction_fusion.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
using ::testing::HasSubstr;
GpuHloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
bool HasBlockLevelFusionConfig(const HloInstruction* fusion) {
return fusion->opcode() == HloOpcode::kFusion &&
fusion->has_backend_config() &&
fusion->backend_config<GpuBackendConfig>().ok() &&
fusion->backend_config<GpuBackendConfig>()
->fusion_backend_config()
.has_block_level_fusion_config();
}
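// Test helper that drives the rewriter's two public steps directly: find all
// fusible diamond chains, then fuse them in reverse order (mirroring the
// pass), returning whether any chain was found.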
absl::StatusOr<bool> SoftmaxRewriterTritonMatchAndRewrite(
const se::DeviceDescription& device_info, HloModule* module) {
CHECK_NE(module, nullptr);
SoftmaxRewriterTriton softmax_rewriter_triton(device_info,
ShapeSizeBytesFunction());
TF_ASSIGN_OR_RETURN(std::vector<DiamondChainDescriptor> diamond_chains,
softmax_rewriter_triton.FindAllFusibleDiamondChains(
*module, {}));
for (auto diamond_chain = diamond_chains.rbegin();
diamond_chain != diamond_chains.rend(); ++diamond_chain) {
TF_RETURN_IF_ERROR(
softmax_rewriter_triton.FuseDiamondChain(*diamond_chain));
}
return !diamond_chains.empty();
}
class SoftmaxRewriterTritonTest
: public HloTestBase,
public ::testing::WithParamInterface<PrimitiveType> {
protected:
se::DeviceDescription device_info_{TestGpuDeviceInfo::RTXA6000DeviceInfo()};
};
TEST_P(SoftmaxRewriterTritonTest, CanFuseExactSoftmax) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
add_computation {
arg_0.1 = $0[] parameter(0)
arg_1.1 = $0[] parameter(1)
ROOT add = $0[] add(arg_0.1, arg_1.1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
exponential = $0[127,125]{1,0} exponential(subtract)
constant_zero = $0[] constant(0)
second_reduce = $0[127]{0} reduce(exponential, constant_zero), dimensions={1}, to_apply=add_computation
second_broadcast = $0[127,125]{1,0} broadcast(second_reduce), dimensions={0}
ROOT divide = $0[127,125]{1,0} divide(exponential, second_broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
VLOG(2) << module->ToString();
switch (data_type) {
case F32:
case BF16:
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter())
.WithPredicate(HasBlockLevelFusionConfig)));
break;
case F16:
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Divide(m::Exp(), m::Broadcast())));
break;
default:
ABSL_UNREACHABLE();
}
}
TEST_P(SoftmaxRewriterTritonTest, CanFuseFirstSoftmaxDiamond) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
ROOT subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
VLOG(2) << module->ToString();
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig)));
}
TEST_F(SoftmaxRewriterTritonTest, CanNotFuseExactSoftmaxF64) {
const std::string hlo_string = R"(
HloModule softmax
max_computation {
arg_0 = f64[] parameter(0)
arg_1 = f64[] parameter(1)
ROOT maximum = f64[] maximum(arg_0, arg_1)
}
add_computation {
arg_0.1 = f64[] parameter(0)
arg_1.1 = f64[] parameter(1)
ROOT add = f64[] add(arg_0.1, arg_1.1)
}
ENTRY main {
param_0 = f64[127,125]{1,0} parameter(0)
constant_neg_inf = f64[] constant(-inf)
reduce = f64[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = f64[127,125]{1,0} broadcast(reduce), dimensions={0}
subtract = f64[127,125]{1,0} subtract(param_0, broadcast)
exponential = f64[127,125]{1,0} exponential(subtract)
constant_zero = f64[] constant(0)
second_reduce = f64[127]{0} reduce(exponential, constant_zero), dimensions={1}, to_apply=add_computation
second_broadcast = f64[127,125]{1,0} broadcast(second_reduce), dimensions={0}
ROOT divide = f64[127,125]{1,0} divide(exponential, second_broadcast)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
}
TEST_F(SoftmaxRewriterTritonTest, CanFuseExactSoftmaxBF16) {
const std::string hlo_string = R"(
HloModule softmax
max_computation {
arg_0 = bf16[] parameter(0)
arg_1 = bf16[] parameter(1)
ROOT maximum = bf16[] maximum(arg_0, arg_1)
}
add_computation {
arg_0.1 = bf16[] parameter(0)
arg_1.1 = bf16[] parameter(1)
ROOT add = bf16[] add(arg_0.1, arg_1.1)
}
ENTRY main {
param_0 = bf16[127,125]{1,0} parameter(0)
constant_neg_inf = bf16[] constant(-inf)
reduce = bf16[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = bf16[127,125]{1,0} broadcast(reduce), dimensions={0}
subtract = bf16[127,125]{1,0} subtract(param_0, broadcast)
exponential = bf16[127,125]{1,0} exponential(subtract)
constant_zero = bf16[] constant(0)
second_reduce = bf16[127]{0} reduce(exponential, constant_zero), dimensions={1}, to_apply=add_computation
second_broadcast = bf16[127,125]{1,0} broadcast(second_reduce), dimensions={0}
ROOT divide = bf16[127,125]{1,0} divide(exponential, second_broadcast)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig)));
}
TEST_P(SoftmaxRewriterTritonTest, CanNotFuseSoftmaxDiamondWithWrongLayout) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[127,125]{0,1} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
ROOT subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
}
TEST_P(SoftmaxRewriterTritonTest,
CanNotFuseSoftmaxDiamondWithWrongReduceDimension) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[125]{0} reduce(param_0, constant_neg_inf), dimensions={0}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={1}
ROOT subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
}
TEST_P(SoftmaxRewriterTritonTest,
CanNotFuseSoftmaxDiamondWithWrongBroadcastDimension) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[125,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[125]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[125,125]{1,0} broadcast(reduce), dimensions={1}
ROOT subtract = $0[125,125]{1,0} subtract(param_0, broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
}
TEST_P(SoftmaxRewriterTritonTest,
CanNotFuseSoftmaxDiamondWithExtraBroadcastUsage) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
ROOT multiply = $0[127,125]{1,0} multiply(broadcast, subtract)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
}
TEST_P(SoftmaxRewriterTritonTest,
CanFuseSoftmaxWithIntermediateUnaryElementwise) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
add_computation {
arg_0.1 = $0[] parameter(0)
arg_1.1 = $0[] parameter(1)
ROOT add = $0[] add(arg_0.1, arg_1.1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
abs = $0[127,125]{1,0} abs(subtract)
exponential = $0[127,125]{1,0} exponential(abs)
constant_zero = $0[] constant(0)
second_reduce = $0[127]{0} reduce(exponential, constant_zero), dimensions={1}, to_apply=add_computation
second_broadcast = $0[127,125]{1,0} broadcast(second_reduce), dimensions={0}
ROOT divide = $0[127,125]{1,0} divide(exponential, second_broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
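// F32 and BF16 inputs should be rewritten into a block-level Triton fusion;
// for F16 the assertions below expect the plain divide to remain at the root.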
switch (data_type) {
case F32:
case BF16:
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter())
.WithPredicate(HasBlockLevelFusionConfig)));
break;
case F16:
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Divide()));
break;
default:
ABSL_UNREACHABLE();
}
}
TEST_P(SoftmaxRewriterTritonTest,
CanFuseTwoDiamondsWithSecondDiamondProducerEqualToFirstDiamondRoot) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
add_computation {
arg_0.1 = $0[] parameter(0)
arg_1.1 = $0[] parameter(1)
ROOT add = $0[] add(arg_0.1, arg_1.1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
constant_zero = $0[] constant(0)
second_reduce = $0[127]{0} reduce(subtract, constant_zero), dimensions={1}, to_apply=add_computation
second_broadcast = $0[127,125]{1,0} broadcast(second_reduce), dimensions={0}
ROOT divide = $0[127,125]{1,0} divide(subtract, second_broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
switch (data_type) {
case F32:
case BF16:
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter())
.WithPredicate(HasBlockLevelFusionConfig)));
break;
case F16:
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Divide()));
break;
default:
ABSL_UNREACHABLE();
}
}
TEST_P(SoftmaxRewriterTritonTest,
CanFuseDiamondWithTrailingUnaryElementwiseAtTheRoot) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
ROOT abs = $0[127,125]{1,0} abs(subtract)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig)));
}
TEST_P(SoftmaxRewriterTritonTest, CanFuseDiamondWithUnaryElementwisePrefix) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
abs = $0[127,125]{1,0} abs(param_0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(abs, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
ROOT subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig)));
}
TEST_P(SoftmaxRewriterTritonTest,
CanFuseDiamondWithMultipleBroadcastDimensions) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[1,3,125,125]{3,2,1,0} parameter(0)
bitcast = $0[3,125,125]{2,1,0} bitcast($0[1,3,125,125]{3,2,1,0} param_0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[3,125]{1,0} reduce($0[3,125,125]{2,1,0} bitcast, $0[] constant_neg_inf), dimensions={2}, to_apply=max_computation
broadcast = $0[1,3,125,125]{3,2,1,0} broadcast($0[3,125]{1,0} reduce), dimensions={1,2}
ROOT subtract = $0[1,3,125,125]{3,2,1,0} subtract($0[1,3,125,125]{3,2,1,0} param_0, $0[1,3,125,125]{3,2,1,0} broadcast)
})";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig)));
}
TEST_P(SoftmaxRewriterTritonTest,
CanNotFuseSoftmaxDiamondWithNonConstantReducerIdentity) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
identity = $0[] parameter(1)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, identity), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
ROOT subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
}
TEST_P(SoftmaxRewriterTritonTest,
CanNotFuseSoftmaxDiamondWithTritonIncompatibleRoot) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
divide = $0[127,125]{1,0} divide(param_0, broadcast)
ROOT remainder = $0[127,125]{1,0} remainder(divide, broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
}
TEST_P(SoftmaxRewriterTritonTest,
CanNotFuseSoftmaxDiamondWithTritonIncompatibleReducer) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
if_0 = pred[] is-finite(arg_0)
c = $0[] convert(if_0)
ROOT maximum = $0[] maximum(c, arg_1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
ROOT subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
}
TEST_P(SoftmaxRewriterTritonTest,
CanFuseSoftmaxDiamondWithLastDimensionBitcastAfterReduce) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[3,127,125]{2,1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[3,127]{1,0} reduce(param_0, constant_neg_inf), dimensions={2}, to_apply=max_computation
bitcasted_reduce = $0[381]{0} bitcast(reduce)
broadcast = $0[381,125]{1,0} broadcast(bitcasted_reduce), dimensions={0}
bitcasted_broadcast = $0[3,127,125]{2,1,0} bitcast(broadcast)
ROOT subtract = $0[3,127,125]{2,1,0} subtract(param_0, bitcasted_broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig)));
}
TEST_P(SoftmaxRewriterTritonTest,
CanNotFuseSoftmaxDiamondWithTransposeBitcast) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[1,127,125]{2,1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
bitcasted_param_0 = $0[127,1,125]{2,0,1} bitcast(param_0)
reduce = $0[127,1]{0,1} reduce(bitcasted_param_0, constant_neg_inf), dimensions={2}, to_apply=max_computation
broadcast = $0[127,1,125]{2,0,1} broadcast(reduce), dimensions={0,1}
bitcasted_broadcast = $0[1,127,125]{2,1,0} bitcast(broadcast)
ROOT subtract = $0[1,127,125]{2,1,0} subtract(param_0, bitcasted_broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
}
TEST_P(SoftmaxRewriterTritonTest,
CanNotFuseTwoDiamondsWithDifferentReductionAxisSizeTogether) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
add_computation {
arg_0.1 = $0[] parameter(0)
arg_1.1 = $0[] parameter(1)
ROOT add = $0[] add(arg_0.1, arg_1.1)
}
ENTRY main {
param_0 = $0[127,625]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,625]{1,0} broadcast(reduce), dimensions={0}
subtract = $0[127,625]{1,0} subtract(param_0, broadcast)
bitcasted_subtract = $0[127,5,125] bitcast(subtract)
exponential = $0[127,5,125] exponential(bitcasted_subtract)
constant_zero = $0[] constant(0)
second_reduce = $0[127,5] reduce(exponential, constant_zero), dimensions={2}, to_apply=add_computation
second_broadcast = $0[127,5,125] broadcast(second_reduce), dimensions={0,1}
ROOT divide = $0[127,5,125] divide(exponential, second_broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
switch (data_type) {
case F32:
case BF16:
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Bitcast(m::Fusion(m::Parameter())
.WithPredicate(
HasBlockLevelFusionConfig)))
.WithPredicate(HasBlockLevelFusionConfig)));
break;
case F16:
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Divide(m::Exp(), m::Broadcast())));
break;
default:
ABSL_UNREACHABLE();
}
}
TEST_P(SoftmaxRewriterTritonTest,
CanNotFuseTwoDiamondsWithExtraUsageForFirstDiamondRoot) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
add_computation {
arg_0.1 = $0[] parameter(0)
arg_1.1 = $0[] parameter(1)
ROOT add = $0[] add(arg_0.1, arg_1.1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
exponential = $0[127,125]{1,0} exponential(subtract)
constant_zero = $0[] constant(0)
second_reduce = $0[127]{0} reduce(exponential, constant_zero), dimensions={1}, to_apply=add_computation
second_broadcast = $0[127,125]{1,0} broadcast(second_reduce), dimensions={0}
divide = $0[127,125]{1,0} divide(exponential, second_broadcast)
ROOT tuple = ($0[127,125]{1,0}, $0[127,125]{1,0}) tuple(divide, subtract)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
switch (data_type) {
case F32:
case BF16:
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Fusion(m::Fusion()).WithPredicate(HasBlockLevelFusionConfig),
m::Fusion(m::Parameter())
.WithPredicate(HasBlockLevelFusionConfig))));
break;
case F16:
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Divide(),
m::Fusion(m::Parameter())
.WithPredicate(HasBlockLevelFusionConfig))));
break;
default:
ABSL_UNREACHABLE();
}
}
TEST_P(SoftmaxRewriterTritonTest,
CanNotFuseTwoDiamondsWithExtraUsageForSecondDiamondProducer) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
add_computation {
arg_0.1 = $0[] parameter(0)
arg_1.1 = $0[] parameter(1)
ROOT add = $0[] add(arg_0.1, arg_1.1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
exponential = $0[127,125]{1,0} exponential(subtract)
constant_zero = $0[] constant(0)
second_reduce = $0[127]{0} reduce(exponential, constant_zero), dimensions={1}, to_apply=add_computation
second_broadcast = $0[127,125]{1,0} broadcast(second_reduce), dimensions={0}
divide = $0[127,125]{1,0} divide(exponential, second_broadcast)
ROOT tuple = ($0[127,125]{1,0}, $0[127,125]{1,0}) tuple(divide, exponential)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
switch (data_type) {
case F32:
case BF16:
EXPECT_THAT( |
2,092 | cpp | tensorflow/tensorflow | gpu_latency_hiding_scheduler | third_party/xla/xla/service/gpu/gpu_latency_hiding_scheduler.cc | third_party/xla/xla/service/gpu/gpu_latency_hiding_scheduler_test.cc | #ifndef XLA_SERVICE_GPU_GPU_LATENCY_HIDING_SCHEDULER_H_
#define XLA_SERVICE_GPU_GPU_LATENCY_HIDING_SCHEDULER_H_
#include <cstdint>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/latency_hiding_scheduler.h"
#include "xla/shape.h"
namespace xla {
namespace gpu {
CanonicalAsyncOp GpuGetCanonicalAsyncOp(const HloInstruction& hlo);
int64_t GetSizeOfShape(const Shape& shape, int pointer_size);
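// Target-defined scheduling resources: one stream per pipelined Send/Recv
// direction (pipeline 0 and 1), one stream for collectives, and the async
// compute streams.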
enum class GpuResourceType {
kGpuAsyncStreamSend0 = 0,
kGpuAsyncStreamSend1 = 1,
kGpuAsyncStreamRecv0 = 2,
kGpuAsyncStreamRecv1 = 3,
kGpuAsyncStreamCollectives = 4,
kGpuAsyncStreamComputes = 5,
kNumTargetResources = 6,
};
class GpuAsyncTrackerBase : public AsyncTracker {
public:
explicit GpuAsyncTrackerBase(
const SchedulerConfig& config,
GetCanonicalAsyncOpFunc func = GpuGetCanonicalAsyncOp);
bool IsSupportedAsyncDone(const HloInstruction& hlo) const override;
bool IsSupportedAsyncStart(const HloInstruction& hlo) const override;
void PostProcessScheduleGraph(
HloScheduleGraph* schedule_graph,
const LatencyEstimator* latency_estimator) const override;
};
class GpuAsyncTracker : public GpuAsyncTrackerBase {
public:
explicit GpuAsyncTracker(const SchedulerConfig& config);
ResourcesVector GetResourcesFromInstruction(
const HloInstruction& instr) const override;
int64_t GetNumTargetDefinedResources() const override;
int64_t GetNumAvailableResources(int64_t resource_type) const override;
absl::string_view GetResourceName(int64_t resource_type) const override;
ResourceHazardType GetResourceHazardType(
int64_t resource_type) const override;
int64_t GetNumResourcesPerInstruction(
int64_t resource_type, const HloInstruction& instr) const override;
};
class GpuLatencyEstimator : public ApproximateLatencyEstimator {
public:
explicit GpuLatencyEstimator(
int64_t pointer_size,
GetCanonicalAsyncOpFunc func = GpuGetCanonicalAsyncOp);
TimeCost NodeCost(const HloInstruction* instr) const override;
TimeCost GetLatencyBetween(const HloGraphNode& from,
const HloGraphNode& to) const override;
private:
int64_t pointer_size_;
};
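// Typical wiring, as a sketch (names follow the latency-hiding scheduler API;
// the exact call sites live in the GPU scheduling pass):
//   auto estimator = std::make_unique<GpuLatencyEstimator>(pointer_size);
//   auto tracker = std::make_unique<GpuAsyncTracker>(scheduler_config);
// Both are handed to LatencyHidingScheduler, which uses the tracker to model
// stream contention and the estimator to rank overlap candidates.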
}
}
#endif
#include "xla/service/gpu/gpu_latency_hiding_scheduler.h"
#include <cstdint>
#include <tuple>
#include <utility>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/latency_hiding_scheduler.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
namespace xla {
namespace gpu {
namespace {
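// All-reduces moving more than 30 MiB are treated as "costly" and given an
// extra latency multiplier in GetLatencyBetween below.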
static constexpr int64_t kCostlyAllReduceThreshold = 30 * 1024 * 1024;
static constexpr int64_t kCostlyAllReduceMultiplier = 4;
bool IsNopInstruction(const HloInstruction& hlo) {
HloOpcode op = hlo.opcode();
return op == HloOpcode::kGetTupleElement || op == HloOpcode::kBitcast ||
op == HloOpcode::kConstant || op == HloOpcode::kParameter ||
hlo.IsEffectiveBitcast();
}
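// True for an async-start/done pair that wraps a non-collective computation
// running on a different execution thread than its parent.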
bool IsAsyncComputeOp(const HloInstruction& hlo) {
return (hlo.opcode() == HloOpcode::kAsyncStart ||
hlo.opcode() == HloOpcode::kAsyncDone) &&
!hlo_query::IsCollectiveCommunicationOp(hlo.async_wrapped_opcode()) &&
hlo.async_execution_thread() != hlo.parent()->execution_thread();
}
int64_t GetPipelineStream(const HloInstruction& start) {
auto it = start.frontend_attributes().map().find(kSendRecvPipelineAttr);
if (it != start.frontend_attributes().map().end() && it->second == "1") {
return 1;
}
return 0;
}
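// Maps a canonicalized Send/Recv start/done to one of the four dedicated P2P
// stream resources, keyed by direction (send/recv) and pipeline stream (0/1).
// Following the latency-hiding scheduler's convention, starts are modeled as
// releasing the resource and dones as occupying it.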
std::pair<GpuResourceType, ResourceUsageType> GetP2PResourceAndUsage(
const HloInstruction& instr, const CanonicalAsyncOp& op) {
ResourceUsageType usage = op.outer == HloOpcode::kAsyncStart
? ResourceUsageType::kResourceRelease
: ResourceUsageType::kResourceOccupy;
int64_t pipeline = GetPipelineStream(instr);
HloOpcode opcode = op.inner;
GpuResourceType resource;
if (pipeline == 0) {
resource = opcode == HloOpcode::kSend
? GpuResourceType::kGpuAsyncStreamSend0
: GpuResourceType::kGpuAsyncStreamRecv0;
} else {
resource = opcode == HloOpcode::kSend
? GpuResourceType::kGpuAsyncStreamSend1
: GpuResourceType::kGpuAsyncStreamRecv1;
}
return {resource, usage};
}
}
int64_t GetSizeOfShape(const Shape& shape, int pointer_size) {
int64_t size = ShapeUtil::ByteSizeOf(shape, pointer_size);
if (shape.IsTuple() || shape.is_static()) {
return size;
}
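// For dynamic shapes, each dynamic dimension's runtime size is carried as an
// extra S32 next to the buffer.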
int64_t metadata_size = sizeof(int32_t) * shape.dimensions_size();
return size + metadata_size;
}
CanonicalAsyncOp GpuGetCanonicalAsyncOp(const HloInstruction& hlo) {
switch (hlo.opcode()) {
case HloOpcode::kSend:
return {HloOpcode::kAsyncStart, HloOpcode::kSend};
case HloOpcode::kSendDone:
return {HloOpcode::kAsyncDone, HloOpcode::kSend};
case HloOpcode::kRecv:
return {HloOpcode::kAsyncStart, HloOpcode::kRecv};
case HloOpcode::kRecvDone:
return {HloOpcode::kAsyncDone, HloOpcode::kRecv};
default:
return DefaultGetCanonicalAsyncOp(hlo);
}
}
GpuAsyncTrackerBase::GpuAsyncTrackerBase(const SchedulerConfig& config,
GetCanonicalAsyncOpFunc func)
: AsyncTracker(config, func) {}
bool GpuAsyncTrackerBase::IsSupportedAsyncDone(
const HloInstruction& hlo) const {
return (hlo_query::IsAsyncCollectiveDoneOp(&hlo,
/*include_send_recv=*/true) &&
!IsSyncCollective(hlo.operand(0))) ||
IsAsyncComputeOp(hlo);
}
bool GpuAsyncTrackerBase::IsSupportedAsyncStart(
const HloInstruction& hlo) const {
return (hlo_query::IsAsyncCollectiveStartOp(&hlo,
/*include_send_recv=*/true) &&
!IsSyncCollective(&hlo)) ||
IsAsyncComputeOp(hlo);
}
void GpuAsyncTrackerBase::PostProcessScheduleGraph(
HloScheduleGraph* schedule_graph,
const LatencyEstimator* latency_estimator) const {
for (auto inst : schedule_graph->GetOriginalInstrList()) {
if (inst->opcode() == HloOpcode::kRecv) {
if (inst->frontend_attributes().map().count(kSendRecvPipelineAttr) > 0) {
HloGraphNode& node = schedule_graph->GetNode(inst);
node.SetForceEarly(true);
VLOG(5) << "Setting force early for instruction: " << inst->ToString();
}
}
if (inst->has_backend_config()) {
auto gpu_config = inst->backend_config<GpuBackendConfig>();
if (gpu_config.ok()) {
HloGraphNode& node = schedule_graph->GetNode(inst);
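// Note: the latency-hiding scheduler walks the computation in reverse, so
// delaying a node there moves it earlier in the final schedule; this is
// presumably why force_earliest_schedule maps onto SetForceDelay.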
node.SetForceDelay(gpu_config->force_earliest_schedule());
VLOG(5) << "Setting force delay for instruction: " << inst->ToString();
}
}
}
}
GpuAsyncTracker::GpuAsyncTracker(const SchedulerConfig& config)
: GpuAsyncTrackerBase(config) {}
ResourcesVector GpuAsyncTracker::GetResourcesFromInstruction(
const HloInstruction& instr) const {
CanonicalAsyncOp op = GetCanonicalAsyncOp(instr);
if (op.outer == HloOpcode::kAsyncStart || op.outer == HloOpcode::kAsyncDone) {
ResourceUsageType usage;
GpuResourceType resource;
if (op.inner == HloOpcode::kSend || op.inner == HloOpcode::kRecv) {
std::tie(resource, usage) = GetP2PResourceAndUsage(instr, op);
} else {
usage = op.outer == HloOpcode::kAsyncStart
? ResourceUsageType::kResourceRelease
: ResourceUsageType::kResourceOccupy;
resource = hlo_query::IsCollectiveCommunicationOp(op.inner)
? GpuResourceType::kGpuAsyncStreamCollectives
: GpuResourceType::kGpuAsyncStreamComputes;
}
return {std::make_pair(
GetFirstTargetDefinedResource() + static_cast<int64_t>(resource),
usage)};
}
return GpuAsyncTrackerBase::GetResourcesFromInstruction(instr);
}
int64_t GpuAsyncTracker::GetNumTargetDefinedResources() const {
return static_cast<int64_t>(GpuResourceType::kNumTargetResources);
}
int64_t GpuAsyncTracker::GetNumAvailableResources(int64_t resource_type) const {
const int64_t first_target_resource = GetFirstTargetDefinedResource();
if (resource_type < first_target_resource) {
return GpuAsyncTrackerBase::GetNumAvailableResources(resource_type);
}
CHECK_LT(resource_type,
first_target_resource +
static_cast<int64_t>(GpuResourceType::kNumTargetResources));
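// Async computes get two parallel streams; every other target-defined
// resource is a single stream.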
if ((resource_type - first_target_resource) ==
static_cast<int64_t>(GpuResourceType::kGpuAsyncStreamComputes)) {
return 2;
}
return 1;
}
absl::string_view GpuAsyncTracker::GetResourceName(
int64_t resource_type) const {
const int64_t first_target_resource = GetFirstTargetDefinedResource();
if (resource_type < first_target_resource) {
return GpuAsyncTrackerBase::GetResourceName(resource_type);
}
CHECK_LE(resource_type,
first_target_resource + GetNumTargetDefinedResources());
switch (static_cast<GpuResourceType>(resource_type - first_target_resource)) {
case GpuResourceType::kGpuAsyncStreamSend0:
return "kGpuAsyncStreamSend0";
case GpuResourceType::kGpuAsyncStreamSend1:
return "kGpuAsyncStreamSend1";
case GpuResourceType::kGpuAsyncStreamRecv0:
return "kGpuAsyncStreamRecv0";
case GpuResourceType::kGpuAsyncStreamRecv1:
return "kGpuAsyncStreamRecv1";
case GpuResourceType::kGpuAsyncStreamCollectives:
return "kGpuAsyncStreamCollectives";
case GpuResourceType::kGpuAsyncStreamComputes:
return "kGpuAsyncStreamComputes";
default:
return "kUnsupportedResource";
}
}
ResourceHazardType GpuAsyncTracker::GetResourceHazardType(
int64_t resource_type) const {
const int64_t first_target_resource = GetFirstTargetDefinedResource();
if (resource_type < first_target_resource) {
return GpuAsyncTrackerBase::GetResourceHazardType(resource_type);
}
CHECK_LE(resource_type,
first_target_resource + GetNumTargetDefinedResources());
return ResourceHazardType::kUnshareable;
}
int64_t GpuAsyncTracker::GetNumResourcesPerInstruction(
int64_t resource_type, const HloInstruction& instr) const {
int64_t num_resources =
GpuAsyncTrackerBase::GetNumResourcesPerInstruction(resource_type, instr);
if (num_resources <= 0 || instr.opcode() != HloOpcode::kWhile) {
return num_resources;
}
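// For the pipelined Send/Recv streams, discount one resource use when the
// while loop's result feeds a matching SendDone/RecvDone outside the loop,
// i.e. when the P2P chain is pipelined through the loop.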
int64_t first_p2p_resource =
GetFirstTargetDefinedResource() +
static_cast<int64_t>(GpuResourceType::kGpuAsyncStreamSend0);
if (resource_type < first_p2p_resource ||
resource_type > first_p2p_resource + 4) {
return num_resources;
}
auto find_instruction_for_pipeline = [&](HloOpcode opcode, int64_t pipeline) {
for (auto user1 : instr.users()) {
if (user1->opcode() == HloOpcode::kGetTupleElement) {
for (auto user2 : user1->users()) {
if (user2->opcode() == opcode) {
if (GetPipelineStream(*user2) == pipeline) {
return true;
}
}
}
}
}
return false;
};
bool found;
if (resource_type == first_p2p_resource) {
found = find_instruction_for_pipeline(HloOpcode::kSendDone, 0);
} else if (resource_type == first_p2p_resource + 1) {
found = find_instruction_for_pipeline(HloOpcode::kSendDone, 1);
} else if (resource_type == first_p2p_resource + 2) {
found = find_instruction_for_pipeline(HloOpcode::kRecvDone, 0);
} else {
found = find_instruction_for_pipeline(HloOpcode::kRecvDone, 1);
}
return num_resources - (found ? 1 : 0);
}
GpuLatencyEstimator::GpuLatencyEstimator(int64_t pointer_size,
GetCanonicalAsyncOpFunc func)
: ApproximateLatencyEstimator(func), pointer_size_(pointer_size) {}
ApproximateLatencyEstimator::TimeCost GpuLatencyEstimator::NodeCost(
const HloInstruction* instr) const {
if (IsNopInstruction(*instr)) {
return 0.0;
}
if (instr->opcode() == HloOpcode::kCustomCall) {
if (IsCublasGemm(*instr) || IsCustomCallToDnnConvolution(*instr)) {
return ApproximateLatencyEstimator::kMediumCost;
}
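// Other custom calls currently get the same medium cost; the branch is kept
// separate so GEMMs and convolutions can be tuned independently if needed.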
return ApproximateLatencyEstimator::kMediumCost;
}
return ApproximateLatencyEstimator::NodeCost(instr);
}
ApproximateLatencyEstimator::TimeCost GpuLatencyEstimator::GetLatencyBetween(
const HloGraphNode& from, const HloGraphNode& to) const {
if (IsAsyncPair(from, to)) {
if (from.GetInstr().opcode() == HloOpcode::kRecv) {
return ApproximateLatencyEstimator::kLowLatency;
} else if (from.GetInstr().opcode() == HloOpcode::kSend) {
return ApproximateLatencyEstimator::kHighLatency * 10;
}
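// With --xla_gpu_enable_approx_costly_collectives, large all-reduce-starts
// (above kCostlyAllReduceThreshold bytes) get an extra latency multiplier so
// the scheduler works harder to overlap them.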
bool enable_approx_collectives =
from.GetInstr()
.GetModule()
->config()
.debug_options()
.xla_gpu_enable_approx_costly_collectives();
bool is_all_reduce = from.GetInstr().opcode() == HloOpcode::kAllReduceStart;
bool collective_size_exceeds_threshold =
GetSizeOfShape(from.GetInstr().shape(), pointer_size_) >
kCostlyAllReduceThreshold;
if (enable_approx_collectives && is_all_reduce &&
collective_size_exceeds_threshold) {
return ApproximateLatencyEstimator::kHighLatency *
kCostlyAllReduceMultiplier;
}
return ApproximateLatencyEstimator::kHighLatency;
}
return ApproximateLatencyEstimator::kLowLatency;
}
}
} | namespace xla::gpu {
namespace {
}
} |
2,093 | cpp | tensorflow/tensorflow | gpu_convert_async_collectives_to_sync | null | null | #ifndef XLA_SERVICE_GPU_GPU_CONVERT_ASYNC_COLLECTIVES_TO_SYNC_H_
#define XLA_SERVICE_GPU_GPU_CONVERT_ASYNC_COLLECTIVES_TO_SYNC_H_
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/convert_async_collectives_to_sync.h"
namespace xla {
namespace gpu {
class GpuConvertAsyncCollectivesToSync : public ConvertAsyncCollectivesToSync {
public:
using ConvertAsyncCollectivesToSync::ConvertAsyncCollectivesToSync;
absl::string_view name() const override {
return "gpu-convert-async-collectives-to-sync";
}
absl::Status ConvertAsyncInstructionsToSync(
HloComputation* computation,
absl::Span<const std::pair<HloInstruction*, HloInstruction*>> async_pairs)
const override;
};
}
}
#endif
#include "xla/service/gpu/gpu_convert_async_collectives_to_sync.h"
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
absl::Status GpuConvertAsyncCollectivesToSync::ConvertAsyncInstructionsToSync(
HloComputation* computation,
absl::Span<const std::pair<HloInstruction*, HloInstruction*>> async_pairs)
const {
absl::flat_hash_map<HloInstruction*, HloInstruction*> replaced_ops;
CollectiveBackendConfig sync_config;
sync_config.set_is_sync(true);
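// Flag every async start as synchronous in its backend config and remember
// the pair so the schedule rewrite below can collapse it.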
for (auto& [async_start, async_done] : async_pairs) {
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
async_start->backend_config<GpuBackendConfig>());
*gpu_config.mutable_collective_backend_config() = sync_config;
TF_RETURN_IF_ERROR(async_start->set_backend_config(gpu_config));
replaced_ops[async_start] = nullptr;
replaced_ops[async_done] = async_start;
}
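// Rebuild the schedule: drop each async start from its original slot and
// re-insert it immediately before its matching done op, so the pair executes
// back to back.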
HloModule* module = computation->parent();
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
std::vector<HloInstruction*> new_sequence;
new_sequence.reserve(sequence.size());
for (HloInstruction* instr : sequence.instructions()) {
auto it = replaced_ops.find(instr);
if (it == replaced_ops.end()) {
new_sequence.push_back(instr);
continue;
}
if (it->second == nullptr) {
continue;
}
new_sequence.push_back(it->second);
new_sequence.push_back(instr);
}
module->schedule().set_sequence(computation, new_sequence);
return absl::OkStatus();
}
}
} | #include "xla/service/gpu/gpu_convert_async_collectives_to_sync.h"
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::IsFalse;
using ::testing::IsTrue;
class GpuConvertAsyncCollectivesToSyncTest : public HloTestBase {
public:
absl::Status RunPass(HloModule *module, bool expect_change,
HloPredicate is_nop = {}) {
TF_ASSIGN_OR_RETURN(bool changed,
GpuConvertAsyncCollectivesToSync{is_nop}.Run(module));
EXPECT_EQ(changed, expect_change);
return absl::OkStatus();
}
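// Reads back the collective_backend_config of the named instruction to check
// whether the pass marked it as synchronous.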
bool IsSync(HloModule *module, std::string_view name) {
const HloInstruction *inst = FindInstruction(module, name);
if (inst == nullptr) {
return false;
}
auto backend_config = inst->backend_config<GpuBackendConfig>()
.value()
.collective_backend_config();
return backend_config.is_sync();
}
HloPredicate is_nop_simple_ =
HloPredicateIsOp<HloOpcode::kBitcast, HloOpcode::kGetTupleElement,
HloOpcode::kParameter>;
};
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllReduce) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
ROOT done = u32[] all-reduce-done(start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "start"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllReduceWithNop) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3, replica_groups={{0,1}, {2,3}}
id2 = f32[] bitcast(id)
ROOT done = u32[] all-reduce-done(start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true, is_nop_simple_));
EXPECT_THAT(IsSync(module.get(), "start"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleCollectiveBroadcast) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
collective_broadcast {
p0 = u32[8] parameter(0)
ROOT result = u32[8] collective-broadcast(p0), replica_groups={{0,1}, {2,3}}
}
ENTRY main {
data = u32[8] parameter(0)
cb-start = ((u32[8]{0}), u32[8]{0}) async-start(u32[8]{0} %data), calls=collective_broadcast
ROOT %ars = u32[8]{0} async-done(((u32[8]{0}), u32[8]{0}) %cb-start), calls=collective_broadcast
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "cb-start"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllReduceWithNonNop) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
id2 = u32[] add(id, id)
ROOT done = u32[] all-reduce-done(start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), false));
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllGather) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
ENTRY test_computation {
a1 = u32[1, 2] parameter(0)
ags = (u32[1, 2], u32[2, 2]) all-gather-start(a1), dimensions={0}, channel_id=3
ROOT allgather = u32[2,2] all-gather-done(ags)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "ags"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleCollectivePermute) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
ENTRY test_computation {
p = u32[2] parameter(0)
start = (u32[2], u32[2], u32[], u32[]) collective-permute-start(p), source_target_pairs={{0,1}, {1,0}}
ROOT done = u32[2] collective-permute-done(start)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "start"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleReduceScatter) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
add {
lhs = u32[] parameter(0)
rhs = u32[] parameter(1)
ROOT add = u32[] add(lhs, rhs)
}
reduce_scatter {
p0 = u32[8] parameter(0)
ROOT result = u32[4] reduce-scatter(p0), replica_groups={{0,3}, {1,2}},
dimensions={0}, to_apply=add
}
ENTRY main {
data = u32[8] parameter(0)
rs-start = ((u32[8]{0}), u32[4]{0}) async-start(u32[8]{0} %data), calls=reduce_scatter
ROOT %ars = u32[4]{0} async-done(((u32[8]{0}), u32[4]{0}) %rs-start), calls=reduce_scatter
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "rs-start"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllToAll) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
all_to_all {
p0 = u32[2] parameter(0)
ROOT result = u32[2] all-to-all(p0), dimensions={0}, replica_groups={{0,1},{2,3}}
}
ENTRY test_computation {
a1 = u32[2] parameter(0)
a2a-start = ((u32[2]), u32[2]) async-start(u32[2] a1), calls=all_to_all
ROOT a2s = u32[2] async-done(a2a-start), calls=all_to_all
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "a2a-start"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, ControlDeps) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
done1 = u32[] all-reduce-done(start1)
start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4, control-predecessors={done1}
done2 = u32[] all-reduce-done(start2)
ROOT x = u32[] add(done1, done2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "start1"), IsTrue());
EXPECT_THAT(IsSync(module.get(), "start2"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, MultipleInFlightStreaming) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4
done1 = u32[] all-reduce-done(start1)
done2 = u32[] all-reduce-done(start2)
ROOT x = u32[] add(done1, done2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "start1"), IsTrue());
EXPECT_THAT(IsSync(module.get(), "start2"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, MultipleInFlightNested) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4
done2 = u32[] all-reduce-done(start2)
done1 = u32[] all-reduce-done(start1)
ROOT x = u32[] add(done1, done2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "start1"), IsTrue());
EXPECT_THAT(IsSync(module.get(), "start2"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, MultipleInFlightNestedPartial) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4
done2 = u32[] all-reduce-done(start2)
id2 = u32[] add(done2, done2)
done1 = u32[] all-reduce-done(start1)
ROOT x = u32[] add(done1, done2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "start1"), IsFalse());
EXPECT_THAT(IsSync(module.get(), "start2"), IsTrue());
}
}
}
} |
2,094 | cpp | tensorflow/tensorflow | topk_splitter | third_party/xla/xla/service/gpu/transforms/topk_splitter.cc | third_party/xla/xla/service/gpu/transforms/topk_splitter_test.cc | #ifndef XLA_SERVICE_GPU_TOPK_SPLITTER_H_
#define XLA_SERVICE_GPU_TOPK_SPLITTER_H_
#include <cstddef>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
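// Rewrites a large single-row TopK (n >= split_threshold, n 1024-aligned,
// k <= sqrt(n)) into a batched TopK over smaller slices followed by a merging
// sort and slice; see TopkSplitterVisitor::HandleCustomCall for the details.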
class TopKSplitter : public HloModulePass {
public:
explicit TopKSplitter(size_t split_threshold = 1024 * 1024)
: split_threshold_(split_threshold) {}
absl::string_view name() const override { return "topk-splitter"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const size_t split_threshold_;
};
}
}
#endif
#include "xla/service/gpu/topk_splitter.h"
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/numeric/bits.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
constexpr size_t kRequiredAlignment = 1024;
constexpr size_t kMaximumBatchSize = 1024;
class TopkSplitterVisitor : public DfsHloRewriteVisitor {
public:
explicit TopkSplitterVisitor(size_t split_threshold)
: split_threshold_(split_threshold) {}
absl::Status HandleCustomCall(HloInstruction* inst) override {
HloCustomCallInstruction* topk = DynCast<HloCustomCallInstruction>(inst);
if (topk == nullptr || topk->custom_call_target() != "TopK") {
return absl::OkStatus();
}
HloComputation* comp = inst->parent();
Shape data_shape = topk->operand(0)->shape();
bool has_batch = data_shape.dimensions_size() == 2;
if (has_batch && data_shape.dimensions(0) != 1) {
return absl::OkStatus();
}
size_t n = data_shape.dimensions(has_batch ? 1 : 0);
int64_t k = topk->shape().tuple_shapes(0).dimensions(has_batch ? 1 : 0);
if (k > sqrt(n)) {
return absl::OkStatus();
}
if (n % kRequiredAlignment != 0) {
return absl::OkStatus();
}
if (n < split_threshold_) return absl::OkStatus();
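// Split the single row of n elements into new_batch rows of new_n elements
// each (capped at kMaximumBatchSize) and run TopK on every row independently.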
int new_batch =
std::min(absl::bit_floor(n / split_threshold_), kMaximumBatchSize);
int new_n = n / new_batch;
Shape split_input_shape =
ShapeUtil::MakeShape(data_shape.element_type(), {new_batch, new_n});
TF_ASSIGN_OR_RETURN(
HloInstruction * reshaped,
MakeReshapeHlo(split_input_shape, topk->mutable_operand(0)));
Shape batch_topk_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(data_shape.element_type(), {new_batch, k}),
ShapeUtil::MakeShape(S32, {new_batch, k})});
HloInstruction* batch_topk =
comp->AddInstruction(HloInstruction::CreateCustomCall(
batch_topk_shape, {reshaped}, topk->to_apply(), "TopK",
/*opaque=*/""));
TF_ASSIGN_OR_RETURN(HloInstruction * indices,
MakeGetTupleElementHlo(batch_topk, 1));
TF_ASSIGN_OR_RETURN(HloInstruction * values,
MakeGetTupleElementHlo(batch_topk, 0));
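// Per-row TopK indices are local to each slice; add row_index * new_n to turn
// them back into indices into the original input.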
Shape iota_shape = ShapeUtil::MakeShape(S32, {new_batch});
TF_ASSIGN_OR_RETURN(
HloInstruction * fix,
MakeBinaryHlo(
HloOpcode::kMultiply, MakeIotaHlo(comp, iota_shape, 0),
MakeBroadcastHlo(MakeR0ConstantHlo<int32_t>(comp, new_n),
/*broadcast_dimensions=*/{}, iota_shape)));
TF_ASSIGN_OR_RETURN(
indices, MakeBinaryHlo(HloOpcode::kAdd, indices,
MakeBroadcastHlo(fix, {0}, indices->shape())));
Shape linear_index_shape = ShapeUtil::MakeShape(S32, {k * new_batch});
Shape linear_shape = ShapeUtil::ChangeElementType(
linear_index_shape, data_shape.element_type());
Shape linear_sort_shape =
ShapeUtil::MakeTupleShape({linear_shape, linear_index_shape});
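// Merge the new_batch * k candidates with one stable sort over the flattened
// values/indices, then keep the global top k below.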
HloInstruction* aggregated_sort =
comp->AddInstruction(HloInstruction::CreateSort(
linear_sort_shape, /*dimension=*/0,
{*MakeReshapeHlo(linear_shape, values),
*MakeReshapeHlo(linear_index_shape, indices)},
topk->to_apply(), /*is_stable=*/true));
auto slice_tuple = [&](HloInstruction* sort, const size_t index) {
return *MakeReshapeHlo(
topk->shape().tuple_shapes(index),
*MakeSliceHlo(*MakeGetTupleElementHlo(sort, index), {0}, {k}, {1}));
};
return ReplaceInstruction(topk,
comp->AddInstruction(HloInstruction::CreateTuple({
slice_tuple(aggregated_sort, 0),
slice_tuple(aggregated_sort, 1),
})));
}
private:
size_t split_threshold_;
};
}
absl::StatusOr<bool> TopKSplitter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return TopkSplitterVisitor(split_threshold_)
.RunOnModule(module, execution_threads);
}
}
} | #include "xla/service/gpu/topk_splitter.h"
#include <stdint.h>
#include <cstddef>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/topk_rewriter.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace m = ::xla::match;
namespace xla {
namespace gpu {
namespace {
using ::tsl::testing::IsOkAndHolds;
using TopkSplitterTest = HloTestBase;
constexpr absl::string_view kComparator = R"(
%compare {
%p.1.lhs.40628 = s32[] parameter(2)
%p.1.rhs.40629 = s32[] parameter(3)
%constant.40630 = pred[] constant(true)
%broadcast.40631 = pred[] broadcast(pred[] %constant.40630), dimensions={}
%p.0.lhs.40626 = f32[] parameter(0)
%p.0.rhs.40627 = f32[] parameter(1)
%compare.40632 = pred[] compare(f32[] %p.0.lhs.40626, f32[] %p.0.rhs.40627), direction=GT, type=TOTALORDER
ROOT %select.40633 = pred[] select(pred[] %broadcast.40631, pred[] %compare.40632, pred[] %broadcast.40631)
})";
TEST_F(TopkSplitterTest, SplitsTopK) {
const std::string hlo_string = absl::Substitute(R"(
HloModule module
$0
ENTRY cluster {
%arg.1 = f32[1,1073741824] parameter(0)
ROOT %cc.2 = (f32[1,5], s32[1,5]) custom-call(%arg.1), custom_call_target= "TopK", to_apply=%compare
})",
kComparator);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_THAT(RunHloPass(TopKSplitter(), module.get()), IsOkAndHolds(true));
auto first_topk = m::CustomCall(m::Reshape(m::Parameter(0)));
auto slice_result = [&](auto input, size_t i) {
return m::Reshape(m::Slice(m::GetTupleElement(input, i)));
};
auto index_correction =
m::Broadcast(m::Multiply(m::Iota(), m::Broadcast(m::Constant())));
auto sorted = m::Sort(
m::Reshape(m::GetTupleElement(first_topk, 0)),
m::Reshape(m::Add(m::GetTupleElement(first_topk, 1), index_correction)));
EXPECT_TRUE(
Match(module->entry_computation()->root_instruction(),
m::Tuple(slice_result(sorted, 0), slice_result(sorted, 1))));
}
TEST_F(TopkSplitterTest, SplitsTopKNoBatchDimension) {
const std::string hlo_string = absl::Substitute(R"(
HloModule module
$0
ENTRY cluster {
%arg.1 = f32[1073741824] parameter(0)
ROOT %cc.2 = (f32[5], s32[5]) custom-call(%arg.1), custom_call_target= "TopK", to_apply=%compare
})",
kComparator);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_THAT(RunHloPass(TopKSplitter(), module.get()), IsOkAndHolds(true));
auto first_topk = m::CustomCall(m::Reshape(m::Parameter(0)));
auto slice_result = [&](auto input, size_t i) {
return m::Reshape(m::Slice(m::GetTupleElement(input, i)));
};
auto index_correction =
m::Broadcast(m::Multiply(m::Iota(), m::Broadcast(m::Constant())));
auto sorted = m::Sort(
m::Reshape(m::GetTupleElement(first_topk, 0)),
m::Reshape(m::Add(m::GetTupleElement(first_topk, 1), index_correction)));
EXPECT_TRUE(
Match(module->entry_computation()->root_instruction(),
m::Tuple(slice_result(sorted, 0), slice_result(sorted, 1))));
}
TEST_F(TopkSplitterTest, SplitFailsUnderThreshold) {
const std::string hlo_string = absl::Substitute(R"(
HloModule module
$0
ENTRY cluster {
%arg.1 = f32[1,524288] parameter(0)
ROOT %cc.2 = (f32[1,5], s32[1,5]) custom-call(%arg.1), custom_call_target= "TopK", to_apply=%compare
})",
kComparator);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_THAT(
RunHloPass(TopKSplitter(1048576), module.get()),
IsOkAndHolds(false));
}
TEST_F(TopkSplitterTest, SplitFailsUnaligned) {
const std::string hlo_string = absl::Substitute(R"(
HloModule module
$0
ENTRY cluster {
%arg.1 = f32[1,524289] parameter(0)
ROOT %cc.2 = (f32[1,5], s32[1,5]) custom-call(%arg.1), custom_call_target= "TopK", to_apply=%compare
})",
kComparator);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_THAT(RunHloPass(TopKSplitter(1024), module.get()),
IsOkAndHolds(false));
}
TEST_F(TopkSplitterTest, SplitFailsLargeK) {
const std::string hlo_string = absl::Substitute(R"(
HloModule module
$0
ENTRY cluster {
%arg.1 = f32[1,524288] parameter(0)
ROOT %cc.2 = (f32[1,1024], s32[1,1024]) custom-call(%arg.1), custom_call_target= "TopK", to_apply=%compare
})",
kComparator);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_THAT(RunHloPass(TopKSplitter(1024), module.get()),
IsOkAndHolds(false));
}
TEST_F(TopkSplitterTest, Equivalent) {
const std::string hlo_string = absl::Substitute(R"(
HloModule module
$0
ENTRY cluster {
%arg.1 = f32[1,16384] parameter(0)
ROOT %cc.2 = (f32[1,5], s32[1,5]) custom-call(%arg.1), custom_call_target= "TopK", to_apply=%compare
})",
kComparator);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_THAT(TopkDecomposer().Run(module.get()), IsOkAndHolds(true));
auto round_trip = [](HloModule* module) {
EXPECT_THAT(TopkRewriter([](const HloSortInstruction*, int64_t) {
return true;
}).Run(module),
IsOkAndHolds(true));
EXPECT_THAT(TopKSplitter(1024).Run(module), IsOkAndHolds(true));
EXPECT_THAT(TopkDecomposer().Run(module), IsOkAndHolds(true));
EXPECT_TRUE(HloDCE().Run(module).status().ok());
};
EXPECT_TRUE(RunAndCompare(std::move(module), std::nullopt, round_trip));
}
TEST_F(TopkSplitterTest, StableSorts) {
const std::string hlo_string = absl::Substitute(R"(
HloModule module
$0
ENTRY cluster {
%constant.1 = f32[] constant(42)
%broadcast.2= f32[1,16384] broadcast(f32[] %constant.1), dimensions={}
ROOT %cc.3 = (f32[1,5], s32[1,5]) custom-call(%broadcast.2), custom_call_target= "TopK", to_apply=%compare
})",
kComparator);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_THAT(TopkDecomposer().Run(module.get()), IsOkAndHolds(true));
auto round_trip = [](HloModule* module) {
EXPECT_THAT(TopkRewriter([](const HloSortInstruction*, int64_t) {
return true;
}).Run(module),
IsOkAndHolds(true));
EXPECT_THAT(TopKSplitter(1024).Run(module), IsOkAndHolds(true));
EXPECT_THAT(TopkDecomposer().Run(module), IsOkAndHolds(true));
EXPECT_TRUE(HloDCE().Run(module).status().ok());
};
EXPECT_TRUE(RunAndCompare(std::move(module), std::nullopt, round_trip));
}
}
}
} |
2,095 | cpp | tensorflow/tensorflow | cudnn_fused_mha_rewriter | third_party/xla/xla/service/gpu/transforms/cudnn_fused_mha_rewriter.cc | third_party/xla/xla/service/gpu/transforms/cudnn_fused_mha_rewriter_test.cc | #ifndef XLA_SERVICE_GPU_CUDNN_FUSED_MHA_REWRITER_H_
#define XLA_SERVICE_GPU_CUDNN_FUSED_MHA_REWRITER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/dnn.h"
namespace xla {
namespace gpu {
class CudnnFusedMHARewriter : public HloModulePass {
public:
explicit CudnnFusedMHARewriter(se::CudaComputeCapability cc,
se::StreamExecutor* stream_executor)
: compute_capability_(cc), stream_executor_(stream_executor) {}
explicit CudnnFusedMHARewriter(se::CudaComputeCapability cc,
se::dnn::VersionInfo cudnn_version)
: compute_capability_(cc), cudnn_version_(cudnn_version) {}
absl::string_view name() const override {
return "cudnn-fused-multi-headed-attention-rewriter";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const se::CudaComputeCapability compute_capability_;
se::StreamExecutor* stream_executor_ = nullptr;
const se::dnn::VersionInfo cudnn_version_;
};
}
}
#endif
#include "xla/service/gpu/cudnn_fused_mha_rewriter.h"
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/permutation_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#endif
namespace xla {
namespace gpu {
namespace {
namespace m = match;
struct MatchFwdResult {
HloInstruction* matched_bmm_1 = nullptr;
HloInstruction* matched_bmm_2 = nullptr;
HloInstruction* matched_bias = nullptr;
HloInstruction* matched_scale = nullptr;
HloInstruction* matched_softmax_input = nullptr;
HloInstruction* matched_reduce_sum = nullptr;
double matched_dropout_rate = 0.0;
bool need_canonicalization = false;
bool is_training = false;
bool is_causal_mask = false;
bool has_match = false;
std::string matched_custom_call_name;
};
struct MatchBwdResult {
HloInstruction* matched_bmm_1_grad_1 = nullptr;
HloInstruction* matched_bmm_1_grad_2 = nullptr;
HloInstruction* matched_bmm_2_grad_1 = nullptr;
HloInstruction* matched_bmm_2_grad_2 = nullptr;
HloInstruction* matched_dbias = nullptr;
bool bmm_1_grad_1_need_canonicalization = false;
bool bmm_1_grad_2_need_canonicalization = false;
bool bmm_2_grad_1_need_canonicalization = false;
bool bmm_2_grad_2_need_canonicalization = false;
bool has_match = false;
std::string matched_custom_call_name;
};
template <typename Pattern>
auto OptionalReshape(Pattern pattern) {
auto shared = m::SharedSubpattern(pattern);
return m::AnyOf<HloInstruction>(m::Reshape(shared), shared);
}
template <typename Pattern>
auto OptionalConvert(Pattern pattern) {
auto shared = m::SharedSubpattern(pattern);
return m::AnyOf<HloInstruction>(m::Convert(shared), shared);
}
template <typename Pattern>
auto OptionalBitcast(Pattern pattern) {
auto shared = m::SharedSubpattern(pattern);
return m::AnyOf<HloInstruction>(m::Bitcast(shared), shared);
}
template <typename Pattern>
auto OptionalBroadcast(Pattern pattern) {
auto shared = m::SharedSubpattern(pattern);
return m::AnyOf<HloInstruction>(m::Broadcast(shared), shared);
}
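// The Optional* helpers above make a wrapper op optional in a pattern. For
// example, OptionalConvert(m::Op(&x)) matches both
//   %x = f32[...] ...             (bare)
//   %y = bf16[...] convert(%x)    (wrapped in a convert)
// m::SharedSubpattern ensures the inner pattern is a single shared node even
// though it appears in both branches of the m::AnyOf.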
bool IsBatchedMatmul(const HloInstruction* instr) {
if (instr->opcode() != HloOpcode::kDot) return false;
if (Cast<HloDotInstruction>(instr)->sparse_operands()) return false;
const DotDimensionNumbers& dot_dims = instr->dot_dimension_numbers();
bool is_batch_dot = !dot_dims.lhs_batch_dimensions().empty() ||
!dot_dims.rhs_batch_dimensions().empty();
return is_batch_dot;
}
bool IsSharingOperandWithFwdMha(HloInstruction* gemm) {
for (int64_t i = 0; i < gemm->operands().size(); i++) {
std::queue<HloInstruction*> visit_list;
visit_list.push(gemm->mutable_operand(i));
while (!visit_list.empty()) {
HloInstruction* current_instr = visit_list.front();
for (auto user : current_instr->users()) {
switch (user->opcode()) {
case HloOpcode::kBitcast:
case HloOpcode::kReshape:
case HloOpcode::kTranspose: {
visit_list.push(user);
break;
}
case HloOpcode::kCustomCall: {
if (IsFwdCustomCallTofMHA(*user)) {
return true;
}
} break;
default:
break;
}
}
visit_list.pop();
}
}
return false;
}
bool IsFirstFwdMatmul(HloInstruction* gemm) {
return IsBatchedMatmul(gemm) && !IsFwdCustomCallTofMHA(*gemm->operand(0)) &&
!IsFwdCustomCallTofMHA(*gemm->operand(1)) &&
!IsSharingOperandWithFwdMha(gemm);
}
bool IsScalar(const HloInstruction* instr) {
return ShapeUtil::IsEffectiveScalar(instr->shape());
}
bool IsReduceMax(const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kReduce &&
instr->to_apply()->root_instruction()->opcode() == HloOpcode::kMaximum;
}
bool IsReduceSum(const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kReduce &&
instr->to_apply()->root_instruction()->opcode() == HloOpcode::kAdd;
}
auto GetUnfusedReduceMaxSumSoftmaxPattern(
HloInstruction** softmax_input = nullptr,
HloInstruction** softmax_reduce_sum = nullptr,
HloInstruction** softmax_reduce_sum_bcast = nullptr) {
auto unfused_softmax_max_subpattern = m::SharedSubpattern(
m::Subtract(
m::Op(),
m::Broadcast(OptionalConvert(
m::Op()
.WithPredicate(IsReduceMax)
.WithOneUse()
.WithOperand(0, OptionalBitcast(OptionalConvert(
m::Op(softmax_input).WithNumUser(2)))))))
.WithOneUse());
auto unfused_softmax_sum_subpattern = m::SharedSubpattern(m::Divide(
OptionalBitcast(m::Exp(unfused_softmax_max_subpattern)),
m::Broadcast(
softmax_reduce_sum_bcast,
OptionalConvert(
m::Op(softmax_reduce_sum)
.WithOperand(0, OptionalBitcast(OptionalConvert(
m::Exp(unfused_softmax_max_subpattern))))
.WithPredicate(IsReduceSum)
.WithAtMostNumUser(2)))
.WithAtMostNumUser(2)));
return unfused_softmax_sum_subpattern;
}
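// The pattern above targets XLA's numerically stable softmax decomposition:
//   softmax(x) = exp(x - max(x)) / sum(exp(x - max(x)))
// i.e. a subtract of a broadcast max-reduce feeding an exp, divided by a
// broadcast sum-reduce of that exp. The user-count constraints
// (WithNumUser / WithAtMostNumUser) allow for the extra consumers that a
// training graph adds when the reduce results are reused by the backward
// pass.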
std::optional<double> GetConstantValue(const HloInstruction* inst) {
if (!IsScalar(inst)) {
return std::nullopt;
}
switch (inst->shape().element_type()) {
case F16:
return static_cast<float>(inst->literal().GetFirstElement<half>());
case BF16:
return static_cast<float>(inst->literal().GetFirstElement<bfloat16>());
case F32:
return inst->literal().GetFirstElement<float>();
case F64:
return inst->literal().GetFirstElement<double>();
default:
return std::nullopt;
}
}
double GetDropoutRateFromHlo(HloInstruction* dropout) {
std::optional<double> dropout_rate_inv;
dropout_rate_inv = GetConstantValue(dropout);
if (!dropout_rate_inv.has_value()) {
return 0.0;
}
return (1.0 - (1.0 / *dropout_rate_inv));
}
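// GetDropoutRateFromHlo expects the scalar that scales the kept activations,
// i.e. 1 / (1 - rate), and inverts it: rate = 1 - 1 / scale. Illustrative
// example: a multiplier of 1.25 corresponds to a dropout rate of
// 1 - 1 / 1.25 = 0.2. A non-scalar or unsupported constant yields 0.0,
// which downstream matching treats as "no dropout".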
bool IsComputeCapabilityAndCudnnSupported(
stream_executor::CudaComputeCapability cc,
stream_executor::dnn::VersionInfo cudnn_version,
stream_executor::dnn::VersionInfo supported_cudnn_version) {
if (cc.IsAtLeastAmpere() && cc.minor == 0 &&
cudnn_version >= supported_cudnn_version) {
return true;
}
VLOG(2) << absl::StrFormat(
"CudnnFusedMHARewriter did not run. Unsupported compute "
"capability(%s; major should be >= 8, minor should be 0) or cudnn version"
"(%s; should be >= %s)",
cc.ToString(), cudnn_version.ToString(),
supported_cudnn_version.ToString());
return false;
}
bool IsSupportedPrimitiveType(const HloInstruction* bmm) {
PrimitiveType dtype = bmm->shape().element_type();
return dtype == BF16 || dtype == F16;
}
std::vector<int64_t> GetDimensionVector(absl::Span<const int64_t> dimensions,
absl::Span<const int64_t> dim_nums) {
std::vector<int64_t> vec(dim_nums.size());
for (int i = 0; i < dim_nums.size(); i++) {
vec[i] = dimensions.at(dim_nums.at(i));
}
return vec;
}
struct QKVLayout {
int64_t batch;
int64_t num_heads;
int64_t seqlen_q;
int64_t seqlen_kv;
int64_t hidden_dim;
};
absl::StatusOr<std::optional<QKVLayout>> GetQKVLayout(
HloInstruction* bmm_1, HloInstruction* bmm_2, bool need_canonicalization) {
const DotDimensionNumbers& bmm1_dnums = bmm_1->dot_dimension_numbers();
TF_ASSIGN_OR_RETURN(
std::vector<int64_t> bmm1_s_q_dims,
GetNonContractingDims(bmm_1->operand(0)->shape(),
bmm1_dnums.lhs_batch_dimensions(),
bmm1_dnums.lhs_contracting_dimensions()));
TF_ASSIGN_OR_RETURN(
std::vector<int64_t> bmm1_s_kv_dims,
GetNonContractingDims(bmm_1->operand(1)->shape(),
bmm1_dnums.rhs_batch_dimensions(),
bmm1_dnums.rhs_contracting_dimensions()));
std::vector<int64_t> bmm1_bh =
GetDimensionVector(bmm_1->operand(0)->shape().dimensions(),
bmm1_dnums.lhs_batch_dimensions());
std::vector<int64_t> bmm1_s_q = GetDimensionVector(
bmm_1->operand(0)->shape().dimensions(), bmm1_s_q_dims);
std::vector<int64_t> bmm1_s_kv = GetDimensionVector(
bmm_1->operand(1)->shape().dimensions(), bmm1_s_kv_dims);
std::vector<int64_t> bmm1_d =
GetDimensionVector(bmm_1->operand(0)->shape().dimensions(),
bmm1_dnums.lhs_contracting_dimensions());
TF_RET_CHECK(bmm1_bh.size() == 2);
TF_RET_CHECK(bmm1_s_q.size() == 1);
TF_RET_CHECK(bmm1_s_kv.size() == 1);
TF_RET_CHECK(bmm1_d.size() == 1);
const DotDimensionNumbers& bmm2_dnums = bmm_2->dot_dimension_numbers();
TF_ASSIGN_OR_RETURN(
std::vector<int64_t> bmm2_lhs_non_contracting_dims,
GetNonContractingDims(bmm_2->operand(0)->shape(),
bmm2_dnums.lhs_batch_dimensions(),
bmm2_dnums.lhs_contracting_dimensions()));
TF_ASSIGN_OR_RETURN(
std::vector<int64_t> bmm2_rhs_non_contracting_dims,
GetNonContractingDims(bmm_2->operand(1)->shape(),
bmm2_dnums.rhs_batch_dimensions(),
bmm2_dnums.rhs_contracting_dimensions()));
std::vector<int64_t> bmm2_bh =
GetDimensionVector(bmm_2->operand(0)->shape().dimensions(),
bmm2_dnums.lhs_batch_dimensions());
std::vector<int64_t> bmm2_s_kv =
GetDimensionVector(bmm_2->operand(0)->shape().dimensions(),
bmm2_dnums.lhs_contracting_dimensions());
std::vector<int64_t> bmm2_s_q =
need_canonicalization
? GetDimensionVector(bmm_2->operand(1)->shape().dimensions(),
bmm2_rhs_non_contracting_dims)
: GetDimensionVector(bmm_2->operand(0)->shape().dimensions(),
bmm2_lhs_non_contracting_dims);
std::vector<int64_t> bmm2_d =
need_canonicalization
? GetDimensionVector(bmm_2->operand(0)->shape().dimensions(),
bmm2_lhs_non_contracting_dims)
: GetDimensionVector(bmm_2->operand(1)->shape().dimensions(),
bmm2_rhs_non_contracting_dims);
TF_RET_CHECK(bmm2_bh.size() == 2);
TF_RET_CHECK(bmm2_s_q.size() == 1);
TF_RET_CHECK(bmm2_s_kv.size() == 1);
TF_RET_CHECK(bmm2_d.size() == 1);
if (bmm1_bh[0] != bmm2_bh[0] || bmm1_bh[1] != bmm2_bh[1] ||
bmm1_s_q[0] != bmm2_s_q[0] || bmm1_s_kv[0] != bmm2_s_kv[0] ||
bmm1_d[0] != bmm2_d[0]) {
return std::nullopt;
}
QKVLayout qkv_layout;
qkv_layout.batch = bmm1_bh[0];
qkv_layout.num_heads = bmm1_bh[1];
qkv_layout.seqlen_q = bmm1_s_q[0];
qkv_layout.seqlen_kv = bmm1_s_kv[0];
qkv_layout.hidden_dim = bmm1_d[0];
return qkv_layout;
}
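// GetQKVLayout returns std::nullopt unless both matmuls agree on
// [batch, num_heads, seqlen_q, seqlen_kv, hidden_dim]. E.g. for a
// conventional layout, BMM1 computes Q[b,h,s_q,d] x K[b,h,s_kv,d]^T, so the
// contracting dim gives d and the non-contracting dims give s_q and s_kv;
// these must line up with BMM2's probs[b,h,s_q,s_kv] x V[b,h,s_kv,d]
// (after optional canonicalization).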
absl::StatusOr<bool> IsFlashAttention(
QKVLayout qkv_layout, bool is_training,
stream_executor::CudaComputeCapability cc,
stream_executor::dnn::VersionInfo cudnn_version) {
int64_t s_q = qkv_layout.seqlen_q;
int64_t s_kv = qkv_layout.seqlen_kv;
int64_t hidden_dim = qkv_layout.hidden_dim;
bool is_seqlen_supported = (!is_training || (s_q % 2 == 0 && s_kv % 2 == 0));
bool is_hidden_dim_supported = hidden_dim <= 128 && hidden_dim % 8 == 0;
bool is_flash_attention = is_seqlen_supported && is_hidden_dim_supported;
if (!is_flash_attention) return false;
if ((is_training && (s_q < 64 || s_kv < 64)) &&
!IsComputeCapabilityAndCudnnSupported(
cc, cudnn_version, stream_executor::dnn::VersionInfo(9, 0, 0))) {
VLOG(2) << "Flash attention training with seq < 64 not supported cuDNN < "
"9.0.0.";
return false;
}
if ((hidden_dim != 64 && hidden_dim != 128) &&
!IsComputeCapabilityAndCudnnSupported(
cc, cudnn_version, stream_executor::dnn::VersionInfo(8, 9, 6))) {
VLOG(2) << "Flash attention head dim != 64 or 128 not supported with cuDNN "
"< 8.9.6.";
return false;
}
if ((is_training && s_kv % 64 != 0) &&
!IsComputeCapabilityAndCudnnSupported(
cc, cudnn_version, stream_executor::dnn::VersionInfo(8, 9, 5))) {
VLOG(2) << "Flash attention training with seq kv % 64 != 0 not supported "
"with cuDNN < 8.9.5.";
return false;
}
if (!IsComputeCapabilityAndCudnnSupported(
cc, cudnn_version, stream_executor::dnn::VersionInfo(8, 9, 4))) {
VLOG(2) << "Require cuDNN 8.9.4 to run flash attention.";
return false;
}
return is_flash_attention;
}
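// Summary of the version gates above, as encoded in this pass:
//   - hidden dim <= 128 and divisible by 8 is always required;
//   - training with seqlen < 64 needs cuDNN >= 9.0.0;
//   - hidden dim other than 64 or 128 needs cuDNN >= 8.9.6;
//   - training with seqlen_kv % 64 != 0 needs cuDNN >= 8.9.5;
//   - everything else needs at least cuDNN 8.9.4 on an Ampere-class GPU
//     (major >= 8, minor == 0).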
bool IsCausalMaskPattern(HloInstruction* mask) {
auto causal_mask =
m::Select(m::Compare(m::Iota(), m::Iota()), m::Broadcast(m::Constant()),
m::Broadcast(m::Constant()));
auto causal_mask_pattern_fwd_remat =
m::Broadcast(OptionalBitcast(causal_mask));
auto causal_mask_pattern_bwd = m::Broadcast(m::Convert(OptionalBitcast(
m::Minimum(m::Op(), m::Broadcast(OptionalBitcast(causal_mask))))));
HloInstruction* param = nullptr;
HloInstruction* gte = nullptr;
auto causal_mask_pattern_fwd = m::Broadcast(
      OptionalBitcast(m::GetTupleElement(&gte, m::Parameter(&param))));
auto causal_mask_pattern = m::AnyOf<HloInstruction>(
causal_mask_pattern_fwd_remat, causal_mask_pattern_fwd,
causal_mask_pattern_bwd);
if (Match(mask, causal_mask_pattern)) {
if (param != nullptr && param->parent()->IsWhileBodyComputation()) {
auto while_instr = param->parent()->WhileCallInstruction();
auto mask_index = gte->tuple_index();
auto actual_mask =
while_instr->mutable_operand(0)->mutable_operand(mask_index);
auto causal_mask_pattern_fwd =
OptionalBitcast(m::Convert(m::MinimumAnyOrder(
m::Op(),
OptionalBitcast(m::MinimumAnyOrder(
m::Op(), m::Broadcast(OptionalBitcast(causal_mask)))))));
return Match(actual_mask, causal_mask_pattern_fwd);
}
return true;
}
return false;
}
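// Causal masks are recognized structurally rather than by value: a select
// over a comparison of two iotas (row index vs. column index) choosing
// between two broadcast constants, typically 0 and a large negative value.
// When the mask arrives through a while-loop parameter, the matcher follows
// the get-tuple-element back to the while operand to inspect the actual mask
// producer.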
MatchFwdResult MatchSoftmaxDropoutBmm(MatchFwdResult previous_result,
int64_t bmm2_operand_position,
HloInstruction* instr) {
MatchFwdResult match_result = previous_result;
HloInstruction* softmax_reduce_sum;
HloInstruction* softmax_reduce_sum_bcast;
HloInstruction* bmm_2;
HloInstruction* softmax_input;
HloInstruction* dropout = nullptr;
auto dropout_softmax_pattern_form_1 = m::Select(
m::Op(),
OptionalConvert(m::MultiplyAnyOrder(
OptionalBitcast(OptionalReshape(
OptionalConvert(GetUnfusedReduceMaxSumSoftmaxPattern(
&softmax_input, &softmax_reduce_sum,
&softmax_reduce_sum_bcast)))),
m::Broadcast(
OptionalConvert(m::Constant(&dropout).WithPredicate(IsScalar))))),
m::Op());
auto dropout_softmax_pattern_form_2 =
OptionalBitcast(OptionalBitcast(OptionalConvert(m::MultiplyAnyOrder(
OptionalReshape(OptionalConvert(GetUnfusedReduceMaxSumSoftmaxPattern(
&softmax_input, &softmax_reduce_sum, &softmax_reduce_sum_bcast))),
m::Broadcast(
OptionalConvert(OptionalBitcast(OptionalReshape(m::Select(
m::Op(),
m::Broadcast(m::Constant(&dropout).WithPredicate(IsScalar)),
m::Op())))))))));
auto dropout_softmax_pattern_form_3 = m::MultiplyAnyOrder(
m::MultiplyAnyOrder(
OptionalConvert(GetUnfusedReduceMaxSumSoftmaxPattern(
&softmax_input, &softmax_reduce_sum, &softmax_reduce_sum_bcast)),
m::Op()),
m::Broadcast(m::Constant(&dropout).WithPredicate(IsScalar)));
auto softmax_dropout_bmm2_pattern =
m::Op(&bmm_2)
.WithPredicate(IsBatchedMatmul)
.WithOperand(bmm2_operand_position,
m::AnyOf<HloInstruction>(
OptionalBitcast(OptionalConvert(
GetUnfusedReduceMaxSumSoftmaxPattern(
&softmax_input, &softmax_reduce_sum,
&softmax_reduce_sum_bcast))),
dropout_softmax_pattern_form_1,
dropout_softmax_pattern_form_2,
dropout_softmax_pattern_form_3));
if (!Match(instr, softmax_dropout_bmm2_pattern) ||
!IsSupportedPrimitiveType(bmm_2)) {
match_result.has_match = false;
return match_result;
}
if (softmax_reduce_sum->users()[0]->opcode() == HloOpcode::kConvert) {
softmax_reduce_sum = softmax_reduce_sum->users()[0];
}
match_result.is_training = softmax_reduce_sum->user_count() == 2 &&
softmax_reduce_sum_bcast->user_count() == 2;
match_result.matched_bmm_2 = bmm_2;
if (dropout) {
match_result.matched_dropout_rate = GetDropoutRateFromHlo(dropout);
}
match_result.matched_softmax_input = softmax_input;
match_result.matched_reduce_sum = softmax_reduce_sum;
match_result.has_match = true;
return match_result;
}
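// Matches the front half of the attention pattern: BMM1, an optional scalar
// scale (a multiply by a broadcast constant), and an optional additive bias
// feeding the softmax input matched above. The selected custom-call target
// encodes which of scale, bias, and dropout were found; broadcast biases over
// supported dimension sets are rewritten through an explicit bitcast so the
// bias operand carries a full-rank 4-D shape.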
MatchFwdResult MatchBmm1UnfusedBiasSoftmaxBmm2(MatchFwdResult previous_result,
HloInstruction* softmax_input,
bool has_dropout) {
MatchFwdResult match_result = previous_result;
HloInstruction* bmm_1;
HloInstruction* bias = nullptr;
HloInstruction* scale = nullptr;
auto first_bmm_pattern =
m::SharedSubpattern(m::Op(&bmm_1).WithPredicate(IsBatchedMatmul));
auto unfused_scaled_bmm_subpattern = m::MultiplyAnyOrder(
OptionalConvert(first_bmm_pattern.WithOneUse()),
OptionalConvert(
m::Broadcast(m::Constant(&scale).WithPredicate(IsScalar))));
if (Match(softmax_input,
OptionalConvert(OptionalBitcast(m::AnyOf<HloInstruction>(
first_bmm_pattern, unfused_scaled_bmm_subpattern))))) {
match_result.matched_bmm_1 = bmm_1;
match_result.matched_scale = scale;
match_result.matched_custom_call_name =
has_dropout ? kCudnnfMHASoftmaxDropoutCallTarget
: kCudnnfMHASoftmaxCallTarget;
match_result.has_match = true;
} else if (Match(softmax_input,
OptionalBitcast(m::AddAnyOrder(
OptionalConvert(OptionalBitcast(m::AnyOf<HloInstruction>(
unfused_scaled_bmm_subpattern.WithOneUse(),
first_bmm_pattern.WithOneUse()))),
m::Op(&bias))))) {
match_result.matched_bmm_1 = bmm_1;
match_result.matched_scale = scale;
match_result.matched_custom_call_name =
has_dropout ? kCudnnfMHAScaleBiasSoftmaxDropoutCallTarget
: kCudnnfMHAScaleBiasSoftmaxCallTarget;
match_result.is_causal_mask |= IsCausalMaskPattern(bias);
if (!match_result.is_causal_mask &&
bias->opcode() == HloOpcode::kBroadcast) {
auto dims = Cast<HloBroadcastInstruction>(bias)->dimensions();
if (dims == std::vector<int64_t>{2, 3} ||
dims == std::vector<int64_t>{0, 2, 3} ||
dims == std::vector<int64_t>{1, 2, 3}) {
HloInstruction* bias_bc = bias->mutable_operand(0);
std::vector<int64_t> bitcast_dims(bias->shape().rank(), 1);
for (int dim : dims) {
bitcast_dims[dim] = bias->shape().dimensions()[dim];
}
bias = bias_bc->AddInstruction(HloInstruction::CreateBitcast(
ShapeUtil::MakeShape(bias->shape().element_type(), bitcast_dims),
bias_bc));
}
}
match_result.matched_bias = bias;
match_result.has_match = true;
} else {
match_result.has_match = false;
}
return match_result;
}
MatchFwdResult MatchFwdMHAPatternsForCanonicalization(HloInstruction* instr) {
MatchFwdResult match_result;
for (auto bmm2_operand_pos : {0, 1}) {
if (bmm2_operand_pos == 1) {
match_result.need_canonicalization = true;
}
bool has_dropout = false;
match_result =
MatchSoftmaxDropoutBmm(match_result, bmm2_operand_pos, instr);
if (!match_result.has_match) {
continue;
}
has_dropout = match_result.matched_dropout_rate > 0.0;
match_result = MatchBmm1UnfusedBiasSoftmaxBmm2(
match_result, match_result.matched_softmax_input, has_dropout);
if (match_result.has_match) {
return match_result;
}
}
match_result.need_canonicalization = false;
return match_result;
}
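// "Canonicalization" here refers to operand order: the match is attempted
// with the softmax/dropout output as operand 0 of BMM2 and then as operand 1.
// If only the second attempt succeeds, need_canonicalization is set so that
// later rewriting can account for the swapped operand order.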
bool IsBmm2GradGemm2(HloInstruction* instr) {
return (instr->user_count() == 1) || (instr->user_count() == 2);
}
MatchBwdResult MatchBmm1GradGemm1(MatchBwdResult previous_result,
HloInstruction* bmm_1) {
MatchBwdResult match_result = previous_result;
match_result.has_match = false;
const HloInstruction* q_tensor = bmm_1->operand(0);
for (int64_t i = 0; i < q_tensor->user_count(); i++) {
HloInstruction* q_tensor_user_i = q_tensor->users()[i];
if (IsBatchedMatmul(q_tensor_user_i) && q_tensor_user_i != bmm_1) {
match_result.matched_bmm_1_grad_1 = q_tensor_user_i;
if (match_result.matched_bmm_1_grad_1->operand_index(q_tensor) != 1) {
match_result.bmm_1_grad_1_need_canonicalization = true;
}
match_result.has_match = true;
}
}
return match_result;
}
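// Backward matching walks shared tensors rather than opcodes: bmm1_grad_1 is
// the other batched dot that consumes the same Q tensor as the forward BMM1,
// and bmm1_grad_2 is the dot sharing its d(S) input, the gradient flowing
// into the softmax. Operand positions that differ from the expected order set
// the corresponding *_need_canonicalization flags.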
MatchBwdResult MatchBmm1GradGemm2(MatchBwdResult previous_result,
HloInstruction* fwd_fmha_call) {
HloInstruction* bmm_1_grad_2 = nullptr;
MatchBwdResult match_result = previous_result;
match_result.has_match = false;
int64_t d_s_index = match_result.bmm_1_grad_1_need_canonicalization ? 1 : 0;
HloInstruction* d_s_user_0 = match_result.matched_bmm_1_grad_1;
HloInstruction* d_s = d_s_user_0->mutable_operand(d_s_index);
if (d_s->opcode() == HloOpcode::kBitcast && d_s->user_count() == 1) {
d_s = d_s->mutable_operand(0);
}
auto bmm_1_grad_2_it = std::find_if(
d_s->users().begin(), d_s->users().end(), [&](HloInstruction* instr) {
return instr != match_result.matched_bmm_1_grad_1 &&
instr->opcode() == HloOpcode::kDot;
});
if (bmm_1_grad_2_it != d_s->users().end()) {
bmm_1_grad_2 = *bmm_1_grad_2_it;
} else {
return match_result;
}
match_result.matched_bmm_1_grad_2 = bmm_1_grad_2;
if (match_result.matched_bmm_1_grad_2->operand_index(d_s) != 0) {
match_result.bmm_1_grad_2_need_canonicalization = true;
}
match_result.has_match = true;
return match_result;
}
MatchBwdResult MatchBmm2GradGemm1(HloInstruction* fwd_fmha_call) {
HloInstruction* bmm_2_grad_1 = nullptr;
MatchBwdResult matched_result;
int64_t activation_out_gte_index = 1;
if (fwd_fmha_call->user_count() < 2 ||
fwd_fmha_call->users()[activation_out_gte_index]->opcode() !=
HloOpcode::kGetTupleElement ||
fwd_fmha_call->users()[activation_out_gte_index]->user_count() > 1 ||
!IsBatchedMatmul(
fwd_fmha_call->users()[activation_out_gte_index]->users()[0])) {
matched_result.has_match = false;
return matched_result;
}
bmm_2_grad_1 = fwd_fmha_call->users()[activation_out_gte_index]->users()[0];
matched_result.matched_bmm_2_grad_1 = bmm_2_grad_1;
if (bmm_2_grad_1->operand_index(
fwd_fmha_call->users()[activation_out_gte_index]) != 0) {
matched_result.bmm_ | #include "xla/service/gpu/cudnn_fused_mha_rewriter.h"
#include <cstddef>
#include <memory>
#include <optional>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/computation_layout.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/cudnn_fused_mha_transpose_fusion.h"
#include "xla/service/hlo_cse.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/layout_normalization.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/reshape_decomposer.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cudnn/cudnn.h"
#endif
namespace xla {
namespace gpu {
namespace {
namespace m = xla::match;
class CudnnFusedMhaRewriterTestHloTest : public HloTestBase {
public:
se::CudaComputeCapability GetCudaComputeCapability() {
return se::CudaComputeCapability(8, 0);
}
se::CudaComputeCapability GetRealCudaComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
se::dnn::VersionInfo GetCudnnVersion() {
return se::dnn::VersionInfo(8, 9, 4);
}
CudnnFusedMhaRewriterTestHloTest()
: HloTestBase(false,
false,
{}) {
#if !defined(GOOGLE_CUDA) || CUDA_VERSION < 12000
skip_reason_ = "cuDNN fused MHA requires CUDA 12 or later.";
return;
#endif
}
protected:
size_t CountFusedAttentionCall(HloModule* module, bool is_backward = false) {
return absl::c_count_if(module->entry_computation()->instructions(),
[&](const HloInstruction* instr) {
if (is_backward) {
return IsBwdCustomCallTofMHA(*instr);
} else {
return IsFwdCustomCallTofMHA(*instr);
}
});
}
DebugOptions GetDebugOptionsForTest() override {
auto debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_cudnn_fmha(true);
debug_options.set_xla_gpu_fused_attention_use_cudnn_rng(true);
return debug_options;
}
HloModuleConfig GetModuleConfig() {
DebugOptions debug_options = GetDebugOptionsForTest();
HloModuleConfig config_with_fmha;
config_with_fmha.set_debug_options(debug_options);
return config_with_fmha;
}
std::optional<absl::string_view> skip_reason_;
};
class CudnnFusedMhaRewriterPipelineTest
: public CudnnFusedMhaRewriterTestHloTest {
public:
CudnnFusedMhaRewriterPipelineTest() {
if (skip_reason_) return;
#if !defined(GOOGLE_CUDA) || CUDNN_VERSION < 8800
skip_reason_ = "Pipeline test requires cuDNN 8.8.0 or later.";
return;
#endif
stream_executor::CudaComputeCapability cc = GetRealCudaComputeCapability();
if (!cc.IsAtLeastAmpere() || cc.minor != 0) {
skip_reason_ =
"Pipeline test requires Nvidia AMPERE+ GPUs with minor "
"compute capability == 0.";
return;
}
}
};
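// The HLO snippets below exercise layout edge cases: a parameter layout such
// as {2,3,1,0} makes a contracting or non-contracting dimension of Q, K, or V
// not the most-minor dimension, and the tests verify that the rewriter still
// fuses and records the expected dot dimension numbers in the backend config.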
constexpr absl::string_view
hlo_BF16Bmm1SoftmaxBmm2Pattern_k_hidden_not_most_minor = R"(
HloModule fmha_test, entry_computation_layout={(bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0})->bf16[16,16,256,64]{3,2,1,0}}
region_0.7 {
Arg_0.8 = bf16[] parameter(0)
Arg_1.9 = bf16[] parameter(1)
ROOT maximum = bf16[] maximum(Arg_0.8, Arg_1.9)
}
region_1.19 {
Arg_0.20 = f32[] parameter(0)
Arg_1.21 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0.20, Arg_1.21)
}
ENTRY main.6 {
Arg_2.3 = bf16[16,16,256,64]{3,2,1,0} parameter(2)
Arg_0.1 = bf16[16,16,256,64]{3,2,1,0} parameter(0)
Arg_1.2 = bf16[16,16,256,64]{2,3,1,0} parameter(1)
dot.0 = bf16[16,16,256,256]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}, metadata={}
constant = bf16[] constant(-inf)
reduce.11 = bf16[16,16,256]{2,1,0} reduce(dot.0, constant), dimensions={3}, to_apply=region_0.7
broadcast.3 = bf16[16,16,256,256]{3,2,1,0} broadcast(reduce.11), dimensions={0,1,2}
subtract.1 = bf16[16,16,256,256]{3,2,1,0} subtract(dot.0, broadcast.3)
exponential.1 = bf16[16,16,256,256]{3,2,1,0} exponential(subtract.1)
convert.1 = f32[16,16,256,256]{3,2,1,0} convert(exponential.1)
constant.1 = f32[] constant(0)
reduce.23 = f32[16,16,256]{2,1,0} reduce(convert.1, constant.1), dimensions={3}, to_apply=region_1.19
convert.2 = bf16[16,16,256]{2,1,0} convert(reduce.23)
broadcast.4 = bf16[16,16,256,256]{3,2,1,0} broadcast(convert.2), dimensions={0,1,2}
divide = bf16[16,16,256,256]{3,2,1,0} divide(exponential.1, broadcast.4)
ROOT dot.1 = bf16[16,16,256,64]{3,2,1,0} dot(divide, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}, metadata={}
})";
TEST_F(CudnnFusedMhaRewriterTestHloTest,
BF16Bmm1SoftmaxBmm2Pattern_bmm1_rhs_contracting_dim_not_most_minor) {
if (skip_reason_) GTEST_SKIP() << *skip_reason_;
TF_ASSERT_OK_AND_ASSIGN(
auto m, ParseAndReturnVerifiedModule(
hlo_BF16Bmm1SoftmaxBmm2Pattern_k_hidden_not_most_minor));
CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(),
GetCudnnVersion()};
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&fusedMhaRewriter, m.get()));
EXPECT_TRUE(result);
const HloInstruction* fmha;
SCOPED_TRACE(m->ToString());
EXPECT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall(&fmha, {kCudnnfMHASoftmaxCallTarget}), 0)
.WithShape(BF16, {16, 16, 256, 64})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
fmha->backend_config<GpuBackendConfig>());
const CudnnfMHABackendConfig& config = gpu_config.cudnn_fmha_backend_config();
EXPECT_EQ(config.bmm1_dot_dimension_numbers().rhs_contracting_dimensions()[0],
2);
}
constexpr absl::string_view
hlo_BF16Bmm1SoftmaxBmm2Pattern_q_hidden_not_most_minor = R"(
HloModule fmha_test, entry_computation_layout={(bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0})->bf16[16,16,256,64]{3,2,1,0}}
region_0.7 {
Arg_0.8 = bf16[] parameter(0)
Arg_1.9 = bf16[] parameter(1)
ROOT maximum = bf16[] maximum(Arg_0.8, Arg_1.9)
}
region_1.19 {
Arg_0.20 = f32[] parameter(0)
Arg_1.21 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0.20, Arg_1.21)
}
ENTRY main.6 {
Arg_2.3 = bf16[16,16,256,64]{3,2,1,0} parameter(2)
Arg_0.1 = bf16[16,16,256,64]{2,3,1,0} parameter(0)
Arg_1.2 = bf16[16,16,256,64]{2,3,1,0} parameter(1)
dot.0 = bf16[16,16,256,256]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}, metadata={}
constant = bf16[] constant(-inf)
reduce.11 = bf16[16,16,256]{2,1,0} reduce(dot.0, constant), dimensions={3}, to_apply=region_0.7
broadcast.3 = bf16[16,16,256,256]{3,2,1,0} broadcast(reduce.11), dimensions={0,1,2}
subtract.1 = bf16[16,16,256,256]{3,2,1,0} subtract(dot.0, broadcast.3)
exponential.1 = bf16[16,16,256,256]{3,2,1,0} exponential(subtract.1)
convert.1 = f32[16,16,256,256]{3,2,1,0} convert(exponential.1)
constant.1 = f32[] constant(0)
reduce.23 = f32[16,16,256]{2,1,0} reduce(convert.1, constant.1), dimensions={3}, to_apply=region_1.19
convert.2 = bf16[16,16,256]{2,1,0} convert(reduce.23)
broadcast.4 = bf16[16,16,256,256]{3,2,1,0} broadcast(convert.2), dimensions={0,1,2}
divide = bf16[16,16,256,256]{3,2,1,0} divide(exponential.1, broadcast.4)
ROOT dot.1 = bf16[16,16,256,64]{3,2,1,0} dot(divide, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}, metadata={}
})";
TEST_F(CudnnFusedMhaRewriterTestHloTest,
BF16Bmm1SoftmaxBmm2Pattern_bmm1_lhs_contracting_dim_not_most_minor) {
if (skip_reason_) GTEST_SKIP() << *skip_reason_;
TF_ASSERT_OK_AND_ASSIGN(
auto m, ParseAndReturnVerifiedModule(
hlo_BF16Bmm1SoftmaxBmm2Pattern_q_hidden_not_most_minor));
CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(),
GetCudnnVersion()};
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&fusedMhaRewriter, m.get()));
EXPECT_TRUE(result);
const HloInstruction* fmha;
SCOPED_TRACE(m->ToString());
EXPECT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall(&fmha, {kCudnnfMHASoftmaxCallTarget}), 0)
.WithShape(BF16, {16, 16, 256, 64})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
fmha->backend_config<GpuBackendConfig>());
const CudnnfMHABackendConfig& config = gpu_config.cudnn_fmha_backend_config();
EXPECT_EQ(config.bmm1_dot_dimension_numbers().lhs_contracting_dimensions()[0],
2);
EXPECT_EQ(config.bmm1_dot_dimension_numbers().rhs_contracting_dimensions()[0],
2);
}
constexpr absl::string_view
hlo_BF16Bmm1SoftmaxBmm2Pattern_v_hidden_dim_not_most_minor = R"(
HloModule fmha_test, entry_computation_layout={(bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0})->bf16[16,16,256,64]{3,2,1,0}}
region_0.7 {
Arg_0.8 = bf16[] parameter(0)
Arg_1.9 = bf16[] parameter(1)
ROOT maximum = bf16[] maximum(Arg_0.8, Arg_1.9)
}
region_1.19 {
Arg_0.20 = f32[] parameter(0)
Arg_1.21 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0.20, Arg_1.21)
}
ENTRY main.6 {
Arg_2.3 = bf16[16,16,256,64]{2,3,1,0} parameter(2)
Arg_0.1 = bf16[16,16,256,64]{2,3,1,0} parameter(0)
Arg_1.2 = bf16[16,16,256,64]{2,3,1,0} parameter(1)
dot.0 = bf16[16,16,256,256]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}, metadata={}
constant = bf16[] constant(-inf)
reduce.11 = bf16[16,16,256]{2,1,0} reduce(dot.0, constant), dimensions={3}, to_apply=region_0.7
broadcast.3 = bf16[16,16,256,256]{3,2,1,0} broadcast(reduce.11), dimensions={0,1,2}
subtract.1 = bf16[16,16,256,256]{3,2,1,0} subtract(dot.0, broadcast.3)
exponential.1 = bf16[16,16,256,256]{3,2,1,0} exponential(subtract.1)
convert.1 = f32[16,16,256,256]{3,2,1,0} convert(exponential.1)
constant.1 = f32[] constant(0)
reduce.23 = f32[16,16,256]{2,1,0} reduce(convert.1, constant.1), dimensions={3}, to_apply=region_1.19
convert.2 = bf16[16,16,256]{2,1,0} convert(reduce.23)
broadcast.4 = bf16[16,16,256,256]{3,2,1,0} broadcast(convert.2), dimensions={0,1,2}
divide = bf16[16,16,256,256]{3,2,1,0} divide(exponential.1, broadcast.4)
ROOT dot.1 = bf16[16,16,256,64]{3,2,1,0} dot(divide, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}, metadata={}
})";
TEST_F(CudnnFusedMhaRewriterTestHloTest,
BF16Bmm1SoftmaxBmm2Pattern_bmm2_non_contracting_dim_not_most_minor) {
if (skip_reason_) GTEST_SKIP() << *skip_reason_;
TF_ASSERT_OK_AND_ASSIGN(
auto m, ParseAndReturnVerifiedModule(
hlo_BF16Bmm1SoftmaxBmm2Pattern_v_hidden_dim_not_most_minor));
CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(),
GetCudnnVersion()};
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&fusedMhaRewriter, m.get()));
EXPECT_TRUE(result);
const HloInstruction* fmha;
SCOPED_TRACE(m->ToString());
EXPECT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall(&fmha, {kCudnnfMHASoftmaxCallTarget}), 0)
.WithShape(BF16, {16, 16, 256, 64})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
fmha->backend_config<GpuBackendConfig>());
const CudnnfMHABackendConfig& config = gpu_config.cudnn_fmha_backend_config();
EXPECT_EQ(config.bmm2_dot_dimension_numbers().lhs_contracting_dimensions()[0],
3);
EXPECT_EQ(config.bmm2_dot_dimension_numbers().rhs_contracting_dimensions()[0],
3);
}
TEST_F(CudnnFusedMhaRewriterTestHloTest, BF16Bmm1CombinedMaskBiasSoftmaxBmm2) {
if (skip_reason_) GTEST_SKIP() << *skip_reason_;
const char* module_str = R"(
HloModule jit__unnamed_wrapped_function_,
entry_computation_layout={(bf16[16,256,16,64]{3,2,1,0},bf16[16,256,16,64]{3,2,1,0},bf16[16,256,16,64]{3,2,1,0},bf16[1,16,256,256]{3,2,1,0},pred[16,1,256,256]{3,2,1,0})->bf16[16,256,16,64]{3,2,1,0}}
region_0.32.clone {
Arg_0.0 = f32[] parameter(0)
Arg_1.0 = f32[] parameter(1)
ROOT maximum.1 = f32[] maximum(Arg_0.0, Arg_1.0)
}
region_1.44 {
Arg_0.45 = f32[] parameter(0)
Arg_1.46 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0.45, Arg_1.46)
}
ENTRY main.61 {
Arg_2.3 = bf16[16,256,16,64]{3,2,1,0} parameter(2), sharding={replicated}
transpose.5 = bf16[16,16,64,256]{3,2,1,0} transpose(Arg_2.3), dimensions={0,2,3,1}
Arg_0.1 = bf16[16,256,16,64]{3,2,1,0} parameter(0), sharding={replicated}
transpose.6 = bf16[16,16,256,64]{3,2,1,0} transpose(Arg_0.1), dimensions={0,2,1,3}
Arg_1.2 = bf16[16,256,16,64]{3,2,1,0} parameter(1), sharding={replicated}
transpose.7 = bf16[16,16,64,256]{3,2,1,0} transpose(Arg_1.2), dimensions={0,2,3,1}
Arg_4.5 = pred[16,1,256,256]{3,2,1,0} parameter(4), sharding={replicated}
bitcast.35 = pred[16,256,256]{2,1,0} bitcast(Arg_4.5)
convert.49 = s32[16,256,256]{2,1,0} convert(bitcast.35)
constant.5 = s32[] constant(0)
broadcast.10 = s32[16,256,256]{2,1,0} broadcast(constant.5), dimensions={}
compare = pred[16,256,256]{2,1,0} compare(convert.49, broadcast.10), direction=GT
constant.7 = bf16[] constant(0)
broadcast.12 = bf16[16,256,256]{2,1,0} broadcast(constant.7), dimensions={}
constant.9 = bf16[] constant(-9.999e+09)
broadcast.13 = bf16[16,256,256]{2,1,0} broadcast(constant.9), dimensions={}
select = bf16[16,256,256]{2,1,0} select(compare, broadcast.12, broadcast.13)
convert.51 = f32[16,256,256]{2,1,0} convert(select)
broadcast.14 = f32[16,16,256,256]{3,2,1,0} broadcast(convert.51), dimensions={0,2,3}
Arg_3.4 = bf16[1,16,256,256]{3,2,1,0} parameter(3), sharding={replicated}
bitcast.52 = bf16[16,256,256]{2,1,0} bitcast(Arg_3.4)
convert.52 = f32[16,256,256]{2,1,0} convert(bitcast.52)
broadcast.15 = f32[16,16,256,256]{3,2,1,0} broadcast(convert.52), dimensions={1,2,3}
add.1 = f32[16,16,256,256]{3,2,1,0} add(broadcast.14, broadcast.15)
dot.2 = bf16[16,16,256,256]{3,2,1,0} dot(transpose.6, transpose.7), lhs_contracting_dims={3}, rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}
convert.55 = f32[16,16,256,256]{3,2,1,0} convert(dot.2)
add.18 = f32[16,16,256,256]{3,2,1,0} add(convert.55, add.1)
constant.11 = f32[] constant(-inf)
reduce.36 = f32[16,16,256]{2,1,0} reduce(add.18, constant.11), dimensions={3}, to_apply=region_0.32.clone
broadcast.17 = f32[16,16,256,256]{3,2,1,0} broadcast(reduce.36), dimensions={0,1,2}
subtract.1 = f32[16,16,256,256]{3,2,1,0} subtract(add.18, broadcast.17)
exponential.1 = f32[16,16,256,256]{3,2,1,0} exponential(subtract.1)
constant.14 = f32[] constant(0)
reduce.48 = f32[16,16,256]{2,1,0} reduce(exponential.1, constant.14), dimensions={3}, to_apply=region_1.44
broadcast.18 = f32[16,16,256,256]{3,2,1,0} broadcast(reduce.48), dimensions={0,1,2}
divide = f32[16,16,256,256]{3,2,1,0} divide(exponential.1, broadcast.18)
convert.68 = bf16[16,16,256,256]{3,2,1,0} convert(divide)
dot.1 = bf16[16,16,64,256]{3,2,1,0} dot(transpose.5, convert.68), lhs_contracting_dims={3}, rhs_contracting_dims={3}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}
ROOT transpose.8 = bf16[16,256,16,64]{3,2,1,0} transpose(dot.1), dimensions={0,3,1,2}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(),
GetCudnnVersion()};
TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status());
const HloInstruction* fmha;
SCOPED_TRACE(m->ToString());
EXPECT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(
m::Transpose(
m::Transpose(m::GetTupleElement(
m::CustomCall(&fmha, {kCudnnfMHAScaleBiasSoftmaxCallTarget}),
0)))
.WithShape(BF16, {16, 256, 16, 64})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
fmha->backend_config<GpuBackendConfig>());
EXPECT_EQ(fmha->operands().size(), 4);
}
TEST_F(CudnnFusedMhaRewriterTestHloTest, F16Bmm1UnfusedSoftmaxBmm2) {
if (skip_reason_) GTEST_SKIP() << *skip_reason_;
const char* module_str = R"(
HloModule jit__unnamed_wrapped_function_, entry_computation_layout={(f16[2,6,40,64]{3,2,1,0},f16[2,6,64,40]{3,2,1,0},f16[2,6,40,64]{3,2,1,0})->f16[2,6,40,64]{3,2,1,0}}
region_0.7 {
Arg_0.8 = f16[] parameter(0)
Arg_1.9 = f16[] parameter(1)
ROOT maximum = f16[] maximum(Arg_0.8, Arg_1.9)
}
region_1.19 {
Arg_0.20 = f32[] parameter(0)
Arg_1.21 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0.20, Arg_1.21)
}
ENTRY main.31 {
Arg_0.1 = f16[2,6,40,64]{3,2,1,0} parameter(0), sharding={replicated}
Arg_1.2 = f16[2,6,64,40]{3,2,1,0} parameter(1), sharding={replicated}
dot = f16[2,6,40,40]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={3}, rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}
constant = f16[] constant(-inf)
reduce.11 = f16[2,6,40]{2,1,0} reduce(dot, constant), dimensions={3}, to_apply=region_0.7
broadcast.3 = f16[2,6,40,40]{3,2,1,0} broadcast(reduce.11), dimensions={0,1,2}
subtract.1 = f16[2,6,40,40]{3,2,1,0} subtract(dot, broadcast.3)
exponential.1 = f16[2,6,40,40]{3,2,1,0} exponential(subtract.1)
convert.1 = f32[2,6,40,40]{3,2,1,0} convert(exponential.1)
constant.1 = f32[] constant(0)
reduce.23 = f32[2,6,40]{2,1,0} reduce(convert.1, constant.1), dimensions={3}, to_apply=region_1.19
convert.2 = f16[2,6,40]{2,1,0} convert(reduce.23)
broadcast.4 = f16[2,6,40,40]{3,2,1,0} broadcast(convert.2), dimensions={0,1,2}
divide = f16[2,6,40,40]{3,2,1,0} divide(exponential.1, broadcast.4)
Arg_2.3 = f16[2,6,40,64]{3,2,1,0} parameter(2), sharding={replicated}
ROOT dot.1 = f16[2,6,40,64]{3,2,1,0} dot(divide, Arg_2.3), lhs_contracting_dims={3}, rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(),
GetCudnnVersion()};
TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status());
const HloInstruction* fmha;
SCOPED_TRACE(m->ToString());
EXPECT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall(&fmha, {kCudnnfMHASoftmaxCallTarget}), 0)
.WithShape(F16, {2, 6, 40, 64})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
fmha->backend_config<GpuBackendConfig>());
const CudnnfMHABackendConfig& config = gpu_config.cudnn_fmha_backend_config();
EXPECT_FLOAT_EQ(config.fmha_scale(), 1.0);
EXPECT_FLOAT_EQ(config.dropout_rate(), 0.0);
EXPECT_EQ(fmha->operands().size(), 3);
}
TEST_F(CudnnFusedMhaRewriterTestHloTest,
BF16Bmm1ConvertedMaskAddedAfterFirstGemmSoftmaxBmm2) {
if (skip_reason_) GTEST_SKIP() << *skip_reason_;
const char* module_str = R"(
HloModule jit__unnamed_wrapped_function_, entry_computation_layout={(bf16[16,256,16,64]{3,2,1,0},bf16[16,256,16,64]{3,2,1,0},bf16[16,256,16,64]{3,2,1,0},pred[16,1,256,256]{3,2,1,0})->bf16[16,256,16,64]{3,2,1,0}}
region_0.27.clone {
Arg_0.0 = f32[] parameter(0)
Arg_1.0 = f32[] parameter(1)
ROOT maximum.1 = f32[] maximum(Arg_0.0, Arg_1.0)
}
region_1.39 {
Arg_0.40 = f32[] parameter(0)
Arg_1.41 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0.40, Arg_1.41)
}
ENTRY main.56 {
Arg_2.3 = bf16[16,256,16,64]{3,2,1,0} parameter(2), sharding={replicated}
transpose.5 = bf16[16,16,64,256]{3,2,1,0} transpose(Arg_2.3), dimensions={0,2,3,1}
Arg_0.1 = bf16[16,256,16,64]{3,2,1,0} parameter(0), sharding={replicated}
transpose.6 = bf16[16,16,256,64]{3,2,1,0} transpose(Arg_0.1), dimensions={0,2,1,3}
Arg_1.2 = bf16[16,256,16,64]{3,2,1,0} parameter(1), sharding={replicated}
transpose.7 = bf16[16,16,64,256]{3,2,1,0} transpose(Arg_1.2), dimensions={0,2,3,1}
dot = bf16[16,16,256,256]{3,2,1,0} dot(transpose.6, transpose.7), lhs_contracting_dims={3}, rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}
convert.47 = f32[16,16,256,256]{3,2,1,0} convert(dot)
Arg_3.4 = pred[16,1,256,256]{3,2,1,0} parameter(3), sharding={replicated}
bitcast.37 = pred[16,256,256]{2,1,0} bitcast(Arg_3.4)
convert.42 = s32[16,256,256]{2,1,0} convert(bitcast.37)
constant.6 = s32[] constant(0)
broadcast.9 = s32[16,256,256]{2,1,0} broadcast(constant.6), dimensions={}
compare = pred[16,256,256]{2,1,0} compare(convert.42, broadcast.9), direction=GT
constant.8 = bf16[] constant(0)
broadcast.11 = bf16[16,256,256]{2,1,0} broadcast(constant.8), dimensions={}
constant.10 = bf16[] constant(-9.999e+09)
broadcast.12 = bf16[16,256,256]{2,1,0} broadcast(constant.10), dimensions={}
select = bf16[16,256,256]{2,1,0} select(compare, broadcast.11, broadcast.12)
convert.48 = f32[16,256,256]{2,1,0} convert(select)
broadcast.14 = f32[16,16,256,256]{3,2,1,0} broadcast(convert.48), dimensions={0,2,3}
add.2 = f32[16,16,256,256]{3,2,1,0} add(convert.47, broadcast.14)
constant.13 = f32[] constant(-inf)
reduce.31 = f32[16,16,256]{2,1,0} reduce(add.2, constant.13), dimensions={3}, to_apply=region_0.27.clone
broadcast.16 = f32[16,16,256,256]{3,2,1,0} broadcast(reduce.31), dimensions={0,1,2}
subtract.1 = f32[16,16,256,256]{3,2,1,0} subtract(add.2, broadcast.16)
exponential.1 = f32[16,16,256,256]{3,2,1,0} exponential(subtract.1)
constant.14 = f32[] constant(0)
reduce.43 = f32[16,16,256]{2,1,0} reduce(exponential.1, constant.14), dimensions={3}, to_apply=region_1.39
broadcast.17 = f32[16,16,256,256]{3,2,1,0} broadcast(reduce.43), dimensions={0,1,2}
divide = f32[16,16,256,256]{3,2,1,0} divide(exponential.1, broadcast.17)
convert.63 = bf16[16,16,256,256]{3,2,1,0} convert(divide)
dot.1 = bf16[16,16,64,256]{3,2,1,0} dot(transpose.5, convert.63), lhs_contracting_dims={3}, rhs_contracting_dims={3}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}
ROOT transpose.8 = bf16[16,256,16,64]{3,2,1,0} transpose(dot.1), dimensions={0,3,1,2}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(),
GetCudnnVersion()};
TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status());
const HloInstruction* fmha;
SCOPED_TRACE(m->ToString());
EXPECT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(
m::Transpose(
m::Transpose(m::GetTupleElement(
m::CustomCall(&fmha, {kCudnnfMHAScaleBiasSoftmaxCallTarget}),
0)))
.WithShape(BF16, {16, 256, 16, 64})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
fmha->backend_config<GpuBackendConfig>());
EXPECT_EQ(fmha->operands().size(), 4);
}
TEST_F(CudnnFusedMhaRewriterTestHloTest,
BF16Bmm1Bmm2Pattern_bmm1_contracting_dim_not_equal_64) {
if (skip_reason_) GTEST_SKIP() << *skip_reason_;
const char* module_str = R"(
HloModule fmha_test, entry_computation_layout={(bf16[16,16,256,32]{3,2,1,0},bf16[16,16,256,32]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0})->bf16[16,16,256,64]{3,2,1,0}}
ENTRY main.6 {
Arg_2.3 = bf16[16,16,256,64]{3,2,1,0} parameter(2)
Arg_0.1 = bf16[16,16,256,32]{3,2,1,0} parameter(0)
Arg_1.2 = bf16[16,16,256,32]{3,2,1,0} parameter(1)
dot.0 = bf16[16,16,256,256]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}, metadata={}
ROOT dot.1 = bf16[16,16,256,64]{3,2,1,0} dot(dot.0, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}, metadata={}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(),
GetCudnnVersion()};
TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status());
const HloInstruction* fmha;
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Dot(&fmha, m::Dot(m::Parameter(0), m::Parameter(1)),
m::Parameter(2))
.WithShape(BF16, {16, 16, 256, 64})));
}
TEST_F(CudnnFusedMhaRewriterTestHloTest,
BF16Bmm1Bmm2Pattern_bmm2_rhs_non_contracting_dim_not_equal_64) {
if (skip_reason_) GTEST_SKIP() << *skip_reason_;
const char* module_str = R"(
HloModule fmha_test, entry_computation_layout={(bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,32]{3,2,1,0})->bf16[16,16,256,32]{3,2,1,0}}
ENTRY main.6 {
Arg_2.3 = bf16[16,16,256,32]{3,2,1,0} parameter(2)
Arg_0.1 = bf16[16,16,256,64]{3,2,1,0} parameter(0)
Arg_1.2 = bf16[16,16,256,64]{3,2,1,0} parameter(1)
dot.0 = bf16[16,16,256,256]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}, metadata={}
ROOT dot.1 = bf16[16,16,256,32]{3,2,1,0} dot(dot.0, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}, metadata={}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(),
GetCudnnVersion()};
TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status());
const HloInstruction* fmha;
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Dot(&fmha, m::Op(), m::Parameter(2))
.WithShape(BF16, {16, 16, 256, 32})));
}
TEST_F(CudnnFusedMhaRewriterTestHloTest,
BF16Bmm1Bmm2PatternUncanonicalized_bmm1_contracting_dim_not_equal_64) {
if (skip_reason_) GTEST_SKIP() << *skip_reason_;
const char* module_str = R"(
HloModule fmha_test, entry_computation_layout={(bf16[16,16,256,32]{3,2,1,0},bf16[16,16,256,32]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0})->bf16[16,16,64,256]{3,2,1,0}}
ENTRY main.6 {
Arg_2.3 = bf16[16,16,256,64]{3,2,1,0} parameter(2)
Arg_0.1 = bf16[16,16,256,32]{3,2,1,0} parameter(0)
Arg_1.2 = bf16[16,16,256,32]{3,2,1,0} parameter(1)
dot.0 = bf16[16,16,256,256]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}, metadata={}
ROOT dot.1 = bf16[16,16,64,256]{3,2,1,0} dot(Arg_2.3, dot.0), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}, metadata={}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(),
GetCudnnVersion()};
TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status());
const HloInstruction* fmha;
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Dot(&fmha, m::Parameter(2), m::Op())
.WithShape(BF16, {16, 16, 64, 256})));
}
TEST_F(CudnnFusedMhaRewriterTestHloTest, BF16Bmm1BiasSoftmaxDropoutBmm2) {
if (skip_reason_) GTEST_SKIP() << *skip_reason_;
const char* module_str = R"(
HloModule jit__unnamed_wrapped_function_, entry_computation_layout={(bf16[16,256,16,64]{3,2,1,0},bf16[16,256,16,64]{3,2,1,0},bf16[16,256,16,64]{3,2,1,0},bf16[1,16,256,256]{3,2,1,0})->bf16[16,256,16,64]{3,2,1,0}}
region_0.34 {
Arg_0.35 = bf16[] parameter(0)
Arg_1.36 = bf16[] parameter(1)
ROOT maximum.37 = bf16[] maximum(Arg_0.35, Arg_1.36)
}
region_1.46 {
Arg_0.47 = f32[] parameter(0)
Arg_1.48 = f32[] parameter(1)
ROOT add.49 = f32[] add(Arg_0.47, Arg_1.48)
}
ENTRY main.82 {
Arg_2.3 = bf16[16,256,16,64]{3,2,1,0} parameter(2), sharding={replicated}
copy = bf16[16,256,16,64]{1,3,2,0} copy(Arg_2.3), sharding={replicated}
transpose.2 = bf16[16,16,64,256]{3,2,1,0} transpose(copy), dimensions={0,2,3,1}
Arg_0.1 = bf16[16,256,16,64]{3,2,1,0} parameter(0), sharding={replicated}
copy.1 = bf16[16,256,16,64]{3,1,2,0} copy(Arg_0.1), sharding={replicated}
transpose = bf16[16,16,256,64]{3,2,1,0} transpose(copy.1), dimensions={0,2,1,3}
Arg_1.2 = bf16[16,256,16,64]{3,2,1,0} parameter(1), sharding={replicated}
copy.2 = bf16[16,256,16,64]{1,3,2,0} copy(Arg_1.2), sharding={replicated}
transpose.1 = bf16[16,16,64,256]{3,2,1,0} transpose(copy.2), dimensions={0,2,3,1}
dot = bf16[16,16,256,256]{3,2,1,0} dot(transpose, transpose.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
Arg_3.4 = bf16[1,16,256,256]{3,2,1,0} parameter(3), sharding={replicated}
reshape.31 = bf16[16,256,256]{2,1,0} reshape(Arg_3.4)
broadcast.32 = bf16[16,16,256,256]{3,2,1,0} broadcast(reshape.31), dimensions={1,2,3}
add.33 = bf16[16,16,256,256]{3,2,1,0} add(dot, broadcast.32)
constant.21 = bf16[] constant(-inf)
reduce.38 = bf16[16,16,256]{2,1,0} reduce(add.33, constant.21), dimensions={3}, to_apply=region_0.34
broadcast.42 = bf16[16,16,256,256]{3,2,1,0} broadcast(reduce.38), dimensions={0,1,2}
subtract.43 = bf16[16,16,256,256]{3,2,1,0} subtract(add.33, broadcast.42)
exponential.44 = bf16[16,16,256,256]{3,2,1,0} exponential(subtract.43)
convert.45 = f32[16,16,256,256]{3,2,1,0} convert(exponential.44)
constant.9 = f32[] constant(0)
reduce.50 = f32[16,16,256]{2,1,0} reduce(convert.45, constant.9), dimensions={3}, to_apply=region_1.46
convert.1 = bf16[16,16,256]{2,1,0} convert(reduce.50)
broadcast.55 = bf16[16,16,256,256]{3,2,1,0} broadcast(convert.1), dimensions={0,1,2}
divide.56 = bf16[16,16,256,256]{3,2,1,0} divide(exponential.44, broadcast.55)
constant.18 = u32[ |
2,096 | cpp | tensorflow/tensorflow | alias_passthrough_params | third_party/xla/xla/service/gpu/transforms/alias_passthrough_params.cc | third_party/xla/xla/service/gpu/transforms/alias_passthrough_params_test.cc | #ifndef XLA_SERVICE_GPU_ALIAS_PASSTHROUGH_PARAMS_H_
#define XLA_SERVICE_GPU_ALIAS_PASSTHROUGH_PARAMS_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
class AliasPassthroughParams : public HloModulePass {
public:
AliasPassthroughParams() = default;
~AliasPassthroughParams() override = default;
absl::string_view name() const override { return "alias_passthrough_params"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/alias_passthrough_params.h"
#include <cstdint>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
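// Marks entry parameters that are returned unchanged in the root tuple as
// input/output aliases so the runtime can reuse the parameter buffer for the
// output. For example, with
//   ROOT root = (f16[...], f16[...], f16[...]) tuple(p0, sum, p1)
// outputs {0} and {2} get aliased to parameters 0 and 1. Each parameter is
// aliased at most once, and pre-existing aliases are left untouched.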
absl::StatusOr<bool> AliasPassthroughParams::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
const HloInstruction* root = module->entry_computation()->root_instruction();
if (module->entry_computation()->num_parameters() == 0 ||
root->opcode() != HloOpcode::kTuple) {
return false;
}
bool changed = false;
absl::flat_hash_set<int64_t> used_params;
for (int64_t i = 0; i < root->operand_count(); ++i) {
if (root->operand(i)->opcode() == HloOpcode::kParameter &&
used_params.count(root->operand(i)->parameter_number()) == 0) {
VLOG(2) << "Parameter " << root->operand(i)->parameter_number()
<< " with shape " << root->operand(i)->shape().ToString()
<< " in module " << module->name()
<< " is passed-through to root tuple element " << i << ": "
<< root->shape().ToString();
if (module->input_output_alias_config().OutputHasAlias({i}) ||
module->input_output_alias_config().ParameterHasAlias(
root->operand(i)->parameter_number(), {})) {
VLOG(2) << "Skip setting the above pass-through alias as an alias may"
<< " have been set up for alising resource update.";
continue;
}
TF_RETURN_IF_ERROR(module->input_output_alias_config().SetUpAlias(
{i},
root->operand(i)->parameter_number(),
{}));
used_params.insert(root->operand(i)->parameter_number());
changed = true;
}
}
return changed;
}
}
} | #include "xla/service/gpu/alias_passthrough_params.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
class AliasPassthroughParamsTest : public HloTestBase {};
TEST_F(AliasPassthroughParamsTest, AliasPassThroughParams) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
sum = f16[2048,1024] add(p0, p1)
ROOT root = (f16[2048,1024], f16[2048,1024], f16[2048,1024]) tuple(p0, sum, p1)
})")
.value();
EXPECT_TRUE(AliasPassthroughParams().Run(module.get()).value());
const auto& alias_config = module->input_output_alias_config();
EXPECT_EQ(0, alias_config.GetAliasedParameter({0})->parameter_number);
EXPECT_FALSE(alias_config.OutputHasAlias({1}));
EXPECT_EQ(1, alias_config.GetAliasedParameter({2})->parameter_number);
}
TEST_F(AliasPassthroughParamsTest, DoNotAliasPassThroughParamsMoreThanOnce) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
ROOT root = (f16[2048,1024], f16[2048,1024]) tuple(p0, p0)
})")
.value();
EXPECT_TRUE(AliasPassthroughParams().Run(module.get()).value());
const auto& alias_config = module->input_output_alias_config();
EXPECT_EQ(0, alias_config.GetAliasedParameter({0})->parameter_number);
EXPECT_FALSE(alias_config.OutputHasAlias({1}));
}
TEST_F(AliasPassthroughParamsTest, PresetAliases) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
sum = f16[2048,1024] add(p0, p1)
ROOT root = (f16[2048,1024], f16[2048,1024], f16[2048,1024]) tuple(p0, sum, p1)
})")
.value();
auto& preset_alias = module->input_output_alias_config();
TF_EXPECT_OK(preset_alias.SetUpAlias({1},
0,
{}));
EXPECT_TRUE(AliasPassthroughParams().Run(module.get()).value());
const auto& alias_result = module->input_output_alias_config();
EXPECT_EQ(1, alias_result.GetAliasedParameter({2})->parameter_number);
EXPECT_FALSE(alias_result.OutputHasAlias({0}));
}
}
} |
2,097 | cpp | tensorflow/tensorflow | dot_operand_converter | third_party/xla/xla/service/gpu/transforms/dot_operand_converter.cc | third_party/xla/xla/service/gpu/transforms/dot_operand_converter_test.cc | #ifndef XLA_SERVICE_GPU_DOT_OPERAND_CONVERTER_H_
#define XLA_SERVICE_GPU_DOT_OPERAND_CONVERTER_H_
#include <utility>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/op_expander_pass.h"
#include "xla/util.h"
namespace xla::gpu {
class DotOperandConverter : public OpExpanderPass {
public:
explicit DotOperandConverter(HloPredicate extra_filter = nullptr)
: OpExpanderPass(std::move(extra_filter)) {}
absl::string_view name() const override { return "operand_converter"; }
protected:
bool InstructionMatchesPattern(HloInstruction* instruction) override;
absl::StatusOr<HloInstruction*> ExpandInstruction(
HloInstruction* instruction) override;
};
}
#endif
#include "xla/service/gpu/dot_operand_converter.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
namespace xla::gpu {
bool DotOperandConverter::InstructionMatchesPattern(
HloInstruction* instruction) {
if (instruction->opcode() != HloOpcode::kDot) {
return false;
}
HloInstruction* lhs = instruction->mutable_operand(0);
HloInstruction* rhs = instruction->mutable_operand(1);
PrimitiveType lhs_type = lhs->shape().element_type();
PrimitiveType rhs_type = rhs->shape().element_type();
if (lhs_type == rhs_type) {
return false;
}
absl::flat_hash_set<PrimitiveType> non_converting = {F8E4M3FN, F8E5M2};
if (non_converting.contains(lhs_type) && non_converting.contains(rhs_type)) {
return false;
}
PrimitiveType desired_type =
ShapeUtil::HigherPrecisionElementType(lhs->shape(), rhs->shape());
return desired_type == lhs_type || desired_type == rhs_type;
}
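// Upcasts the lower-precision dot operand in place: a convert to the higher
// precision element type is created and swapped in with
// ReplaceOperandWithDifferentShape. Returning nullptr signals OpExpanderPass
// that no replacement instruction is needed because the dot was modified in
// place.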
absl::StatusOr<HloInstruction*> DotOperandConverter::ExpandInstruction(
HloInstruction* instruction) {
HloInstruction* lhs = instruction->mutable_operand(0);
HloInstruction* rhs = instruction->mutable_operand(1);
PrimitiveType desired_type =
ShapeUtil::HigherPrecisionElementType(lhs->shape(), rhs->shape());
int operand_index = desired_type == lhs->shape().element_type() ? 1 : 0;
HloInstruction* inst_to_replace =
desired_type == lhs->shape().element_type() ? rhs : lhs;
auto upcast_shape = inst_to_replace->shape();
upcast_shape.set_element_type(desired_type);
auto* convert_inst = instruction->AddInstruction(
HloInstruction::CreateConvert(upcast_shape, inst_to_replace));
TF_RETURN_IF_ERROR(instruction->ReplaceOperandWithDifferentShape(
operand_index, convert_inst));
return nullptr;
}
} | #include "xla/service/gpu/dot_operand_converter.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/primitive_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
namespace op = ::xla::testing::opcode_matchers;
class DotOperandConverterTest : public HloTestBase {
public:
void TestConvert(bool left_less_precise, PrimitiveType lhs_type,
PrimitiveType rhs_type, PrimitiveType result_type) {
absl::string_view module_tmpl = R"(
HloModule module
ENTRY main {
p0 = $0[2,3]{1,0} parameter(0)
p1 = $1[3,2]{1,0} parameter(1)
ROOT dot = $2[2,2]{1,0} dot(p0, p1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
auto module_string = absl::Substitute(
module_tmpl, primitive_util::LowercasePrimitiveTypeName(lhs_type),
primitive_util::LowercasePrimitiveTypeName(rhs_type),
primitive_util::LowercasePrimitiveTypeName(result_type));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool upcasted,
DotOperandConverter().Run(module.get()));
EXPECT_TRUE(upcasted);
if (left_less_precise) {
auto original_lhs = op::Parameter(0);
auto upcasted_lhs =
AllOf(op::Convert(original_lhs),
op::Shape(absl::Substitute(
"$0[2,3]{1,0}",
primitive_util::LowercasePrimitiveTypeName(rhs_type))));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
AllOf(op::Dot(upcasted_lhs, op::Parameter(1)),
op::Shape(absl::Substitute(
"$0[2,2]{1,0}",
primitive_util::LowercasePrimitiveTypeName(result_type)))));
} else {
auto original_rhs = op::Parameter(1);
auto upcasted_rhs =
AllOf(op::Convert(original_rhs),
op::Shape(absl::Substitute(
"$0[3,2]{1,0}",
primitive_util::LowercasePrimitiveTypeName(lhs_type))));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
AllOf(op::Dot(op::Parameter(0), upcasted_rhs),
op::Shape(absl::Substitute(
"$0[2,2]{1,0}",
primitive_util::LowercasePrimitiveTypeName(result_type)))));
}
}
};
TEST_F(DotOperandConverterTest, ConvertsLeftAndRight) {
TestConvert(true, S8, BF16, F32);
TestConvert(false, BF16, S8, F32);
}
TEST_F(DotOperandConverterTest, NoConvertHappensWithSameTypes) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = s8[2,3]{1,0} parameter(0)
p1 = s8[3,2]{1,0} parameter(1)
ROOT dot = bf16[2,2]{1,0} dot(p0, p1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool upcasted,
DotOperandConverter().Run(module.get()));
EXPECT_FALSE(upcasted);
}
TEST_F(DotOperandConverterTest, NoConvertFromF8toF8) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = f8e4m3fn[2,3]{1,0} parameter(0)
p1 = f8e5m2[3,2]{1,0} parameter(1)
ROOT dot = bf16[2,2]{1,0} dot(p0, p1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool upcasted,
DotOperandConverter().Run(module.get()));
EXPECT_FALSE(upcasted);
}
TEST_F(DotOperandConverterTest, CompilerOptimizesUsingDotOperandConverter) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = s8[2,3]{1,0} parameter(0)
p1 = bf16[3,2]{1,0} parameter(1)
ROOT dot = bf16[2,2]{1,0} dot(p0, p1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
GetOptimizedModule(module_string));
}
}
} |
2,098 | cpp | tensorflow/tensorflow | all_reduce_blueconnect | third_party/xla/xla/service/gpu/transforms/all_reduce_blueconnect.cc | third_party/xla/xla/service/gpu/transforms/all_reduce_blueconnect_test.cc | #ifndef XLA_SERVICE_GPU_ALL_REDUCE_BLUECONNECT_H_
#define XLA_SERVICE_GPU_ALL_REDUCE_BLUECONNECT_H_
#include <cstddef>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
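// Decomposes all-reduce operations into a reduce-scatter within each host, a
// smaller all-reduce across hosts, and an all-gather, in the style of the
// BlueConnect algorithm, to exploit the faster intra-host interconnect.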
class AllReduceBlueConnect : public HloModulePass {
public:
explicit AllReduceBlueConnect(size_t num_devices_per_host)
: num_devices_per_host_(num_devices_per_host) {}
absl::string_view name() const override { return "all-reduce-blueconnect"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
size_t num_devices_per_host_;
};
}
#endif
#include "xla/service/gpu/all_reduce_blueconnect.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/btree_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/computation_placer.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
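// Returns the outputs of `instruction`. For a tuple-shaped result, emits one
// get-tuple-element per element; otherwise returns the instruction itself.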
std::vector<HloInstruction*> GetOutputs(HloInstruction& instruction) {
if (!instruction.shape().IsTuple()) {
return {&instruction};
}
std::vector<HloInstruction*> outputs;
outputs.reserve(instruction.shape().tuple_shapes_size());
HloComputation& computation = *instruction.parent();
for (int i = 0; i < instruction.shape().tuple_shapes_size(); ++i) {
outputs.push_back(computation.AddInstruction(
HloInstruction::CreateGetTupleElement(&instruction, i)));
}
return outputs;
}
struct DecomposedReplicaGroups {
std::vector<ReplicaGroup> scatter_gather_groups;
std::vector<ReplicaGroup> new_all_reduce_groups;
};
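// Attempts to split `replica_group` into intra-host scatter/gather groups and
// cross-host all-reduce groups. Returns nullopt when hosts hold differing
// numbers of devices or the group cannot form at least two scatter groups.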
absl::StatusOr<std::optional<DecomposedReplicaGroups>> TryDecomposeReplicaGroup(
const ReplicaGroup& replica_group,
const DeviceAssignment& device_assignment, size_t num_devices_per_host) {
int group_size = replica_group.replica_ids_size();
TF_RET_CHECK(group_size > 0);
absl::btree_map<int, std::vector<int64_t>> replica_ids_by_host;
for (int64_t replica_id : replica_group.replica_ids()) {
int device_id = device_assignment(replica_id, 0);
TF_RET_CHECK(device_id >= 0);
int host_id = device_id / num_devices_per_host;
replica_ids_by_host[host_id].push_back(replica_id);
}
size_t num_local_devices = replica_ids_by_host.begin()->second.size();
bool same_num_devices_on_each_host =
absl::c_all_of(replica_ids_by_host, [&](const auto& entry) {
return entry.second.size() == num_local_devices;
});
if (!same_num_devices_on_each_host) {
return {std::nullopt};
}
std::vector<int64_t> sorted_replica_group;
sorted_replica_group.reserve(group_size);
for (const auto& entry : replica_ids_by_host) {
absl::c_copy(entry.second, std::back_inserter(sorted_replica_group));
}
size_t scatter_group_size = std::max(num_local_devices, size_t(2));
size_t num_scatter_groups = group_size / scatter_group_size;
if ((group_size % scatter_group_size != 0) || (num_scatter_groups < 2)) {
return {std::nullopt};
}
std::vector<ReplicaGroup> scatter_gather_groups(num_scatter_groups);
std::vector<ReplicaGroup> new_all_reduce_groups(scatter_group_size);
for (size_t i = 0; i < group_size; ++i) {
int64_t replica_id = sorted_replica_group[i];
scatter_gather_groups[i / scatter_group_size].add_replica_ids(replica_id);
new_all_reduce_groups[i % scatter_group_size].add_replica_ids(replica_id);
}
return {DecomposedReplicaGroups{std::move(scatter_gather_groups),
std::move(new_all_reduce_groups)}};
}
absl::StatusOr<std::optional<DecomposedReplicaGroups>>
TryDecomposeReplicaGroups(const HloAllReduceInstruction& all_reduce,
size_t num_devices_per_host) {
const DeviceAssignment& device_assignment =
all_reduce.GetModule()->config().static_device_assignment();
absl::Span<const ReplicaGroup> replica_groups = all_reduce.replica_groups();
ReplicaGroup all_replicas;
if (replica_groups.empty()) {
for (int i = 0; i < device_assignment.replica_count(); ++i) {
all_replicas.add_replica_ids(i);
}
replica_groups = absl::MakeSpan(&all_replicas, 1);
}
std::vector<ReplicaGroup> scatter_gather_groups;
std::vector<ReplicaGroup> new_all_reduce_groups;
for (const ReplicaGroup& replica_group : replica_groups) {
TF_ASSIGN_OR_RETURN(
std::optional<DecomposedReplicaGroups> decomposed_groups,
TryDecomposeReplicaGroup(replica_group, device_assignment,
num_devices_per_host));
if (!decomposed_groups) return {std::nullopt};
int scatter_group_size =
decomposed_groups->scatter_gather_groups[0].replica_ids_size();
if (scatter_gather_groups.empty()) {
for (const HloInstruction* operand : all_reduce.operands()) {
TF_RET_CHECK(operand->shape().IsArray());
int64_t num_elements = ShapeUtil::ElementsIn(operand->shape());
if (num_elements % scatter_group_size != 0) {
return {std::nullopt};
}
}
scatter_gather_groups.reserve(
replica_groups.size() *
decomposed_groups->scatter_gather_groups.size());
new_all_reduce_groups.reserve(
replica_groups.size() *
decomposed_groups->new_all_reduce_groups.size());
} else if (scatter_group_size !=
scatter_gather_groups[0].replica_ids_size()) {
return {std::nullopt};
}
absl::c_move(decomposed_groups->scatter_gather_groups,
std::back_inserter(scatter_gather_groups));
absl::c_move(decomposed_groups->new_all_reduce_groups,
std::back_inserter(new_all_reduce_groups));
}
return {DecomposedReplicaGroups{std::move(scatter_gather_groups),
std::move(new_all_reduce_groups)}};
}
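// Rewrites `all_reduce` as reduce-scatter + all-reduce + all-gather over the
// decomposed replica groups, then recurses on the inner all-reduce so deeper
// hierarchies yield multi-stage decompositions. Returns true if a rewrite
// happened.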
absl::StatusOr<bool> TryDecomposeAllReduce(HloAllReduceInstruction* all_reduce,
size_t num_devices_per_host) {
TF_RET_CHECK(all_reduce);
TF_RET_CHECK(!all_reduce->has_sharding());
HloComputation& computation = *all_reduce->parent();
PrimitiveType element_type = all_reduce->operand(0)->shape().element_type();
TF_ASSIGN_OR_RETURN(
std::optional<DecomposedReplicaGroups> decomposed_groups,
TryDecomposeReplicaGroups(*all_reduce, num_devices_per_host));
if (!decomposed_groups) return false;
std::vector<HloInstruction*> flat_operands;
flat_operands.reserve(all_reduce->operand_count());
std::vector<Shape> flat_shapes;
flat_shapes.reserve(all_reduce->operand_count());
std::vector<Shape> scattered_shapes;
scattered_shapes.reserve(all_reduce->operand_count());
int scatter_group_size =
decomposed_groups->scatter_gather_groups[0].replica_ids_size();
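  // Bitcast each operand to a rank-1 shape so its elements divide evenly
  // across the scatter group.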
for (HloInstruction* operand : all_reduce->operands()) {
TF_RET_CHECK(operand->shape().IsArray());
int64_t num_elements = ShapeUtil::ElementsIn(operand->shape());
Shape flat_shape = ShapeUtil::MakeShape(element_type, {num_elements});
flat_operands.push_back(computation.AddInstruction(
HloInstruction::CreateBitcast(flat_shape, operand)));
flat_shapes.push_back(std::move(flat_shape));
scattered_shapes.push_back(ShapeUtil::MakeShape(
element_type, {num_elements / scatter_group_size}));
}
Shape reduce_scatter_shape = ShapeUtil::MakeMaybeTupleShape(scattered_shapes);
HloInstruction* reduce_scatter =
computation.AddInstruction(HloInstruction::CreateReduceScatter(
reduce_scatter_shape, flat_operands, all_reduce->to_apply(),
CollectiveDeviceList(decomposed_groups->scatter_gather_groups),
false, all_reduce->channel_id(),
all_reduce->use_global_device_ids(),
0));
HloInstruction* new_all_reduce =
computation.AddInstruction(HloInstruction::CreateAllReduce(
reduce_scatter_shape, GetOutputs(*reduce_scatter),
all_reduce->to_apply(),
CollectiveDeviceList(decomposed_groups->new_all_reduce_groups),
false, all_reduce->channel_id(),
all_reduce->use_global_device_ids()));
HloInstruction* all_gather =
computation.AddInstruction(HloInstruction::CreateAllGather(
ShapeUtil::MakeMaybeTupleShape(flat_shapes),
GetOutputs(*new_all_reduce),
0,
CollectiveDeviceList(decomposed_groups->scatter_gather_groups),
false, all_reduce->channel_id(),
all_reduce->use_global_device_ids()));
std::vector<HloInstruction*> outputs = GetOutputs(*all_gather);
for (int64_t i = 0; i < outputs.size(); ++i) {
outputs[i] = computation.AddInstruction(HloInstruction::CreateBitcast(
all_reduce->operand(i)->shape(), outputs[i]));
}
HloInstruction* replacement = MaybeMakeTuple(outputs);
TF_RETURN_IF_ERROR(
all_reduce->CopyAllControlDepsTo(reduce_scatter, replacement));
TF_RETURN_IF_ERROR(all_reduce->DropAllControlDeps());
TF_RETURN_IF_ERROR(computation.ReplaceInstruction(all_reduce, replacement));
TF_RETURN_IF_ERROR(
TryDecomposeAllReduce(Cast<HloAllReduceInstruction>(new_all_reduce),
num_devices_per_host)
.status());
return true;
}
}
absl::StatusOr<bool> AllReduceBlueConnect::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(1) << "Running AllReduceBlueConnect";
if (hlo_query::ContainsLayoutConstrainedAllReduce(*module)) {
VLOG(1)
<< "Skip AllReduceBlueConnect because the module contains all-reduce "
"with constrained layouts";
return false;
}
if (!module->config().has_static_device_assignment()) {
VLOG(1)
<< "Skip AllReduceBlueConnect because the module doesn't have static "
"device assignment";
return false;
}
std::vector<HloAllReduceInstruction*> all_reduces;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kAllReduce) {
all_reduces.push_back(Cast<HloAllReduceInstruction>(instruction));
}
}
}
bool changed = false;
for (HloAllReduceInstruction* all_reduce : all_reduces) {
TF_ASSIGN_OR_RETURN(
bool all_reduce_changed,
TryDecomposeAllReduce(all_reduce, num_devices_per_host_));
changed |= all_reduce_changed;
}
return changed;
}
} | #include "xla/service/gpu/all_reduce_blueconnect.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/computation_placer.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::tsl::testing::IsOkAndHolds;
namespace m = ::xla::match;
using AllReduceBlueConnectTest = HloTestBase;
void SetModuleConfig(HloModule& module, size_t replica_count) {
DeviceAssignment device_assignment(replica_count, 1);
device_assignment.FillIota(0);
auto& module_config = module.mutable_config();
module_config.set_replica_count(replica_count);
module_config.set_static_device_assignment(device_assignment);
}
TEST_F(AllReduceBlueConnectTest, OneStage) {
constexpr absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[4,4] parameter(0)
ROOT crs = f32[4,4] all-reduce(p0), to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SetModuleConfig(*module, 8);
AllReduceBlueConnect pass(4);
EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(true));
std::vector<std::vector<int64_t>> scatter_gather_groups = {
{0, 1, 2, 3}, {4, 5, 6, 7}};
std::vector<std::vector<int64_t>> new_all_reduce_groups = {
{0, 4}, {1, 5}, {2, 6}, {3, 7}};
auto bitcast = m::Bitcast(m::Parameter(0)).WithShape(F32, {16});
auto reduce_scatter =
m::ReduceScatter(bitcast).WithShape(F32, {4}).WithReplicaGroups(
scatter_gather_groups);
auto all_reduce = m::AllReduce(reduce_scatter)
.WithShape(F32, {4})
.WithReplicaGroups(new_all_reduce_groups);
auto all_gather = m::AllGather(all_reduce)
.WithShape(F32, {16})
.WithReplicaGroups(scatter_gather_groups);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Bitcast(all_gather).WithShape(F32, {4, 4})));
}
TEST_F(AllReduceBlueConnectTest, TwoStage) {
constexpr absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[4,4] parameter(0)
ROOT crs = f32[4,4] all-reduce(p0), to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SetModuleConfig(*module, 16);
AllReduceBlueConnect pass(4);
EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(true));
std::vector<std::vector<int64_t>> outer_scatter_gather_groups = {
{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}, {12, 13, 14, 15}};
std::vector<std::vector<int64_t>> inner_scatter_gather_groups = {
{0, 4}, {8, 12}, {1, 5}, {9, 13}, {2, 6}, {10, 14}, {3, 7}, {11, 15}};
std::vector<std::vector<int64_t>> new_all_reduce_groups = {
{0, 8}, {4, 12}, {1, 9}, {5, 13}, {2, 10}, {6, 14}, {3, 11}, {7, 15}};
auto bitcast0 = m::Bitcast(m::Parameter(0)).WithShape(F32, {16});
auto reduce_scatter0 =
m::ReduceScatter(bitcast0).WithShape(F32, {4}).WithReplicaGroups(
outer_scatter_gather_groups);
auto bitcast1 = m::Bitcast(reduce_scatter0).WithShape(F32, {4});
auto reduce_scatter1 =
m::ReduceScatter(bitcast1).WithShape(F32, {2}).WithReplicaGroups(
inner_scatter_gather_groups);
auto all_reduce = m::AllReduce(reduce_scatter1)
.WithShape(F32, {2})
.WithReplicaGroups(new_all_reduce_groups);
auto all_gather0 = m::AllGather(all_reduce)
.WithShape(F32, {4})
.WithReplicaGroups(inner_scatter_gather_groups);
auto bitcast2 = m::Bitcast(all_gather0).WithShape(F32, {4});
auto all_gather1 =
m::AllGather(bitcast2).WithShape(F32, {16}).WithReplicaGroups(
outer_scatter_gather_groups);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Bitcast(all_gather1).WithShape(F32, {4, 4})));
}
TEST_F(AllReduceBlueConnectTest, TwoOperands) {
constexpr absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[4,4] parameter(0)
p1 = f32[4,4,2] parameter(1)
ROOT crs = (f32[4,4], f32[4,4,2]) all-reduce(p0, p1), to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SetModuleConfig(*module, 8);
AllReduceBlueConnect pass(4);
EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(true));
std::vector<std::vector<int64_t>> scatter_gather_groups = {
{0, 1, 2, 3}, {4, 5, 6, 7}};
std::vector<std::vector<int64_t>> new_all_reduce_groups = {
{0, 4}, {1, 5}, {2, 6}, {3, 7}};
auto bitcast0 = m::Bitcast(m::Parameter(0)).WithShape(F32, {16});
auto bitcast1 = m::Bitcast(m::Parameter(1)).WithShape(F32, {32});
Shape expected0 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {4}), ShapeUtil::MakeShape(F32, {8})});
Shape expected1 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {16}), ShapeUtil::MakeShape(F32, {32})});
auto reduce_scatter = m::ReduceScatter(bitcast0, bitcast1)
.WithShapeEqualTo(&expected0)
.WithReplicaGroups(scatter_gather_groups);
auto all_reduce = m::AllReduce(m::GetTupleElement(reduce_scatter, 0),
m::GetTupleElement(reduce_scatter, 1))
.WithShapeEqualTo(&expected0)
.WithReplicaGroups(new_all_reduce_groups);
auto all_gather = m::AllGather(m::GetTupleElement(all_reduce, 0),
m::GetTupleElement(all_reduce, 1))
.WithShapeEqualTo(&expected1)
.WithReplicaGroups(scatter_gather_groups);
auto bitcast2 =
m::Bitcast(m::GetTupleElement(all_gather, 0)).WithShape(F32, {4, 4});
auto bitcast3 =
m::Bitcast(m::GetTupleElement(all_gather, 1)).WithShape(F32, {4, 4, 2});
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(bitcast2, bitcast3)));
}
TEST_F(AllReduceBlueConnectTest, DifferentNumLocalDevicesWithinReplicaGroup) {
constexpr absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[4,4] parameter(0)
ROOT crs = f32[4,4] all-reduce(p0),
replica_groups={{0,1,2,7},{3,4,5,6}}, to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SetModuleConfig(*module, 8);
AllReduceBlueConnect pass(4);
EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(false));
}
TEST_F(AllReduceBlueConnectTest, DifferentNumLocalDevicesAcrossReplicaGroups) {
constexpr absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[4,4] parameter(0)
ROOT crs = f32[4,4] all-reduce(p0),
replica_groups={{0,1,4,5},{2,3,6,7},{8,9,10,11},{12,13,14,15}}, to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SetModuleConfig(*module, 16);
AllReduceBlueConnect pass(4);
EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(false));
}
TEST_F(AllReduceBlueConnectTest, OperandIndivisible) {
constexpr absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[4,4] parameter(0)
p1 = f32[9] parameter(1)
ROOT crs = (f32[4,4], f32[9]) all-reduce(p0, p1), to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SetModuleConfig(*module, 8);
AllReduceBlueConnect pass(4);
EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(false));
}
TEST_F(AllReduceBlueConnectTest, ControlDeps) {
constexpr absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[4,4] parameter(0)
p1 = f32[4,4] parameter(1)
add = f32[4,4] add(p0, p1)
crs = f32[4,4] all-reduce(p0), to_apply=add, control-predecessors={add}
ROOT add1 = f32[4,4] add(crs, add), control-predecessors={crs}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SetModuleConfig(*module, 8);
const HloInstruction* ar =
module->entry_computation()->root_instruction()->operand(0);
auto expected_preds = ar->control_predecessors();
auto expected_succs = ar->control_successors();
AllReduceBlueConnect pass(4);
EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(true));
std::vector<std::vector<int64_t>> scatter_gather_groups = {
{0, 1, 2, 3}, {4, 5, 6, 7}};
std::vector<std::vector<int64_t>> new_all_reduce_groups = {
{0, 4}, {1, 5}, {2, 6}, {3, 7}};
const HloInstruction *matched_rs, *matched_bitcast;
auto bitcast = m::Bitcast(m::Parameter(0)).WithShape(F32, {16});
auto reduce_scatter = m::ReduceScatter(&matched_rs, bitcast)
.WithShape(F32, {4})
.WithReplicaGroups(scatter_gather_groups);
auto all_reduce = m::AllReduce(reduce_scatter)
.WithShape(F32, {4})
.WithReplicaGroups(new_all_reduce_groups);
auto all_gather = m::AllGather(all_reduce)
.WithShape(F32, {16})
.WithReplicaGroups(scatter_gather_groups);
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_THAT(root, GmockMatch(m::Add()));
EXPECT_THAT(
root->operand(0),
GmockMatch(
m::Bitcast(&matched_bitcast, all_gather).WithShape(F32, {4, 4})));
EXPECT_THAT(matched_rs, GmockMatch(m::Op().WithControlDeps(
absl::MakeSpan(expected_preds), {})));
EXPECT_THAT(matched_bitcast, GmockMatch(m::Op().WithControlDeps(
{}, absl::MakeSpan(expected_succs))));
}
}
} |
2,099 | cpp | tensorflow/tensorflow | move_copy_to_users | third_party/xla/xla/service/gpu/transforms/move_copy_to_users.cc | third_party/xla/xla/service/gpu/transforms/move_copy_to_users_test.cc | #ifndef XLA_SERVICE_GPU_MOVE_COPY_TO_USERS_H_
#define XLA_SERVICE_GPU_MOVE_COPY_TO_USERS_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
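// Sinks layout-changing copies toward their users: rewrites op(copy(x)) into
// copy(op(x)) so each copy sits next to its consumer, where it is easier to
// elide or combine with other layout changes.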
class MoveCopyToUsers : public HloModulePass {
public:
absl::string_view name() const override { return "move_copy_to_users"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/gpu/move_copy_to_users.h"
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/service/hlo_creation_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class MoveCopyToUsersVisitor : public DfsHloRewriteVisitor {
absl::Status HandlePad(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
HloInstruction* c = hlo->mutable_operand(1);
if (operand->opcode() == HloOpcode::kCopy) {
HloInstruction* copied = operand->mutable_operand(0);
TF_ASSIGN_OR_RETURN(
HloInstruction * earlier_pad,
MakePadHlo(copied, c, hlo->padding_config(), &hlo->metadata()));
*earlier_pad->mutable_shape()->mutable_layout() =
copied->shape().layout();
HloInstruction* later_copy = MakeCopyHlo(earlier_pad, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
return absl::OkStatus();
}
absl::Status HandleSlice(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
if (operand->opcode() == HloOpcode::kCopy) {
HloInstruction* copied = operand->mutable_operand(0);
TF_ASSIGN_OR_RETURN(
HloInstruction * earlier_slice,
MakeSliceHlo(copied, hlo->slice_starts(), hlo->slice_limits(),
hlo->slice_strides(), &hlo->metadata()));
*earlier_slice->mutable_shape()->mutable_layout() =
copied->shape().layout();
HloInstruction* later_copy = MakeCopyHlo(earlier_slice, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
return absl::OkStatus();
}
absl::Status HandleDynamicSlice(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
if (operand->opcode() == HloOpcode::kCopy) {
HloInstruction* copied = operand->mutable_operand(0);
TF_ASSIGN_OR_RETURN(
HloInstruction * earlier_slice,
MakeDynamicSliceHlo(
copied,
absl::Span<HloInstruction* const>(hlo->operands()).subspan(1),
hlo->dynamic_slice_sizes(), &hlo->metadata()));
*earlier_slice->mutable_shape()->mutable_layout() =
copied->shape().layout();
HloInstruction* later_copy = MakeCopyHlo(earlier_slice, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
return absl::OkStatus();
}
absl::Status HandleReduceWindow(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
if (operand->opcode() == HloOpcode::kCopy) {
HloInstruction* copied = operand->mutable_operand(0);
TF_ASSIGN_OR_RETURN(
HloInstruction * earlier_reduce_window,
MakeReduceWindowHlo(copied, hlo->mutable_operand(1), hlo->window(),
hlo->called_computations()[0], &hlo->metadata()));
*earlier_reduce_window->mutable_shape()->mutable_layout() =
copied->shape().layout();
HloInstruction* later_copy =
MakeCopyHlo(earlier_reduce_window, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
return absl::OkStatus();
}
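  // Tuple-shaped (variadic) reduces are left alone; for the single-output
  // case, the copy is absorbed by cloning the reduce onto the copied operand.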
absl::Status HandleReduce(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
if (operand->opcode() == HloOpcode::kCopy && !hlo->shape().IsTuple()) {
HloInstruction* new_reduce = hlo->AddInstruction(
hlo->CloneWithNewOperands(hlo->shape(), {operand->mutable_operand(0),
hlo->mutable_operand(1)}));
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, new_reduce));
}
return absl::OkStatus();
}
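  // Bitcast-convert can change the bit width and hence the shape, so sinking
  // the copy is deliberately skipped here.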
absl::Status HandleBitcastConvert(HloInstruction* hlo) override {
return absl::OkStatus();
}
absl::Status HandleElementwiseUnary(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
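    // Reduce-precision carries exponent/mantissa attributes that MakeUnaryHlo
    // cannot reproduce, so leave it alone.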
if (hlo->opcode() == HloOpcode::kReducePrecision) {
return absl::OkStatus();
}
if (operand->opcode() == HloOpcode::kCopy) {
HloInstruction* copied = operand->mutable_operand(0);
TF_ASSIGN_OR_RETURN(
HloInstruction * earlier_elementwise,
MakeUnaryHlo(hlo->opcode(), copied, &hlo->metadata()));
HloInstruction* later_copy =
MakeCopyHlo(earlier_elementwise, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
return absl::OkStatus();
}
absl::Status HandleReverse(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
if (operand->opcode() == HloOpcode::kCopy) {
HloInstruction* copied = operand->mutable_operand(0);
TF_ASSIGN_OR_RETURN(
HloInstruction * earlier_reverse,
MakeReverseHlo(copied, hlo->dimensions(), &hlo->metadata()));
HloInstruction* later_copy = MakeCopyHlo(earlier_reverse, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
return absl::OkStatus();
}
absl::Status HandleConvert(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
if (operand->opcode() == HloOpcode::kCopy) {
HloInstruction* copied = operand->mutable_operand(0);
HloInstruction* earlier_convert = MakeConvertToHlo(
copied, hlo->shape().element_type(), &hlo->metadata());
HloInstruction* later_copy = MakeCopyHlo(earlier_convert, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
return absl::OkStatus();
}
absl::Status HandleElementwiseBinary(HloInstruction* hlo) override {
HloInstruction* a = hlo->mutable_operand(0);
HloInstruction* b = hlo->mutable_operand(1);
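    // Only sink when both operands are copies whose sources agree on shape
    // (including layout).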
if (a->opcode() == HloOpcode::kCopy && b->opcode() == HloOpcode::kCopy) {
HloInstruction* copied_a = a->mutable_operand(0);
HloInstruction* copied_b = b->mutable_operand(0);
if (copied_a->shape() == copied_b->shape()) {
HloInstruction* earlier_elementwise;
if (hlo->opcode() == HloOpcode::kCompare) {
TF_ASSIGN_OR_RETURN(
earlier_elementwise,
MakeCompareHlo(hlo->comparison_direction(), copied_a, copied_b,
&hlo->metadata()));
} else {
TF_ASSIGN_OR_RETURN(earlier_elementwise,
MakeBinaryHlo(hlo->opcode(), copied_a, copied_b,
&hlo->metadata()));
}
HloInstruction* later_copy =
MakeCopyHlo(earlier_elementwise, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
}
return absl::OkStatus();
}
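  // Sinks the copy across a concatenate only when every operand is a copy
  // whose source has the same layout.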
absl::Status HandleConcatenate(HloInstruction* hlo) override {
const HloInstruction* first = hlo->operand(0);
if (first->opcode() != HloOpcode::kCopy) {
return absl::OkStatus();
}
const HloInstruction* inner_op = first->operand(0);
const Layout& inner_op_layout = inner_op->shape().layout();
std::vector<HloInstruction*> new_operands;
new_operands.reserve(hlo->operand_count());
for (HloInstruction* op : hlo->mutable_operands()) {
if (op->opcode() != HloOpcode::kCopy ||
op->operand(0)->shape().layout() != inner_op_layout) {
VLOG(3) << "Mismatch between " << op->ToString()
<< " and expected op layout " << inner_op_layout.ToString();
return absl::OkStatus();
}
new_operands.push_back(op->mutable_operand(0));
}
TF_ASSIGN_OR_RETURN(
HloInstruction * new_concat,
MakeConcatHlo(new_operands, hlo->concatenate_dimension()));
*new_concat->mutable_shape()->mutable_layout() = inner_op_layout;
HloInstruction* new_copy = MakeCopyHlo(new_concat, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, new_copy));
return absl::OkStatus();
}
};
}
absl::StatusOr<bool> MoveCopyToUsers::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return MoveCopyToUsersVisitor{}.RunOnModule(module, execution_threads);
}
} | #include "xla/service/gpu/move_copy_to_users.h"
#include <optional>
#include "absl/strings/string_view.h"
#include "xla/service/layout_assignment.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class MoveCopyToUsersTest : public HloTestBase {
public:
MoveCopyToUsersTest()
: HloTestBase(true,
true,
LayoutAssignment::InstructionCanChangeLayout) {}
void CheckMoveCopyToUsers(absl::string_view hlo,
std::optional<absl::string_view> expected) {
RunAndFilecheckHloRewrite(hlo, MoveCopyToUsers{}, expected);
}
};
TEST_F(MoveCopyToUsersTest, Pad) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = s8[1,17,9,9]{3,1,2,0} parameter(0)
copy = s8[1,17,9,9]{1,3,2,0} copy(input)
constant = s8[] constant(0)
ROOT pad = s8[1,32,9,9]{1,3,2,0} pad(copy, constant), padding=0_0x0_15x0_0x0_0
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, Unary) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,1,0} parameter(0)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
ROOT pad = f32[1,17,9,9]{1,3,2,0} sqrt(copy)
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, Reverse) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,1,0} parameter(0)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
ROOT pad = f32[1,17,9,9]{1,3,2,0} reverse(copy), dimensions={1,2}
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, Convert) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,1,0} parameter(0)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
ROOT converted = f16[1,17,9,9]{1,3,2,0} convert(copy)
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, Slice) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,1,0} parameter(0)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
ROOT slice = f32[1,4,6,6]{1,3,2,0} slice(copy), slice={[0:1],[0:4],[0:6],[0:6]}
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, DynamicSlice) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,1,0} parameter(0)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
p0 = s32[] parameter(1)
p1 = s32[] parameter(2)
p2 = s32[] parameter(3)
p3 = s32[] parameter(4)
ROOT ds = f32[1,4,6,6]{1,3,2,0} dynamic-slice(copy, p0, p1, p2, p3), dynamic_slice_sizes={1,4,6,6}
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, ReduceWindow) {
const char* hlo = R"(
HloModule R2Window
mul {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT mul = f32[] multiply(lhs, rhs)
}
ENTRY R2Window {
operand = f32[256,384]{1,0} parameter(0)
c = f32[256,384]{0,1} copy(operand)
constant = f32[] constant(1)
ROOT reduce-window = f32[256,384]{0,1} reduce-window(c, constant), window={size=2x3 pad=0_1x1_1}, to_apply=mul
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, Reduce) {
const char* hlo = R"(
HloModule R2
mul {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT mul = f32[] multiply(lhs, rhs)
}
ENTRY R2 {
operand = f32[256,384,10]{2,1,0} parameter(0)
c = f32[256,384,10]{0,1,2} copy(operand)
constant = f32[] constant(1)
ROOT reduce = f32[384,10]{0,1} reduce(c, constant), dimensions={0}, to_apply=mul
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, Binary) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,1,0} parameter(0)
input2 = f32[1,17,9,9]{3,2,1,0} parameter(1)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
copy2 = f32[1,17,9,9]{1,3,2,0} copy(input2)
ROOT add = f32[1,17,9,9]{1,3,2,0} add(copy, copy2)
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, BinaryDifferentLayoutNoChange) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,0,1} parameter(0)
input2 = f32[1,17,9,9]{3,2,1,0} parameter(1)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
copy2 = f32[1,17,9,9]{1,3,2,0} copy(input2)
ROOT add = f32[1,17,9,9]{1,3,2,0} add(copy, copy2)
}
)";
CheckMoveCopyToUsers(hlo, std::nullopt);
}
TEST_F(MoveCopyToUsersTest, Concat) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,1,0} parameter(0)
input2 = f32[5,17,9,9]{3,2,1,0} parameter(1)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
copy2 = f32[5,17,9,9]{1,3,2,0} copy(input2)
ROOT add = f32[6,17,9,9]{1,3,2,0} concatenate(copy, copy2), dimensions={0}
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, ConcatDifferentLayoutNoChange) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,0,1} parameter(0)
input2 = f32[1,17,9,9]{3,2,1,0} parameter(1)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
copy2 = f32[1,17,9,9]{1,3,2,0} copy(input2)
ROOT add = f32[2,17,9,9]{1,3,2,0} concatenate(copy, copy2), dimensions={0}
}
)";
CheckMoveCopyToUsers(hlo, std::nullopt);
}
}
} |