ID
stringlengths 36
36
| Language
stringclasses 1
value | Repository Name
stringclasses 13
values | File Name
stringlengths 2
44
| File Path in Repository
stringlengths 11
111
| File Path for Unit Test
stringlengths 16
116
| Code
stringlengths 0
278k
| Unit Test - (Ground Truth)
stringlengths 127
663k
| Code Url
stringlengths 91
198
| Test Code Url
stringlengths 96
203
| Commit Hash
stringclasses 13
values |
---|---|---|---|---|---|---|---|---|---|---|
383cd2d6-6362-4b74-9646-61a16848ebcc | cpp | google/arolla | concat | arolla/jagged_shape/dense_array/util/concat.h | arolla/jagged_shape/util/concat_test.cc | #ifndef AROLLA_JAGGED_SHAPE_DENSE_ARRAY_UTIL_CONCAT_H_
#define AROLLA_JAGGED_SHAPE_DENSE_ARRAY_UTIL_CONCAT_H_
#include <cstdint>
#include "absl/types/span.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/jagged_shape/util/concat.h"
namespace arolla {
namespace jagged_shape_internal {
// Specialization for DenseArray: creates a DenseArrayBuilder large enough to
// hold the concatenation of all input arrays.
template <typename T>
struct ConcatResultArrayBuilderHelper<DenseArray<T>> {
  DenseArrayBuilder<T> operator()(
      absl::Span<const DenseArray<T>> arrays) const {
    int64_t total_size = 0;
    for (size_t i = 0; i < arrays.size(); ++i) {
      total_size += arrays[i].size();
    }
    return DenseArrayBuilder<T>(total_size);
  }
};
}
}
#endif | #include "arolla/jagged_shape/util/concat.h"
#include <cmath>
#include <cstdint>
#include <utility>
#include <vector>
#include "benchmark/benchmark.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/array/array.h"
#include "arolla/array/edge.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/edge.h"
#include "arolla/jagged_shape/array/jagged_shape.h"
#include "arolla/jagged_shape/array/util/concat.h"
#include "arolla/jagged_shape/dense_array/jagged_shape.h"
#include "arolla/jagged_shape/dense_array/util/concat.h"
#include "arolla/jagged_shape/jagged_shape.h"
#include "arolla/jagged_shape/testing/matchers.h"
#include "arolla/memory/buffer.h"
#include "arolla/memory/optional_value.h"
using ::absl_testing::StatusIs;
using ::arolla::testing::IsEquivalentTo;
using ::testing::ElementsAre;
namespace arolla {
namespace {
// Adapter exposing a uniform static interface over JaggedArrayShape so the
// typed tests below can run against the Array-backed implementation.
class JaggedArrayShapeHelper {
 public:
  using Shape = JaggedArrayShape;
  using Edge = Shape::Edge;

  static absl::string_view ReprName() { return "JaggedArrayShape"; }

  // Builds an edge from cumulative split points (first element must be 0).
  static absl::StatusOr<ArrayEdge> EdgeFromSplitPoints(
      absl::Span<const OptionalValue<int64_t>> split_points) {
    return ArrayEdge::FromSplitPoints(CreateArray<int64_t>(split_points));
  }

  // Builds an edge from a child->parent mapping over `parent_size` parents.
  static absl::StatusOr<ArrayEdge> EdgeFromMapping(
      absl::Span<const OptionalValue<int64_t>> mapping, int64_t parent_size) {
    return ArrayEdge::FromMapping(CreateArray<int64_t>(mapping), parent_size);
  }

  // Extracts the raw split-point buffer; assumes the edge values are stored
  // in dense form.
  static const Buffer<int64_t>& GetSplitPoints(const ArrayEdge& edge) {
    return edge.edge_values().dense_data().values;
  }

  template <typename T>
  static Array<T> MakeArray(absl::Span<const OptionalValue<T>> data) {
    return CreateArray<T>(data);
  }

  // Builds a constant array of `size` copies of `value`.
  template <typename T>
  static Array<T> MakeArray(int64_t size, const T& value) {
    return Array<T>(CreateConstDenseArray<T>(size, value));
  }
};
// Adapter exposing the same static interface over JaggedDenseArrayShape, the
// DenseArray-backed implementation (mirrors JaggedArrayShapeHelper).
class JaggedDenseArrayShapeHelper {
 public:
  using Shape = JaggedDenseArrayShape;
  using Edge = Shape::Edge;

  static absl::string_view ReprName() { return "JaggedShape"; }

  // Builds an edge from cumulative split points (first element must be 0).
  static absl::StatusOr<DenseArrayEdge> EdgeFromSplitPoints(
      absl::Span<const OptionalValue<int64_t>> split_points) {
    return DenseArrayEdge::FromSplitPoints(
        CreateDenseArray<int64_t>(split_points));
  }

  // Builds an edge from a child->parent mapping over `parent_size` parents.
  static absl::StatusOr<DenseArrayEdge> EdgeFromMapping(
      absl::Span<const OptionalValue<int64_t>> mapping, int64_t parent_size) {
    return DenseArrayEdge::FromMapping(CreateDenseArray<int64_t>(mapping),
                                       parent_size);
  }

  // Dense edges store split points directly; no dense_data() indirection.
  static const Buffer<int64_t>& GetSplitPoints(const DenseArrayEdge& edge) {
    return edge.edge_values().values;
  }

  template <typename T>
  static DenseArray<T> MakeArray(absl::Span<const OptionalValue<T>> data) {
    return CreateDenseArray<T>(data);
  }

  // Builds a constant array of `size` copies of `value`.
  template <typename T>
  static DenseArray<T> MakeArray(int64_t size, const T& value) {
    return CreateConstDenseArray<T>(size, value);
  }
};
// Typed fixture parameterized over a shape-helper so identical tests run
// against both the Array- and DenseArray-backed jagged shapes.
template <typename JaggedShapeHelper>
class ConcatTest : public ::testing::Test {
 public:
  using Helper = JaggedShapeHelper;
  using Shape = typename JaggedShapeHelper::Shape;

  // Builds a jagged shape from per-dimension row sizes by converting each
  // size list into cumulative split points (starting at 0).
  Shape MakeShape(absl::Span<const absl::Span<const int64_t>> shape) {
    std::vector<typename Helper::Edge> edges;
    edges.reserve(shape.size());
    for (const auto& edge_sizes : shape) {
      std::vector<OptionalValue<int64_t>> split_points;
      split_points.reserve(edge_sizes.size() + 1);
      split_points.push_back(0);
      for (int64_t edge_size : edge_sizes) {
        // Running sum turns sizes into split points.
        split_points.push_back(split_points.back().value + edge_size);
      }
      auto edge = Helper::EdgeFromSplitPoints(split_points).value();
      edges.push_back(std::move(edge));
    }
    return Shape::FromEdges(std::move(edges)).value();
  }

  // Wraps plain values into present OptionalValues and delegates to the
  // helper's array factory.
  template <typename T>
  auto MakeArray(absl::Span<const T> values) {
    std::vector<OptionalValue<T>> array_values;
    array_values.reserve(values.size());
    for (const T& value : values) {
      array_values.push_back(OptionalValue<T>(value));
    }
    return Helper::MakeArray(absl::MakeConstSpan(array_values));
  }
};
using ConcatTestTypes =
    ::testing::Types<JaggedArrayShapeHelper, JaggedDenseArrayShapeHelper>;
TYPED_TEST_SUITE(ConcatTest, ConcatTestTypes);

// Covers Stack/ConcatJaggedShapesAlongDimension: success along the first,
// middle and last dimensions, plus all error conditions.
TYPED_TEST(ConcatTest, StackOrConcatJaggedShapesAlongDimension) {
  {
    // Stacking one rank-0 shape adds a leading unit dimension.
    ASSERT_OK_AND_ASSIGN(
        typename TestFixture::Shape result_shape,
        StackJaggedShapesAlongDimension<typename TestFixture::Shape>(
            {
                TestFixture::Shape::Empty(),
            },
            0));
    EXPECT_THAT(result_shape, IsEquivalentTo(TestFixture::MakeShape({{1}})));
  }
  {
    // Stacking three rank-0 shapes -> rank-1 shape of size 3.
    ASSERT_OK_AND_ASSIGN(
        typename TestFixture::Shape result_shape,
        StackJaggedShapesAlongDimension<typename TestFixture::Shape>(
            {
                TestFixture::Shape::Empty(),
                TestFixture::Shape::Empty(),
                TestFixture::Shape::Empty(),
            },
            0));
    EXPECT_THAT(result_shape, IsEquivalentTo(TestFixture::MakeShape({{3}})));
  }
  {
    // Stacking along dim 0 increases rank by one; inputs become siblings
    // under the new leading dimension.
    ASSERT_OK_AND_ASSIGN(
        typename TestFixture::Shape result_shape,
        StackJaggedShapesAlongDimension<typename TestFixture::Shape>(
            {
                TestFixture::MakeShape({{2}, {1, 2}, {3, 4, 5}}),
                TestFixture::MakeShape({{3}, {1, 3, 1}, {6, 7, 8, 9, 10}}),
            },
            0));
    EXPECT_THAT(result_shape, IsEquivalentTo(TestFixture::MakeShape({
                                  {2},
                                  {2, 3},
                                  {1, 2, 1, 3, 1},
                                  {3, 4, 5, 6, 7, 8, 9, 10},
                              })));
  }
  {
    // Concat along dim 0 keeps rank; the leading sizes are summed.
    ASSERT_OK_AND_ASSIGN(
        typename TestFixture::Shape result_shape,
        ConcatJaggedShapesAlongDimension<typename TestFixture::Shape>(
            {
                TestFixture::MakeShape({{2}, {1, 2}, {3, 4, 5}}),
                TestFixture::MakeShape({{3}, {1, 3, 1}, {6, 7, 8, 9, 10}}),
            },
            0));
    EXPECT_THAT(result_shape, IsEquivalentTo(TestFixture::MakeShape({
                                  {5},
                                  {1, 2, 1, 3, 1},
                                  {3, 4, 5, 6, 7, 8, 9, 10},
                              })));
  }
  {
    // Stacking along a middle dimension interleaves children per parent.
    ASSERT_OK_AND_ASSIGN(
        typename TestFixture::Shape result_shape,
        StackJaggedShapesAlongDimension<typename TestFixture::Shape>(
            {
                TestFixture::MakeShape({{2}, {1, 2}, {3, 4, 5}}),
                TestFixture::MakeShape({{2}, {1, 3}, {6, 7, 8, 9}}),
            },
            1));
    EXPECT_THAT(result_shape, IsEquivalentTo(TestFixture::MakeShape({
                                  {2},
                                  {2, 2},
                                  {1, 1, 2, 3},
                                  {3, 6, 4, 5, 7, 8, 9},
                              })));
  }
  {
    // Concat along a middle dimension merges each parent's children.
    ASSERT_OK_AND_ASSIGN(
        typename TestFixture::Shape result_shape,
        ConcatJaggedShapesAlongDimension<typename TestFixture::Shape>(
            {
                TestFixture::MakeShape({{2}, {1, 2}, {3, 4, 5}}),
                TestFixture::MakeShape({{2}, {1, 3}, {6, 7, 8, 9}}),
            },
            1));
    EXPECT_THAT(result_shape, IsEquivalentTo(TestFixture::MakeShape({
                                  {2},
                                  {2, 5},
                                  {3, 6, 4, 5, 7, 8, 9},
                              })));
  }
  {
    // Stacking along the last dimension pairs up corresponding rows.
    ASSERT_OK_AND_ASSIGN(
        typename TestFixture::Shape result_shape,
        StackJaggedShapesAlongDimension<typename TestFixture::Shape>(
            {
                TestFixture::MakeShape({{2}, {1, 2}, {3, 4, 5}}),
                TestFixture::MakeShape({{2}, {1, 2}, {6, 7, 8}}),
            },
            2));
    EXPECT_THAT(result_shape, IsEquivalentTo(TestFixture::MakeShape({
                                  {2},
                                  {1, 2},
                                  {2, 2, 2},
                                  {3, 6, 4, 7, 5, 8},
                              })));
  }
  {
    // Concat along the last dimension sums corresponding row sizes.
    ASSERT_OK_AND_ASSIGN(
        typename TestFixture::Shape result_shape,
        ConcatJaggedShapesAlongDimension<typename TestFixture::Shape>(
            {
                TestFixture::MakeShape({{2}, {1, 2}, {3, 4, 5}}),
                TestFixture::MakeShape({{2}, {1, 2}, {6, 7, 8}}),
            },
            2));
    EXPECT_THAT(result_shape, IsEquivalentTo(TestFixture::MakeShape({
                                  {2},
                                  {1, 2},
                                  {9, 11, 13},
                              })));
  }
  {
    // Concat along dim 1 with deeper (rank-4) shapes and uneven sizes.
    ASSERT_OK_AND_ASSIGN(
        typename TestFixture::Shape result_shape,
        ConcatJaggedShapesAlongDimension<typename TestFixture::Shape>(
            {
                TestFixture::MakeShape({{2}, {1, 2}, {1, 1, 2}, {1, 1, 1, 2}}),
                TestFixture::MakeShape({{2},
                                        {3, 1},
                                        {2, 3, 1, 4},
                                        {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}),
            },
            1));
    EXPECT_THAT(result_shape,
                IsEquivalentTo(TestFixture::MakeShape(
                    {{2},
                     {4, 3},
                     {1, 2, 3, 1, 1, 2, 4},
                     {1, 1, 2, 3, 4, 5, 6, 1, 1, 2, 7, 8, 9, 10}})));
  }
  {
    // Error: at least one input shape is required.
    EXPECT_THAT(StackJaggedShapesAlongDimension<typename TestFixture::Shape>(
                    absl::Span<const typename TestFixture::Shape>{}, 0),
                StatusIs(absl::StatusCode::kInvalidArgument,
                         "concat/stack requires a nonzero number of inputs"));
  }
  {
    // Error: concat (unlike stack) is undefined for rank-0 shapes.
    EXPECT_THAT(ConcatJaggedShapesAlongDimension<typename TestFixture::Shape>(
                    {
                        TestFixture::Shape::Empty(),
                    },
                    0),
                StatusIs(absl::StatusCode::kInvalidArgument,
                         "cannot concat shapes of rank zero"));
  }
  {
    // Error: negative dimension.
    EXPECT_THAT(StackJaggedShapesAlongDimension<typename TestFixture::Shape>(
                    {
                        TestFixture::MakeShape({{2}, {1, 2}, {3, 4, 5}}),
                    },
                    -1),
                StatusIs(absl::StatusCode::kInvalidArgument,
                         "invalid dim = -1 for concat/stack"));
  }
  {
    // Error: concat dim must be < rank (rank is 3 here).
    EXPECT_THAT(ConcatJaggedShapesAlongDimension<typename TestFixture::Shape>(
                    {
                        TestFixture::MakeShape({{2}, {1, 2}, {3, 4, 5}}),
                    },
                    3),
                StatusIs(absl::StatusCode::kInvalidArgument,
                         "invalid dim = 3 for concat/stack"));
  }
  {
    // Error: stack dim may be == rank, but not greater.
    EXPECT_THAT(StackJaggedShapesAlongDimension<typename TestFixture::Shape>(
                    {
                        TestFixture::MakeShape({{2}, {1, 2}, {3, 4, 5}}),
                    },
                    4),
                StatusIs(absl::StatusCode::kInvalidArgument,
                         "invalid dim = 4 for concat/stack"));
  }
  {
    // Error: all inputs must share the same rank.
    EXPECT_THAT(StackJaggedShapesAlongDimension<typename TestFixture::Shape>(
                    {
                        TestFixture::MakeShape({{2}, {1, 2}, {3, 4, 5}}),
                        TestFixture::MakeShape({{2}, {1, 2}}),
                    },
                    0),
                StatusIs(absl::StatusCode::kInvalidArgument,
                         "concat/stack requires all inputs to have the same "
                         "rank, got 3 and 2"));
  }
  {
    // Error: the shape prefix before the concat dimension must match.
    EXPECT_THAT(
        StackJaggedShapesAlongDimension<typename TestFixture::Shape>(
            {
                TestFixture::MakeShape({{2}, {1, 2}, {3, 4, 5}}),
                TestFixture::MakeShape({{2}, {2, 1}, {3, 4, 5}}),
            },
            2),
        StatusIs(
            absl::StatusCode::kInvalidArgument,
            "concat/stack requires all inputs to have the same shape prefix "
            "before the concatenation dimension"));
  }
}
// Covers Stack/ConcatJaggedArraysAlongDimension: both the resulting shape and
// the element order of the concatenated data, plus input-validation errors.
TYPED_TEST(ConcatTest, StackOrConcatJaggedArraysAlongDimension) {
  {
    // Stacking one array adds a leading unit dimension; data is unchanged.
    const auto array1 =
        TestFixture::MakeArray(absl::Span<const int>({1, 2, 3}));
    const auto shape1 = TestFixture::MakeShape({{3}});
    ASSERT_OK_AND_ASSIGN(
        (const auto& [result_array, result_shape]),
        StackJaggedArraysAlongDimension(absl::MakeConstSpan({array1}),
                                        absl::MakeConstSpan({shape1}), 0));
    EXPECT_THAT(result_shape,
                IsEquivalentTo(TestFixture::MakeShape({{1}, {3}})));
    EXPECT_THAT(result_array, ElementsAre(1, 2, 3));
  }
  {
    // Concatenating one array is the identity.
    const auto array1 =
        TestFixture::MakeArray(absl::Span<const int>({1, 2, 3}));
    const auto shape1 = TestFixture::MakeShape({{3}});
    ASSERT_OK_AND_ASSIGN(
        (const auto& [result_array, result_shape]),
        ConcatJaggedArraysAlongDimension(absl::MakeConstSpan({array1}),
                                         absl::MakeConstSpan({shape1}), 0));
    EXPECT_THAT(result_shape, IsEquivalentTo(TestFixture::MakeShape({{3}})));
    EXPECT_THAT(result_array, ElementsAre(1, 2, 3));
  }
  {
    // Stacking rank-1 arrays of different sizes along dim 0.
    const auto array1 =
        TestFixture::MakeArray(absl::Span<const int>({1, 2, 3}));
    const auto shape1 = TestFixture::MakeShape({{3}});
    const auto array2 = TestFixture::MakeArray(absl::Span<const int>({4, 5}));
    const auto shape2 = TestFixture::MakeShape({{2}});
    ASSERT_OK_AND_ASSIGN((const auto& [result_array, result_shape]),
                         StackJaggedArraysAlongDimension(
                             absl::MakeConstSpan({array1, array2}),
                             absl::MakeConstSpan({shape1, shape2}), 0));
    EXPECT_THAT(result_shape,
                IsEquivalentTo(TestFixture::MakeShape({{2}, {3, 2}})));
    EXPECT_THAT(result_array, ElementsAre(1, 2, 3, 4, 5));
  }
  {
    // Stacking rank-2 arrays along dim 0 keeps each input's data contiguous.
    const auto array1 =
        TestFixture::MakeArray(absl::Span<const int>({1, 2, 3}));
    const auto shape1 = TestFixture::MakeShape({{{2}, {1, 2}}});
    const auto array2 =
        TestFixture::MakeArray(absl::Span<const int>({4, 5, 6}));
    const auto shape2 = TestFixture::MakeShape({{{2}, {2, 1}}});
    ASSERT_OK_AND_ASSIGN((const auto& [result_array, result_shape]),
                         StackJaggedArraysAlongDimension(
                             absl::MakeConstSpan({array1, array2}),
                             absl::MakeConstSpan({shape1, shape2}), 0));
    EXPECT_THAT(
        result_shape,
        IsEquivalentTo(TestFixture::MakeShape({{2}, {2, 2}, {1, 2, 2, 1}})));
    EXPECT_THAT(result_array, ElementsAre(1, 2, 3, 4, 5, 6));
  }
  {
    // Stacking along dim 1 interleaves the inputs' rows per parent.
    const auto array1 =
        TestFixture::MakeArray(absl::Span<const int>({1, 2, 3}));
    const auto shape1 = TestFixture::MakeShape({{{2}, {1, 2}}});
    const auto array2 =
        TestFixture::MakeArray(absl::Span<const int>({4, 5, 6}));
    const auto shape2 = TestFixture::MakeShape({{{2}, {1, 2}}});
    ASSERT_OK_AND_ASSIGN((const auto& [result_array, result_shape]),
                         StackJaggedArraysAlongDimension(
                             absl::MakeConstSpan({array1, array2}),
                             absl::MakeConstSpan({shape1, shape2}), 1));
    EXPECT_THAT(
        result_shape,
        IsEquivalentTo(TestFixture::MakeShape({{2}, {2, 2}, {1, 1, 2, 2}})));
    EXPECT_THAT(result_array, ElementsAre(1, 4, 2, 3, 5, 6));
  }
  {
    // Concatenating rank-3 arrays along dim 0: inputs appended whole.
    const auto array1 =
        TestFixture::MakeArray(absl::Span<const int>({1, 2, 3, 4}));
    const auto shape1 = TestFixture::MakeShape({{{2}, {2, 1}, {2, 1, 1}}});
    const auto array2 =
        TestFixture::MakeArray(absl::Span<const int>({5, 6, 7, 8}));
    const auto shape2 = TestFixture::MakeShape({{{2}, {2, 1}, {2, 1, 1}}});
    ASSERT_OK_AND_ASSIGN((const auto& [result_array, result_shape]),
                         ConcatJaggedArraysAlongDimension(
                             absl::MakeConstSpan({array1, array2}),
                             absl::MakeConstSpan({shape1, shape2}), 0));
    EXPECT_THAT(result_shape, IsEquivalentTo(TestFixture::MakeShape(
                                  {{4}, {2, 1, 2, 1}, {2, 1, 1, 2, 1, 1}})));
    EXPECT_THAT(result_array, ElementsAre(1, 2, 3, 4, 5, 6, 7, 8));
  }
  {
    // Concatenating along dim 1: each parent's rows from all inputs merge.
    const auto array1 =
        TestFixture::MakeArray(absl::Span<const int>({1, 2, 3, 4}));
    const auto shape1 = TestFixture::MakeShape({{{2}, {2, 1}, {2, 1, 1}}});
    const auto array2 =
        TestFixture::MakeArray(absl::Span<const int>({5, 6, 7, 8}));
    const auto shape2 = TestFixture::MakeShape({{{2}, {2, 1}, {2, 1, 1}}});
    ASSERT_OK_AND_ASSIGN((const auto& [result_array, result_shape]),
                         ConcatJaggedArraysAlongDimension(
                             absl::MakeConstSpan({array1, array2}),
                             absl::MakeConstSpan({shape1, shape2}), 1));
    EXPECT_THAT(result_shape, IsEquivalentTo(TestFixture::MakeShape(
                                  {{2}, {4, 2}, {2, 1, 2, 1, 1, 1}})));
    EXPECT_THAT(result_array, ElementsAre(1, 2, 3, 5, 6, 7, 4, 8));
  }
  {
    // Concatenating along the last dimension: row sizes are summed and the
    // corresponding elements interleave row-by-row.
    const auto array1 =
        TestFixture::MakeArray(absl::Span<const int>({1, 2, 3, 4}));
    const auto shape1 = TestFixture::MakeShape({{{2}, {2, 1}, {2, 1, 1}}});
    const auto array2 =
        TestFixture::MakeArray(absl::Span<const int>({5, 6, 7, 8}));
    const auto shape2 = TestFixture::MakeShape({{{2}, {2, 1}, {2, 1, 1}}});
    ASSERT_OK_AND_ASSIGN((const auto& [result_array, result_shape]),
                         ConcatJaggedArraysAlongDimension(
                             absl::MakeConstSpan({array1, array2}),
                             absl::MakeConstSpan({shape1, shape2}), 2));
    EXPECT_THAT(
        result_shape,
        IsEquivalentTo(TestFixture::MakeShape({{2}, {2, 1}, {4, 2, 2}})));
    EXPECT_THAT(result_array, ElementsAre(1, 2, 5, 6, 3, 7, 4, 8));
  }
  {
    // Error: `arrays` and `array_shapes` spans must have equal length.
    const auto array1 =
        TestFixture::MakeArray(absl::Span<const int>({1, 2, 3}));
    const auto shape1 = TestFixture::MakeShape({{3}});
    const auto array2 = TestFixture::MakeArray(absl::Span<const int>({4, 5}));
    const auto shape2 = TestFixture::MakeShape({{2}});
    EXPECT_THAT(StackJaggedArraysAlongDimension(
                    absl::MakeConstSpan({array1}),
                    absl::MakeConstSpan({shape1, shape2}), 0),
                StatusIs(absl::StatusCode::kInvalidArgument,
                         "concat/stack expects `arrays` and `array_shapes` to "
                         "be 1:1, got sizes 1 and 2"));
  }
  {
    // Error: each array's size must match its shape's total size.
    const auto array1 =
        TestFixture::MakeArray(absl::Span<const int>({1, 2, 3}));
    const auto shape1 = TestFixture::MakeShape({{3}});
    const auto array2 = TestFixture::MakeArray(absl::Span<const int>({4, 5}));
    const auto shape2 = TestFixture::MakeShape({{2}});
    EXPECT_THAT(
        StackJaggedArraysAlongDimension(absl::MakeConstSpan({array2, array2}),
                                        absl::MakeConstSpan({shape1, shape2}),
                                        0),
        StatusIs(absl::StatusCode::kInvalidArgument,
                 "concat/stack expects `arrays` and `array_shapes` to describe "
                 "arrays with the same size, but got 2 != 3 for index 0"));
  }
}
template <typename ShapeHelper>
typename ShapeHelper::Edge GetSplitPointsEdge(int64_t parent_size,
int64_t children) {
std::vector<OptionalValue<int64_t>> split_points;
split_points.reserve(parent_size + 1);
for (int64_t i = 0; i <= parent_size; ++i) {
split_points.push_back(i * children);
}
return ShapeHelper::EdgeFromSplitPoints(std::move(split_points)).value();
}
// Builds a uniform jagged shape of the given `rank` where every node has
// `num_children` children, i.e. dimension i has num_children^i parents.
template <typename ShapeHelper>
typename ShapeHelper::Shape GetShape(int64_t rank, int64_t num_children) {
  typename ShapeHelper::Shape::EdgeVec edges;
  edges.reserve(rank);
  // Track num_children^i with integer arithmetic instead of std::pow, which
  // computes in floating point and can lose precision for large exponents.
  int64_t parent_size = 1;
  for (int i = 0; i < rank; ++i) {
    edges.push_back(GetSplitPointsEdge<ShapeHelper>(parent_size, num_children));
    parent_size *= num_children;
  }
  return ShapeHelper::Shape::FromEdges(std::move(edges)).value();
}
// Benchmarks stacking `num_shapes` identical shapes along the first
// dimension. NOTE(review): despite the "Concat" prefix in the name, this
// measures StackJaggedShapesAlongDimension; the name is kept for continuity
// of benchmark history.
template <typename ShapeHelper>
void BM_ConcatJaggedShapesAlongDimension_StackFirst(benchmark::State& state) {
  const int rank = state.range(0);
  const int num_children = state.range(1);
  const int num_shapes = state.range(2);
  std::vector<typename ShapeHelper::Shape> shapes;
  shapes.reserve(num_shapes);
  for (int shape_idx = 0; shape_idx < num_shapes; ++shape_idx) {
    shapes.push_back(GetShape<ShapeHelper>(rank, num_children));
  }
  while (state.KeepRunning()) {
    benchmark::DoNotOptimize(shapes);
    auto result_shape_or =
        StackJaggedShapesAlongDimension(absl::MakeConstSpan(shapes), 0);
    benchmark::DoNotOptimize(result_shape_or);
  }
}
BENCHMARK(
    BM_ConcatJaggedShapesAlongDimension_StackFirst<JaggedArrayShapeHelper>)
    ->Args({1, 1, 2})
    ->Args({4, 100, 2})
    ->Args({4, 100, 10});
BENCHMARK(
    BM_ConcatJaggedShapesAlongDimension_StackFirst<JaggedDenseArrayShapeHelper>)
    ->Args({1, 1, 2})
    ->Args({4, 100, 2})
    ->Args({4, 100, 10});
// Benchmarks stacking `num_shapes` identical shapes along the last valid
// stack dimension (dim == rank).
template <typename ShapeHelper>
void BM_StackJaggedShapesAlongDimension_StackLast(benchmark::State& state) {
  const int rank = state.range(0);
  const int num_children = state.range(1);
  const int num_shapes = state.range(2);
  std::vector<typename ShapeHelper::Shape> shapes;
  shapes.reserve(num_shapes);
  for (int shape_idx = 0; shape_idx < num_shapes; ++shape_idx) {
    shapes.push_back(GetShape<ShapeHelper>(rank, num_children));
  }
  while (state.KeepRunning()) {
    benchmark::DoNotOptimize(shapes);
    auto result_shape_or =
        StackJaggedShapesAlongDimension(absl::MakeConstSpan(shapes), rank);
    benchmark::DoNotOptimize(result_shape_or);
  }
}
BENCHMARK(BM_StackJaggedShapesAlongDimension_StackLast<JaggedArrayShapeHelper>)
    ->Args({1, 1, 2})
    ->Args({4, 100, 2})
    ->Args({4, 100, 10});
BENCHMARK(
    BM_StackJaggedShapesAlongDimension_StackLast<JaggedDenseArrayShapeHelper>)
    ->Args({1, 1, 2})
    ->Args({4, 100, 2})
    ->Args({4, 100, 10});
// Benchmarks concatenating `num_shapes` identical shapes along dim 0.
template <typename ShapeHelper>
void BM_ConcatJaggedShapesAlongDimension_ConcatFirst(benchmark::State& state) {
  const int rank = state.range(0);
  const int num_children = state.range(1);
  const int num_shapes = state.range(2);
  std::vector<typename ShapeHelper::Shape> shapes;
  shapes.reserve(num_shapes);
  for (int shape_idx = 0; shape_idx < num_shapes; ++shape_idx) {
    shapes.push_back(GetShape<ShapeHelper>(rank, num_children));
  }
  while (state.KeepRunning()) {
    benchmark::DoNotOptimize(shapes);
    auto result_shape_or =
        ConcatJaggedShapesAlongDimension(absl::MakeConstSpan(shapes), 0);
    benchmark::DoNotOptimize(result_shape_or);
  }
}
BENCHMARK(
    BM_ConcatJaggedShapesAlongDimension_ConcatFirst<JaggedArrayShapeHelper>)
    ->Args({1, 1, 2})
    ->Args({4, 100, 2})
    ->Args({4, 100, 10});
BENCHMARK(BM_ConcatJaggedShapesAlongDimension_ConcatFirst<
          JaggedDenseArrayShapeHelper>)
    ->Args({1, 1, 2})
    ->Args({4, 100, 2})
    ->Args({4, 100, 10});
// Benchmarks concatenating `num_shapes` identical shapes along the last
// valid concat dimension (dim == rank - 1).
template <typename ShapeHelper>
void BM_ConcatJaggedShapesAlongDimension_ConcatLast(benchmark::State& state) {
  const int rank = state.range(0);
  const int num_children = state.range(1);
  const int num_shapes = state.range(2);
  std::vector<typename ShapeHelper::Shape> shapes;
  shapes.reserve(num_shapes);
  for (int shape_idx = 0; shape_idx < num_shapes; ++shape_idx) {
    shapes.push_back(GetShape<ShapeHelper>(rank, num_children));
  }
  while (state.KeepRunning()) {
    benchmark::DoNotOptimize(shapes);
    auto result_shape_or =
        ConcatJaggedShapesAlongDimension(absl::MakeConstSpan(shapes), rank - 1);
    benchmark::DoNotOptimize(result_shape_or);
  }
}
BENCHMARK(
    BM_ConcatJaggedShapesAlongDimension_ConcatLast<JaggedArrayShapeHelper>)
    ->Args({1, 1, 2})
    ->Args({4, 100, 2})
    ->Args({4, 100, 10});
BENCHMARK(
    BM_ConcatJaggedShapesAlongDimension_ConcatLast<JaggedDenseArrayShapeHelper>)
    ->Args({1, 1, 2})
    ->Args({4, 100, 2})
    ->Args({4, 100, 10});
// Benchmarks stacking two constant arrays (with matching uniform shapes)
// along dim 0.
template <typename ShapeHelper>
void BM_StackJaggedArraysAlongDimension_StackFirst(benchmark::State& state) {
  const int rank = state.range(0);
  const int num_children = state.range(1);
  const auto shape1 = GetShape<ShapeHelper>(rank, num_children);
  const auto shape2 = GetShape<ShapeHelper>(rank, num_children);
  const auto array1 = ShapeHelper::MakeArray(std::pow(num_children, rank), 1);
  const auto array2 = ShapeHelper::MakeArray(std::pow(num_children, rank), 2);
  while (state.KeepRunning()) {
    auto result_shape_or = StackJaggedArraysAlongDimension(
        absl::MakeConstSpan({array1, array2}),
        absl::MakeConstSpan({shape1, shape2}), 0);
    benchmark::DoNotOptimize(result_shape_or);
  }
}
BENCHMARK(
    BM_StackJaggedArraysAlongDimension_StackFirst<JaggedArrayShapeHelper>)
    ->Args({1, 1})
    ->Args({2, 1000});
BENCHMARK(
    BM_StackJaggedArraysAlongDimension_StackFirst<JaggedDenseArrayShapeHelper>)
    ->Args({1, 1})
    ->Args({2, 1000});
// Benchmarks stacking two constant arrays along the last valid stack
// dimension (dim == rank).
template <typename ShapeHelper>
void BM_StackJaggedArraysAlongDimension_StackLast(benchmark::State& state) {
  const int rank = state.range(0);
  const int num_children = state.range(1);
  const auto shape1 = GetShape<ShapeHelper>(rank, num_children);
  const auto shape2 = GetShape<ShapeHelper>(rank, num_children);
  const auto array1 = ShapeHelper::MakeArray(std::pow(num_children, rank), 1);
  const auto array2 = ShapeHelper::MakeArray(std::pow(num_children, rank), 2);
  while (state.KeepRunning()) {
    auto result_shape_or = StackJaggedArraysAlongDimension(
        absl::MakeConstSpan({array1, array2}),
        absl::MakeConstSpan({shape1, shape2}), rank);
    benchmark::DoNotOptimize(result_shape_or);
  }
}
BENCHMARK(BM_StackJaggedArraysAlongDimension_StackLast<JaggedArrayShapeHelper>)
    ->Args({1, 1})
    ->Args({2, 1000});
BENCHMARK(
    BM_StackJaggedArraysAlongDimension_StackLast<JaggedDenseArrayShapeHelper>)
    ->Args({1, 1})
    ->Args({2, 1000});
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/jagged_shape/dense_array/util/concat.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/jagged_shape/util/concat_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
38985f23-70af-414f-9402-1f1728be4239 | cpp | google/tsl | retrying_file_system | tsl/platform/retrying_file_system.h | tsl/platform/retrying_file_system_test.cc | #ifndef TENSORFLOW_TSL_PLATFORM_RETRYING_FILE_SYSTEM_H_
#define TENSORFLOW_TSL_PLATFORM_RETRYING_FILE_SYSTEM_H_
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/random.h"
#include "tsl/platform/retrying_utils.h"
#include "tsl/platform/status.h"
namespace tsl {
// Decorates an `Underlying` filesystem so that every operation is retried
// according to `retry_config` (via RetryingUtils). File handles returned by
// the New*File factories are themselves wrapped in retrying decorators
// (defined out of line below).
template <typename Underlying>
class RetryingFileSystem : public FileSystem {
 public:
  RetryingFileSystem(std::unique_ptr<Underlying> base_file_system,
                     const RetryConfig& retry_config)
      : base_file_system_(std::move(base_file_system)),
        retry_config_(retry_config) {}

  TF_USE_FILESYSTEM_METHODS_WITH_NO_TRANSACTION_SUPPORT;

  absl::Status NewRandomAccessFile(
      const string& filename, TransactionToken* token,
      std::unique_ptr<RandomAccessFile>* result) override;

  absl::Status NewWritableFile(const string& filename, TransactionToken* token,
                               std::unique_ptr<WritableFile>* result) override;

  absl::Status NewAppendableFile(
      const string& filename, TransactionToken* token,
      std::unique_ptr<WritableFile>* result) override;

  absl::Status NewReadOnlyMemoryRegionFromFile(
      const string& filename, TransactionToken* token,
      std::unique_ptr<ReadOnlyMemoryRegion>* result) override;

  absl::Status FileExists(const string& fname,
                          TransactionToken* token) override {
    return RetryingUtils::CallWithRetries(
        [this, &fname, token]() {
          return base_file_system_->FileExists(fname, token);
        },
        retry_config_);
  }

  absl::Status GetChildren(const string& dir, TransactionToken* token,
                           std::vector<string>* result) override {
    return RetryingUtils::CallWithRetries(
        [this, &dir, result, token]() {
          return base_file_system_->GetChildren(dir, token, result);
        },
        retry_config_);
  }

  absl::Status GetMatchingPaths(const string& pattern, TransactionToken* token,
                                std::vector<string>* result) override {
    return RetryingUtils::CallWithRetries(
        [this, &pattern, result, token]() {
          return base_file_system_->GetMatchingPaths(pattern, token, result);
        },
        retry_config_);
  }

  absl::Status Stat(const string& fname, TransactionToken* token,
                    FileStatistics* stat) override {
    return RetryingUtils::CallWithRetries(
        [this, &fname, stat, token]() {
          return base_file_system_->Stat(fname, token, stat);
        },
        retry_config_);
  }

  // Deletions use DeleteWithRetries rather than CallWithRetries; presumably
  // it handles a NOT_FOUND that follows a partially-applied delete — confirm
  // against RetryingUtils.
  absl::Status DeleteFile(const string& fname,
                          TransactionToken* token) override {
    return RetryingUtils::DeleteWithRetries(
        [this, &fname, token]() {
          return base_file_system_->DeleteFile(fname, token);
        },
        retry_config_);
  }

  absl::Status CreateDir(const string& dirname,
                         TransactionToken* token) override {
    return RetryingUtils::CallWithRetries(
        [this, &dirname, token]() {
          return base_file_system_->CreateDir(dirname, token);
        },
        retry_config_);
  }

  absl::Status DeleteDir(const string& dirname,
                         TransactionToken* token) override {
    return RetryingUtils::DeleteWithRetries(
        [this, &dirname, token]() {
          return base_file_system_->DeleteDir(dirname, token);
        },
        retry_config_);
  }

  absl::Status GetFileSize(const string& fname, TransactionToken* token,
                           uint64* file_size) override {
    return RetryingUtils::CallWithRetries(
        [this, &fname, file_size, token]() {
          return base_file_system_->GetFileSize(fname, token, file_size);
        },
        retry_config_);
  }

  absl::Status RenameFile(const string& src, const string& target,
                          TransactionToken* token) override {
    return RetryingUtils::CallWithRetries(
        [this, &src, &target, token]() {
          return base_file_system_->RenameFile(src, target, token);
        },
        retry_config_);
  }

  absl::Status IsDirectory(const string& dirname,
                           TransactionToken* token) override {
    return RetryingUtils::CallWithRetries(
        [this, &dirname, token]() {
          return base_file_system_->IsDirectory(dirname, token);
        },
        retry_config_);
  }

  // Pure capability query; forwarded without retries.
  absl::Status HasAtomicMove(const string& path,
                             bool* has_atomic_move) override {
    return base_file_system_->HasAtomicMove(path, has_atomic_move);
  }

  // Pure capability query; forwarded without retries.
  absl::Status CanCreateTempFile(const std::string& fname,
                                 bool* can_create_temp_file) override {
    return base_file_system_->CanCreateTempFile(fname, can_create_temp_file);
  }

  absl::Status DeleteRecursively(const string& dirname, TransactionToken* token,
                                 int64_t* undeleted_files,
                                 int64_t* undeleted_dirs) override {
    return RetryingUtils::DeleteWithRetries(
        [this, &dirname, token, undeleted_files, undeleted_dirs]() {
          return base_file_system_->DeleteRecursively(
              dirname, token, undeleted_files, undeleted_dirs);
        },
        retry_config_);
  }

  // Forwarded without retries (returns void; nothing to retry on).
  void FlushCaches(TransactionToken* token) override {
    base_file_system_->FlushCaches(token);
  }

  // Accessor for the wrapped filesystem (non-owning).
  Underlying* underlying() const { return base_file_system_.get(); }

 private:
  std::unique_ptr<Underlying> base_file_system_;
  const RetryConfig retry_config_;

  RetryingFileSystem(const RetryingFileSystem&) = delete;
  void operator=(const RetryingFileSystem&) = delete;
};
namespace retrying_internals {
// Wraps a RandomAccessFile so that Read is retried per `retry_config`.
// Name() is a pure query and is not retried.
class RetryingRandomAccessFile : public RandomAccessFile {
 public:
  RetryingRandomAccessFile(std::unique_ptr<RandomAccessFile> base_file,
                           const RetryConfig& retry_config)
      : base_file_(std::move(base_file)), retry_config_(retry_config) {}

  absl::Status Name(absl::string_view* result) const override {
    return base_file_->Name(result);
  }

  absl::Status Read(uint64 offset, size_t n, absl::string_view* result,
                    char* scratch) const override {
    return RetryingUtils::CallWithRetries(
        [this, offset, n, result, scratch]() {
          return base_file_->Read(offset, n, result, scratch);
        },
        retry_config_);
  }

 private:
  std::unique_ptr<RandomAccessFile> base_file_;
  const RetryConfig retry_config_;
};
// Wraps a WritableFile so that each mutating operation is retried per
// `retry_config`. The destructor performs a best-effort Close (errors are
// ignored, since destructors must not fail).
class RetryingWritableFile : public WritableFile {
 public:
  RetryingWritableFile(std::unique_ptr<WritableFile> base_file,
                       const RetryConfig& retry_config)
      : base_file_(std::move(base_file)), retry_config_(retry_config) {}

  ~RetryingWritableFile() override {
    // Best-effort close; any error is intentionally dropped.
    Close().IgnoreError();
  }

  absl::Status Append(absl::string_view data) override {
    return RetryingUtils::CallWithRetries(
        [this, &data]() { return base_file_->Append(data); }, retry_config_);
  }

  absl::Status Close() override {
    return RetryingUtils::CallWithRetries(
        [this]() { return base_file_->Close(); }, retry_config_);
  }

  absl::Status Flush() override {
    return RetryingUtils::CallWithRetries(
        [this]() { return base_file_->Flush(); }, retry_config_);
  }

  // Pure query; not retried.
  absl::Status Name(absl::string_view* result) const override {
    return base_file_->Name(result);
  }

  absl::Status Sync() override {
    return RetryingUtils::CallWithRetries(
        [this]() { return base_file_->Sync(); }, retry_config_);
  }

  absl::Status Tell(int64_t* position) override {
    return RetryingUtils::CallWithRetries(
        [this, &position]() { return base_file_->Tell(position); },
        retry_config_);
  }

 private:
  std::unique_ptr<WritableFile> base_file_;
  const RetryConfig retry_config_;
};
}
// Opens a random-access file with retries, then wraps the handle so reads
// are retried as well.
template <typename Underlying>
absl::Status RetryingFileSystem<Underlying>::NewRandomAccessFile(
    const string& filename, TransactionToken* token,
    std::unique_ptr<RandomAccessFile>* result) {
  std::unique_ptr<RandomAccessFile> base_file;
  TF_RETURN_IF_ERROR(RetryingUtils::CallWithRetries(
      [this, &filename, &base_file, token]() {
        return base_file_system_->NewRandomAccessFile(filename, token,
                                                      &base_file);
      },
      retry_config_));
  // make_unique instead of reset(new ...) per modernize-make-unique.
  *result = std::make_unique<retrying_internals::RetryingRandomAccessFile>(
      std::move(base_file), retry_config_);
  return absl::OkStatus();
}
// Creates a writable file with retries, then wraps the handle so writes,
// flushes and closes are retried as well.
template <typename Underlying>
absl::Status RetryingFileSystem<Underlying>::NewWritableFile(
    const string& filename, TransactionToken* token,
    std::unique_ptr<WritableFile>* result) {
  std::unique_ptr<WritableFile> base_file;
  TF_RETURN_IF_ERROR(RetryingUtils::CallWithRetries(
      [this, &filename, &base_file, token]() {
        return base_file_system_->NewWritableFile(filename, token, &base_file);
      },
      retry_config_));
  // make_unique instead of reset(new ...) per modernize-make-unique.
  *result = std::make_unique<retrying_internals::RetryingWritableFile>(
      std::move(base_file), retry_config_);
  return absl::OkStatus();
}
// Opens a file for appending with retries, then wraps the handle so writes,
// flushes and closes are retried as well.
template <typename Underlying>
absl::Status RetryingFileSystem<Underlying>::NewAppendableFile(
    const string& filename, TransactionToken* token,
    std::unique_ptr<WritableFile>* result) {
  std::unique_ptr<WritableFile> base_file;
  TF_RETURN_IF_ERROR(RetryingUtils::CallWithRetries(
      [this, &filename, &base_file, token]() {
        return base_file_system_->NewAppendableFile(filename, token,
                                                    &base_file);
      },
      retry_config_));
  // make_unique instead of reset(new ...) per modernize-make-unique.
  *result = std::make_unique<retrying_internals::RetryingWritableFile>(
      std::move(base_file), retry_config_);
  return absl::OkStatus();
}
// Maps a file into memory with retries. No per-operation wrapper is needed:
// a ReadOnlyMemoryRegion has no failing operations after creation.
template <typename Underlying>
absl::Status RetryingFileSystem<Underlying>::NewReadOnlyMemoryRegionFromFile(
    const string& filename, TransactionToken* token,
    std::unique_ptr<ReadOnlyMemoryRegion>* result) {
  auto map_region = [this, &filename, result, token]() {
    return base_file_system_->NewReadOnlyMemoryRegionFromFile(filename, token,
                                                              result);
  };
  return RetryingUtils::CallWithRetries(map_region, retry_config_);
}
}
#endif | #include "tsl/platform/retrying_file_system.h"
#include <fstream>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
typedef std::vector<std::tuple<string, absl::Status>> ExpectedCalls;
// Produces `n` expected calls to `method`, each returning a distinct
// retriable (UNAVAILABLE) error.
ExpectedCalls CreateRetriableErrors(const string& method, int n) {
  ExpectedCalls calls;
  calls.reserve(n);
  for (int i = 0; i < n; ++i) {
    calls.push_back(std::make_tuple(
        method, errors::Unavailable(strings::StrCat("Retriable error #", i))));
  }
  return calls;
}
// Scripts an exact sequence of (method name, status) pairs. Each consumed
// call must match the next expected method and yields its preset status;
// the destructor verifies the whole sequence was exhausted.
class MockCallSequence {
 public:
  explicit MockCallSequence(const ExpectedCalls& calls) : calls_(calls) {}

  ~MockCallSequence() {
    EXPECT_TRUE(calls_.empty())
        << "Not all expected calls have been made, "
        << "the next expected call: " << std::get<0>(calls_.front());
  }

  absl::Status ConsumeNextCall(const string& method) {
    EXPECT_FALSE(calls_.empty()) << "No more calls were expected.";
    auto next = calls_.front();
    calls_.erase(calls_.begin());
    EXPECT_EQ(std::get<0>(next), method) << "Unexpected method called.";
    return std::get<1>(next);
  }

 private:
  ExpectedCalls calls_;
};
// RandomAccessFile whose Name/Read methods replay a scripted call
// sequence, returning the scripted status for each invocation. Output
// parameters are left untouched.
class MockRandomAccessFile : public RandomAccessFile {
 public:
  explicit MockRandomAccessFile(const ExpectedCalls& calls) : calls_(calls) {}
  absl::Status Name(absl::string_view* result) const override {
    return calls_.ConsumeNextCall("Name");
  }
  absl::Status Read(uint64 offset, size_t n, absl::string_view* result,
                    char* scratch) const override {
    return calls_.ConsumeNextCall("Read");
  }

 private:
  // mutable: Name()/Read() are const overrides but must advance the script.
  mutable MockCallSequence calls_;
};
// WritableFile whose every method replays a scripted call sequence,
// returning the scripted status for each invocation.
class MockWritableFile : public WritableFile {
 public:
  explicit MockWritableFile(const ExpectedCalls& calls) : calls_(calls) {}
  absl::Status Append(absl::string_view data) override {
    return calls_.ConsumeNextCall("Append");
  }
  absl::Status Close() override { return calls_.ConsumeNextCall("Close"); }
  absl::Status Flush() override { return calls_.ConsumeNextCall("Flush"); }
  absl::Status Name(absl::string_view* result) const override {
    return calls_.ConsumeNextCall("Name");
  }
  absl::Status Sync() override { return calls_.ConsumeNextCall("Sync"); }
  absl::Status Tell(int64_t* position) override {
    return calls_.ConsumeNextCall("Tell");
  }

 private:
  // mutable: Name() is a const override but must advance the script.
  mutable MockCallSequence calls_;
};
// FileSystem whose every method replays a scripted call sequence. Files
// returned by the New*File methods are injected by tests through the
// *_to_return members. FlushCaches does not consume the script; it only
// sets *flushed_ when a flag pointer was provided.
class MockFileSystem : public FileSystem {
 public:
  explicit MockFileSystem(const ExpectedCalls& calls, bool* flushed = nullptr)
      : calls_(calls), flushed_(flushed) {}

  TF_USE_FILESYSTEM_METHODS_WITH_NO_TRANSACTION_SUPPORT;

  absl::Status NewRandomAccessFile(
      const string& fname, TransactionToken* token,
      std::unique_ptr<RandomAccessFile>* result) override {
    // Hand out the injected file (may be null) before consulting the script.
    *result = std::move(random_access_file_to_return);
    return calls_.ConsumeNextCall("NewRandomAccessFile");
  }
  absl::Status NewWritableFile(const string& fname, TransactionToken* token,
                               std::unique_ptr<WritableFile>* result) override {
    *result = std::move(writable_file_to_return);
    return calls_.ConsumeNextCall("NewWritableFile");
  }
  absl::Status NewAppendableFile(
      const string& fname, TransactionToken* token,
      std::unique_ptr<WritableFile>* result) override {
    *result = std::move(writable_file_to_return);
    return calls_.ConsumeNextCall("NewAppendableFile");
  }
  absl::Status NewReadOnlyMemoryRegionFromFile(
      const string& fname, TransactionToken* token,
      std::unique_ptr<ReadOnlyMemoryRegion>* result) override {
    return calls_.ConsumeNextCall("NewReadOnlyMemoryRegionFromFile");
  }
  absl::Status FileExists(const string& fname,
                          TransactionToken* token) override {
    return calls_.ConsumeNextCall("FileExists");
  }
  absl::Status GetChildren(const string& dir, TransactionToken* token,
                           std::vector<string>* result) override {
    return calls_.ConsumeNextCall("GetChildren");
  }
  absl::Status GetMatchingPaths(const string& dir, TransactionToken* token,
                                std::vector<string>* result) override {
    return calls_.ConsumeNextCall("GetMatchingPaths");
  }
  absl::Status Stat(const string& fname, TransactionToken* token,
                    FileStatistics* stat) override {
    return calls_.ConsumeNextCall("Stat");
  }
  absl::Status DeleteFile(const string& fname,
                          TransactionToken* token) override {
    return calls_.ConsumeNextCall("DeleteFile");
  }
  absl::Status CreateDir(const string& dirname,
                         TransactionToken* token) override {
    return calls_.ConsumeNextCall("CreateDir");
  }
  absl::Status DeleteDir(const string& dirname,
                         TransactionToken* token) override {
    return calls_.ConsumeNextCall("DeleteDir");
  }
  absl::Status GetFileSize(const string& fname, TransactionToken* token,
                           uint64* file_size) override {
    return calls_.ConsumeNextCall("GetFileSize");
  }
  absl::Status RenameFile(const string& src, const string& target,
                          TransactionToken* token) override {
    return calls_.ConsumeNextCall("RenameFile");
  }
  absl::Status IsDirectory(const string& dirname,
                           TransactionToken* token) override {
    return calls_.ConsumeNextCall("IsDirectory");
  }
  absl::Status DeleteRecursively(const string& dirname, TransactionToken* token,
                                 int64_t* undeleted_files,
                                 int64_t* undeleted_dirs) override {
    return calls_.ConsumeNextCall("DeleteRecursively");
  }
  // Not scripted: only records that a flush happened.
  void FlushCaches(TransactionToken* token) override {
    if (flushed_) {
      *flushed_ = true;
    }
  }

  // Files handed out by the New*File methods above; assigned by tests.
  std::unique_ptr<WritableFile> writable_file_to_return;
  std::unique_ptr<RandomAccessFile> random_access_file_to_return;

 private:
  MockCallSequence calls_;
  bool* flushed_ = nullptr;  // optional out-flag set by FlushCaches
};
// --- Retrying wrapper around RandomAccessFile ---

// Calls succeed on the first attempt: no retries are consumed.
TEST(RetryingFileSystemTest, NewRandomAccessFile_ImmediateSuccess) {
  ExpectedCalls expected_file_calls(
      {std::make_tuple("Name", absl::OkStatus()),
       std::make_tuple("Read", absl::OkStatus())});
  auto base_file = std::make_unique<MockRandomAccessFile>(expected_file_calls);
  ExpectedCalls expected_fs_calls(
      {std::make_tuple("NewRandomAccessFile", absl::OkStatus())});
  auto base_fs = std::make_unique<MockFileSystem>(expected_fs_calls);
  base_fs->random_access_file_to_return = std::move(base_file);
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  std::unique_ptr<RandomAccessFile> random_access_file;
  TF_EXPECT_OK(
      fs.NewRandomAccessFile("filename.txt", nullptr, &random_access_file));
  absl::string_view result;
  TF_EXPECT_OK(random_access_file->Name(&result));
  EXPECT_EQ(result, "");
  char scratch[10];
  TF_EXPECT_OK(random_access_file->Read(0, 10, &result, scratch));
}

// Read fails twice with retriable errors and succeeds on the third try.
TEST(RetryingFileSystemTest, NewRandomAccessFile_SuccessWith3rdTry) {
  ExpectedCalls expected_file_calls(
      {std::make_tuple("Read", errors::Unavailable("Something is wrong")),
       std::make_tuple("Read", errors::Unavailable("Wrong again")),
       std::make_tuple("Read", absl::OkStatus())});
  auto base_file = std::make_unique<MockRandomAccessFile>(expected_file_calls);
  ExpectedCalls expected_fs_calls(
      {std::make_tuple("NewRandomAccessFile", absl::OkStatus())});
  auto base_fs = std::make_unique<MockFileSystem>(expected_fs_calls);
  base_fs->random_access_file_to_return = std::move(base_file);
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  std::unique_ptr<RandomAccessFile> random_access_file;
  TF_EXPECT_OK(
      fs.NewRandomAccessFile("filename.txt", nullptr, &random_access_file));
  absl::string_view result;
  char scratch[10];
  TF_EXPECT_OK(random_access_file->Read(0, 10, &result, scratch));
}

// After exhausting all retries the last retriable error is surfaced.
TEST(RetryingFileSystemTest, NewRandomAccessFile_AllRetriesFailed) {
  ExpectedCalls expected_file_calls = CreateRetriableErrors("Read", 11);
  auto base_file = std::make_unique<MockRandomAccessFile>(expected_file_calls);
  ExpectedCalls expected_fs_calls(
      {std::make_tuple("NewRandomAccessFile", absl::OkStatus())});
  auto base_fs = std::make_unique<MockFileSystem>(expected_fs_calls);
  base_fs->random_access_file_to_return = std::move(base_file);
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  std::unique_ptr<RandomAccessFile> random_access_file;
  TF_EXPECT_OK(
      fs.NewRandomAccessFile("filename.txt", nullptr, &random_access_file));
  absl::string_view result;
  char scratch[10];
  const auto& status = random_access_file->Read(0, 10, &result, scratch);
  EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
      << status;
}

// Non-retriable errors are propagated immediately without retrying.
TEST(RetryingFileSystemTest, NewRandomAccessFile_NoRetriesForSomeErrors) {
  ExpectedCalls expected_file_calls({
      std::make_tuple("Read",
                      errors::FailedPrecondition("Failed precondition")),
  });
  auto base_file = std::make_unique<MockRandomAccessFile>(expected_file_calls);
  ExpectedCalls expected_fs_calls(
      {std::make_tuple("NewRandomAccessFile", absl::OkStatus())});
  auto base_fs = std::make_unique<MockFileSystem>(expected_fs_calls);
  base_fs->random_access_file_to_return = std::move(base_file);
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  std::unique_ptr<RandomAccessFile> random_access_file;
  TF_EXPECT_OK(
      fs.NewRandomAccessFile("filename.txt", nullptr, &random_access_file));
  absl::string_view result;
  char scratch[10];
  EXPECT_EQ("Failed precondition",
            random_access_file->Read(0, 10, &result, scratch).message());
}
// --- Retrying wrapper around WritableFile ---

TEST(RetryingFileSystemTest, NewWritableFile_ImmediateSuccess) {
  ExpectedCalls expected_file_calls(
      {std::make_tuple("Name", absl::OkStatus()),
       std::make_tuple("Sync", absl::OkStatus()),
       std::make_tuple("Close", absl::OkStatus())});
  auto base_file = std::make_unique<MockWritableFile>(expected_file_calls);
  ExpectedCalls expected_fs_calls(
      {std::make_tuple("NewWritableFile", absl::OkStatus())});
  auto base_fs = std::make_unique<MockFileSystem>(expected_fs_calls);
  base_fs->writable_file_to_return = std::move(base_file);
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  std::unique_ptr<WritableFile> writable_file;
  TF_EXPECT_OK(fs.NewWritableFile("filename.txt", nullptr, &writable_file));
  absl::string_view result;
  TF_EXPECT_OK(writable_file->Name(&result));
  EXPECT_EQ(result, "");
  TF_EXPECT_OK(writable_file->Sync());
}

// Sync fails twice with retriable errors, then succeeds.
TEST(RetryingFileSystemTest, NewWritableFile_SuccessWith3rdTry) {
  ExpectedCalls expected_file_calls(
      {std::make_tuple("Sync", errors::Unavailable("Something is wrong")),
       std::make_tuple("Sync", errors::Unavailable("Something is wrong again")),
       std::make_tuple("Sync", absl::OkStatus()),
       std::make_tuple("Close", absl::OkStatus())});
  auto base_file = std::make_unique<MockWritableFile>(expected_file_calls);
  ExpectedCalls expected_fs_calls(
      {std::make_tuple("NewWritableFile", absl::OkStatus())});
  auto base_fs = std::make_unique<MockFileSystem>(expected_fs_calls);
  base_fs->writable_file_to_return = std::move(base_file);
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  std::unique_ptr<WritableFile> writable_file;
  TF_EXPECT_OK(fs.NewWritableFile("filename.txt", nullptr, &writable_file));
  TF_EXPECT_OK(writable_file->Sync());
}

// Close (invoked from the wrapper's destructor) is retried as well.
TEST(RetryingFileSystemTest, NewWritableFile_SuccessWith3rdTry_ViaDestructor) {
  ExpectedCalls expected_file_calls(
      {std::make_tuple("Close", errors::Unavailable("Something is wrong")),
       std::make_tuple("Close",
                       errors::Unavailable("Something is wrong again")),
       std::make_tuple("Close", absl::OkStatus())});
  auto base_file = std::make_unique<MockWritableFile>(expected_file_calls);
  ExpectedCalls expected_fs_calls(
      {std::make_tuple("NewWritableFile", absl::OkStatus())});
  auto base_fs = std::make_unique<MockFileSystem>(expected_fs_calls);
  base_fs->writable_file_to_return = std::move(base_file);
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  std::unique_ptr<WritableFile> writable_file;
  TF_EXPECT_OK(fs.NewWritableFile("filename.txt", nullptr, &writable_file));
  writable_file.reset();
}

TEST(RetryingFileSystemTest, NewAppendableFile_SuccessWith3rdTry) {
  ExpectedCalls expected_file_calls(
      {std::make_tuple("Sync", errors::Unavailable("Something is wrong")),
       std::make_tuple("Sync", errors::Unavailable("Something is wrong again")),
       std::make_tuple("Sync", absl::OkStatus()),
       std::make_tuple("Close", absl::OkStatus())});
  auto base_file = std::make_unique<MockWritableFile>(expected_file_calls);
  ExpectedCalls expected_fs_calls(
      {std::make_tuple("NewAppendableFile", absl::OkStatus())});
  auto base_fs = std::make_unique<MockFileSystem>(expected_fs_calls);
  base_fs->writable_file_to_return = std::move(base_file);
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  std::unique_ptr<WritableFile> writable_file;
  TF_EXPECT_OK(fs.NewAppendableFile("filename.txt", nullptr, &writable_file));
  TF_EXPECT_OK(writable_file->Sync());
}

TEST(RetryingFileSystemTest, NewWritableFile_AllRetriesFailed) {
  ExpectedCalls expected_file_calls = CreateRetriableErrors("Sync", 11);
  expected_file_calls.emplace_back(std::make_tuple("Close", absl::OkStatus()));
  auto base_file = std::make_unique<MockWritableFile>(expected_file_calls);
  ExpectedCalls expected_fs_calls(
      {std::make_tuple("NewWritableFile", absl::OkStatus())});
  auto base_fs = std::make_unique<MockFileSystem>(expected_fs_calls);
  base_fs->writable_file_to_return = std::move(base_file);
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  std::unique_ptr<WritableFile> writable_file;
  TF_EXPECT_OK(fs.NewWritableFile("filename.txt", nullptr, &writable_file));
  const auto& status = writable_file->Sync();
  EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
      << status;
}
// --- NewReadOnlyMemoryRegionFromFile is retried at the file-system level ---

TEST(RetryingFileSystemTest,
     NewReadOnlyMemoryRegionFromFile_SuccessWith2ndTry) {
  ExpectedCalls expected_fs_calls(
      {std::make_tuple("NewReadOnlyMemoryRegionFromFile",
                       errors::Unavailable("Something is wrong")),
       std::make_tuple("NewReadOnlyMemoryRegionFromFile", absl::OkStatus())});
  auto base_fs = std::make_unique<MockFileSystem>(expected_fs_calls);
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  std::unique_ptr<ReadOnlyMemoryRegion> result;
  TF_EXPECT_OK(
      fs.NewReadOnlyMemoryRegionFromFile("filename.txt", nullptr, &result));
}

TEST(RetryingFileSystemTest, NewReadOnlyMemoryRegionFromFile_AllRetriesFailed) {
  ExpectedCalls expected_fs_calls =
      CreateRetriableErrors("NewReadOnlyMemoryRegionFromFile", 11);
  auto base_fs = std::make_unique<MockFileSystem>(expected_fs_calls);
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  std::unique_ptr<ReadOnlyMemoryRegion> result;
  const auto& status =
      fs.NewReadOnlyMemoryRegionFromFile("filename.txt", nullptr, &result);
  EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
      << status;
}
// NOTE(review): the "gs://..." path literals in the tests below were
// truncated in the extracted source (everything after "//" inside the
// string was stripped as a comment), leaving unterminated literals. They
// are reconstructed here; MockFileSystem ignores the path, so the exact
// URI does not affect test behavior.
TEST(RetryingFileSystemTest, GetChildren_SuccessWith2ndTry) {
  ExpectedCalls expected_fs_calls(
      {std::make_tuple("GetChildren",
                       errors::Unavailable("Something is wrong")),
       std::make_tuple("GetChildren", absl::OkStatus())});
  std::unique_ptr<MockFileSystem> base_fs(
      new MockFileSystem(expected_fs_calls));
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  std::vector<string> result;
  TF_EXPECT_OK(fs.GetChildren("gs://path", nullptr, &result));
}

TEST(RetryingFileSystemTest, GetChildren_AllRetriesFailed) {
  ExpectedCalls expected_fs_calls = CreateRetriableErrors("GetChildren", 11);
  std::unique_ptr<MockFileSystem> base_fs(
      new MockFileSystem(expected_fs_calls));
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  std::vector<string> result;
  const auto& status = fs.GetChildren("gs://path", nullptr, &result);
  EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
      << status;
}

TEST(RetryingFileSystemTest, GetMatchingPaths_SuccessWith2ndTry) {
  ExpectedCalls expected_fs_calls(
      {std::make_tuple("GetMatchingPaths",
                       errors::Unavailable("Something is wrong")),
       std::make_tuple("GetMatchingPaths", absl::OkStatus())});
  std::unique_ptr<MockFileSystem> base_fs(
      new MockFileSystem(expected_fs_calls));
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  std::vector<string> result;
  TF_EXPECT_OK(fs.GetMatchingPaths("gs://path/dir", nullptr, &result));
}

TEST(RetryingFileSystemTest, GetMatchingPaths_AllRetriesFailed) {
  ExpectedCalls expected_fs_calls =
      CreateRetriableErrors("GetMatchingPaths", 11);
  std::unique_ptr<MockFileSystem> base_fs(
      new MockFileSystem(expected_fs_calls));
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  std::vector<string> result;
  const auto& status = fs.GetMatchingPaths("gs://path/dir", nullptr, &result);
  EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
      << status;
}
// NOTE(review): the "gs://..." path literals below are reconstructed —
// they were truncated at "gs:" by comment-stripping of the extracted
// source. MockFileSystem ignores the path, so any URI works.
TEST(RetryingFileSystemTest, DeleteFile_SuccessWith2ndTry) {
  ExpectedCalls expected_fs_calls(
      {std::make_tuple("DeleteFile", errors::Unavailable("Something is wrong")),
       std::make_tuple("DeleteFile", absl::OkStatus())});
  std::unique_ptr<MockFileSystem> base_fs(
      new MockFileSystem(expected_fs_calls));
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  TF_EXPECT_OK(fs.DeleteFile("gs://path/file.txt", nullptr));
}

TEST(RetryingFileSystemTest, DeleteFile_AllRetriesFailed) {
  ExpectedCalls expected_fs_calls = CreateRetriableErrors("DeleteFile", 11);
  std::unique_ptr<MockFileSystem> base_fs(
      new MockFileSystem(expected_fs_calls));
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  const auto& status = fs.DeleteFile("gs://path/file.txt", nullptr);
  EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
      << status;
}

TEST(RetryingFileSystemTest, CreateDir_SuccessWith2ndTry) {
  ExpectedCalls expected_fs_calls(
      {std::make_tuple("CreateDir", errors::Unavailable("Something is wrong")),
       std::make_tuple("CreateDir", absl::OkStatus())});
  std::unique_ptr<MockFileSystem> base_fs(
      new MockFileSystem(expected_fs_calls));
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  TF_EXPECT_OK(fs.CreateDir("gs://path/newdir", nullptr));
}

TEST(RetryingFileSystemTest, CreateDir_AllRetriesFailed) {
  ExpectedCalls expected_fs_calls = CreateRetriableErrors("CreateDir", 11);
  std::unique_ptr<MockFileSystem> base_fs(
      new MockFileSystem(expected_fs_calls));
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  const auto& status = fs.CreateDir("gs://path/newdir", nullptr);
  EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
      << status;
}

TEST(RetryingFileSystemTest, DeleteDir_SuccessWith2ndTry) {
  ExpectedCalls expected_fs_calls(
      {std::make_tuple("DeleteDir", errors::Unavailable("Something is wrong")),
       std::make_tuple("DeleteDir", absl::OkStatus())});
  std::unique_ptr<MockFileSystem> base_fs(
      new MockFileSystem(expected_fs_calls));
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  TF_EXPECT_OK(fs.DeleteDir("gs://path/dir", nullptr));
}

TEST(RetryingFileSystemTest, DeleteDir_AllRetriesFailed) {
  ExpectedCalls expected_fs_calls = CreateRetriableErrors("DeleteDir", 11);
  std::unique_ptr<MockFileSystem> base_fs(
      new MockFileSystem(expected_fs_calls));
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  const auto& status = fs.DeleteDir("gs://path/dir", nullptr);
  EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
      << status;
}
// NOTE(review): the "gs://..." literals in GetFileSize were reconstructed
// from truncated source (cut at "gs:"); the mock ignores the path.
TEST(RetryingFileSystemTest, GetFileSize_SuccessWith2ndTry) {
  ExpectedCalls expected_fs_calls(
      {std::make_tuple("GetFileSize",
                       errors::Unavailable("Something is wrong")),
       std::make_tuple("GetFileSize", absl::OkStatus())});
  std::unique_ptr<MockFileSystem> base_fs(
      new MockFileSystem(expected_fs_calls));
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  uint64 size;
  TF_EXPECT_OK(fs.GetFileSize("gs://path/file.txt", nullptr, &size));
}

TEST(RetryingFileSystemTest, GetFileSize_AllRetriesFailed) {
  ExpectedCalls expected_fs_calls = CreateRetriableErrors("GetFileSize", 11);
  std::unique_ptr<MockFileSystem> base_fs(
      new MockFileSystem(expected_fs_calls));
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  uint64 size;
  const auto& status = fs.GetFileSize("gs://path/file.txt", nullptr, &size);
  EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
      << status;
}

TEST(RetryingFileSystemTest, RenameFile_SuccessWith2ndTry) {
  ExpectedCalls expected_fs_calls(
      {std::make_tuple("RenameFile", errors::Unavailable("Something is wrong")),
       std::make_tuple("RenameFile", absl::OkStatus())});
  std::unique_ptr<MockFileSystem> base_fs(
      new MockFileSystem(expected_fs_calls));
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  TF_EXPECT_OK(fs.RenameFile("old_name", "new_name", nullptr));
}

TEST(RetryingFileSystemTest, RenameFile_AllRetriesFailed) {
  ExpectedCalls expected_fs_calls = CreateRetriableErrors("RenameFile", 11);
  std::unique_ptr<MockFileSystem> base_fs(
      new MockFileSystem(expected_fs_calls));
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  const auto& status = fs.RenameFile("old_name", "new_name", nullptr);
  EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
      << status;
}
// NOTE(review): the FileExists path literal was reconstructed from
// truncated source (cut at "gs:"); the mock ignores the path.
TEST(RetryingFileSystemTest, Stat_SuccessWith2ndTry) {
  ExpectedCalls expected_fs_calls(
      {std::make_tuple("Stat", errors::Unavailable("Something is wrong")),
       std::make_tuple("Stat", absl::OkStatus())});
  std::unique_ptr<MockFileSystem> base_fs(
      new MockFileSystem(expected_fs_calls));
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  FileStatistics stat;
  TF_EXPECT_OK(fs.Stat("file_name", nullptr, &stat));
}

TEST(RetryingFileSystemTest, Stat_AllRetriesFailed) {
  ExpectedCalls expected_fs_calls = CreateRetriableErrors("Stat", 11);
  std::unique_ptr<MockFileSystem> base_fs(
      new MockFileSystem(expected_fs_calls));
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  FileStatistics stat;
  const auto& status = fs.Stat("file_name", nullptr, &stat);
  EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
      << status;
}

TEST(RetryingFileSystemTest, FileExists_AllRetriesFailed) {
  ExpectedCalls expected_fs_calls = CreateRetriableErrors("FileExists", 11);
  std::unique_ptr<MockFileSystem> base_fs(
      new MockFileSystem(expected_fs_calls));
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  const auto& status = fs.FileExists("file_name", nullptr);
  EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
      << status;
}

TEST(RetryingFileSystemTest, FileExists_SuccessWith2ndTry) {
  ExpectedCalls expected_fs_calls(
      {std::make_tuple("FileExists", errors::Unavailable("Something is wrong")),
       std::make_tuple("FileExists", absl::OkStatus())});
  std::unique_ptr<MockFileSystem> base_fs(
      new MockFileSystem(expected_fs_calls));
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  TF_EXPECT_OK(fs.FileExists("gs://path/dir", nullptr));
}
// NOTE(review): the "gs://..." literals and the DeleteRecursively argument
// lists below were reconstructed — the source was truncated at "gs:" by
// comment-stripping. The mock ignores both the path and the out-params.
TEST(RetryingFileSystemTest, IsDirectory_SuccessWith2ndTry) {
  ExpectedCalls expected_fs_calls(
      {std::make_tuple("IsDirectory",
                       errors::Unavailable("Something is wrong")),
       std::make_tuple("IsDirectory", absl::OkStatus())});
  std::unique_ptr<MockFileSystem> base_fs(
      new MockFileSystem(expected_fs_calls));
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  TF_EXPECT_OK(fs.IsDirectory("gs://path/dir", nullptr));
}

TEST(RetryingFileSystemTest, IsDirectory_AllRetriesFailed) {
  ExpectedCalls expected_fs_calls = CreateRetriableErrors("IsDirectory", 11);
  std::unique_ptr<MockFileSystem> base_fs(
      new MockFileSystem(expected_fs_calls));
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  const auto& status = fs.IsDirectory("gs://path/dir", nullptr);
  EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
      << status;
}

TEST(RetryingFileSystemTest, DeleteRecursively_SuccessWith2ndTry) {
  ExpectedCalls expected_fs_calls(
      {std::make_tuple("DeleteRecursively",
                       errors::Unavailable("Something is wrong")),
       std::make_tuple("DeleteRecursively", absl::OkStatus())});
  std::unique_ptr<MockFileSystem> base_fs(
      new MockFileSystem(expected_fs_calls));
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  int64_t undeleted_files, undeleted_dirs;
  TF_EXPECT_OK(fs.DeleteRecursively("gs://path/dir", nullptr, &undeleted_files,
                                    &undeleted_dirs));
}

TEST(RetryingFileSystemTest, DeleteRecursively_AllRetriesFailed) {
  ExpectedCalls expected_fs_calls =
      CreateRetriableErrors("DeleteRecursively", 11);
  std::unique_ptr<MockFileSystem> base_fs(
      new MockFileSystem(expected_fs_calls));
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  int64_t undeleted_files, undeleted_dirs;
  const auto& status = fs.DeleteRecursively("gs://path/dir", nullptr,
                                            &undeleted_files, &undeleted_dirs);
  EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
      << status;
}

// FlushCaches is not retried; it must simply be forwarded to the base.
TEST(RetryingFileSystemTest, FlushCaches) {
  ExpectedCalls none;
  bool flushed = false;
  std::unique_ptr<MockFileSystem> base_fs(new MockFileSystem(none, &flushed));
  RetryingFileSystem<MockFileSystem> fs(
      std::move(base_fs), RetryConfig(0 /* init_delay_time_us */));
  fs.FlushCaches(nullptr);
  EXPECT_TRUE(flushed);
}
}
}  // namespace tsl
// Source: https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/retrying_file_system.h
// Test:   https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/retrying_file_system_test.cc
// File: third_party/xla/xla/service/gpu/transforms/command_buffer_scheduling.cc
#include "xla/service/gpu/transforms/command_buffer_scheduling.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/ffi/ffi_api.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/variant_visitor.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/semantic_version.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
using CommandBuffer = CommandBufferScheduling::CommandBuffer;
using CommandBufferConfig = CommandBufferScheduling::CommandBufferConfig;
static bool IsCommand(const HloComputation* computation,
const CommandBufferConfig& config);
static bool IsConstant(const HloInstruction* hlo) {
return hlo->opcode() == HloOpcode::kConstant;
}
static bool IsParameter(const HloInstruction* hlo) {
return hlo->opcode() == HloOpcode::kParameter;
}
static bool IsNoOp(const HloInstruction* hlo) {
return HloPredicateIsOp<HloOpcode::kBitcast, HloOpcode::kTuple,
HloOpcode::kGetTupleElement>(hlo);
};
static bool IsAsyncStartCommand(const HloInstruction* hlo,
const CommandBufferConfig& config) {
if (hlo->opcode() == HloOpcode::kAllReduceStart ||
hlo->opcode() == HloOpcode::kAllGatherStart) {
return config.enabled_commands.contains(DebugOptions::COLLECTIVES);
}
if (hlo->opcode() == HloOpcode::kAsyncStart) {
if (IsCublasGemm(*hlo->async_wrapped_instruction())) {
return config.enabled_commands.contains(DebugOptions::CUBLAS);
}
if (hlo->async_wrapped_opcode() == HloOpcode::kFusion) {
return config.enabled_commands.contains(DebugOptions::FUSION);
}
if (hlo->async_wrapped_opcode() == HloOpcode::kReduceScatter ||
hlo->async_wrapped_opcode() == HloOpcode::kAllToAll) {
return config.enabled_commands.contains(DebugOptions::COLLECTIVES);
}
}
if (hlo->opcode() == HloOpcode::kReduceScatter ||
hlo->opcode() == HloOpcode::kAllToAll) {
return config.enabled_commands.contains(DebugOptions::COLLECTIVES);
}
return false;
}
static bool IsAsyncDoneCommand(const HloInstruction* hlo,
const CommandBufferConfig& config) {
if (hlo->opcode() == HloOpcode::kAllReduceDone ||
hlo->opcode() == HloOpcode::kAllGatherDone) {
return config.enabled_commands.contains(DebugOptions::COLLECTIVES);
}
if (hlo->opcode() == HloOpcode::kAsyncDone) {
if (IsCublasGemm(*hlo->async_wrapped_instruction())) {
return config.enabled_commands.contains(DebugOptions::CUBLAS);
}
if (hlo->async_wrapped_opcode() == HloOpcode::kFusion) {
return config.enabled_commands.contains(DebugOptions::FUSION);
}
if (hlo->async_wrapped_opcode() == HloOpcode::kReduceScatter ||
hlo->async_wrapped_opcode() == HloOpcode::kAllToAll) {
return config.enabled_commands.contains(DebugOptions::COLLECTIVES);
}
}
return false;
}
// For an async-start instruction, returns the matching "done" instruction,
// or nullptr if `start` is not a recognized async start op.
static HloInstruction* FindAsyncDoneCommand(const HloInstruction* start) {
  switch (start->opcode()) {
    case HloOpcode::kAllReduceStart:
    case HloOpcode::kAllGatherStart:
      // Collective start ops have exactly one user: the matching done op.
      CHECK(start->users().size() == 1);
      return start->users().front();
    case HloOpcode::kAsyncStart:
      return start->async_chain_done();
    default:
      return nullptr;
  }
}
template <HloOpcode op>
static bool IsCommand(const HloInstruction*, const CommandBufferConfig&);
// A `while` op is a command iff conditional commands are enabled and both
// its body and condition computations consist solely of commands.
template <>
bool IsCommand<HloOpcode::kWhile>(const HloInstruction* hlo,
                                  const CommandBufferConfig& config) {
  if (!config.enabled_commands.contains(DebugOptions::CONDITIONALS)) {
    return false;
  }
  return IsCommand(hlo->while_body(), config) &&
         IsCommand(hlo->while_condition(), config);
}
// A `conditional` op is a command iff conditional commands are enabled and
// every branch computation consists solely of commands.
template <>
bool IsCommand<HloOpcode::kConditional>(const HloInstruction* hlo,
                                        const CommandBufferConfig& config) {
  if (!config.enabled_commands.contains(DebugOptions::CONDITIONALS)) {
    return false;
  }
  for (const HloComputation* branch : hlo->branch_computations()) {
    if (!IsCommand(branch, config)) {
      return false;
    }
  }
  return true;
}
// Decides whether a custom call can be recorded into a command buffer.
// Library calls (cuBLAS, cuBLAS-LT, cuDNN fMHA) are gated by their own
// command types; everything else requires CUSTOM_CALL plus either a legacy
// allow-list entry or an FFI handler registered as command-buffer
// compatible. The guard order below is significant and preserved.
static bool IsCommand(const HloCustomCallInstruction* hlo,
                      const CommandBufferConfig& config) {
  if (config.enabled_commands.contains(DebugOptions::CUBLAS) &&
      IsLegacyCublasMatmul(*hlo)) {
    return true;
  }
  if (config.enabled_commands.contains(DebugOptions::CUBLASLT) &&
      (IsCublasLtMatmul(*hlo) || IsCublasLtMatmulF8(*hlo))) {
    return true;
  }
  if (config.enabled_commands.contains(DebugOptions::CUDNN) &&
      IsCustomCallTofMHA(*hlo)) {
    VLOG(3) << "Recording FusedMHA, target " << hlo->custom_call_target()
            << " into command buffer.";
    return true;
  }
  if (!config.enabled_commands.contains(DebugOptions::CUSTOM_CALL)) {
    return false;
  }
  if (config.enabled_legacy_custom_call_targets.contains(
          hlo->custom_call_target())) {
    VLOG(3) << "Recording legacy custom call target "
            << hlo->custom_call_target() << " into command buffer.";
    return true;
  }
  // Otherwise fall back to the FFI registry: the handler must exist and be
  // marked command-buffer compatible.
  auto registration = ffi::FindHandler(hlo->custom_call_target(), "gpu");
  if (!registration.ok()) {
    return false;
  }
  return ffi::IsCommandBufferCompatible(registration->traits);
}
// Decides whether a single instruction can be recorded into a command
// buffer under `config`. Dispatches on the instruction kind: fusions,
// sorts, partition/replica ids, custom calls, and control-flow ops each
// have their own gating rules.
static bool IsCommand(const HloInstruction* hlo,
                      const CommandBufferConfig& config) {
  if (auto* fusion = DynCast<HloFusionInstruction>(hlo)) {
    auto gpu_config = fusion->backend_config<GpuBackendConfig>();
    const FusionBackendConfig& backend_config =
        gpu_config->fusion_backend_config();
    if (backend_config.kind() == kCuDnnFusionKind) {
      return config.enabled_commands.contains(DebugOptions::CUDNN);
    }
    const auto& custom_config = backend_config.custom_fusion_config();
    if (custom_config.name() == "address_computation") {
      // A static address-computation fusion is a command iff its hero
      // (custom call or reduce-scatter) is itself a command.
      auto fusion_analysis =
          HloFusionAnalysis::Create(*hlo, config.device_description);
      const HloFusionAdaptor& adaptor = fusion_analysis.fusion();
      auto hero_adaptor =
          HloBfsFindIf(adaptor.GetRoots(), adaptor, [](auto node) {
            return node.opcode() == HloOpcode::kCustomCall ||
                   node.opcode() == HloOpcode::kReduceScatter;
          });
      const HloInstruction* hero = &hero_adaptor->instruction();
      return IsCommand(hero, config) || IsAsyncStartCommand(hero, config);
    }
    if (custom_config.name() == "dynamic_address_computation") {
      // Dynamic slicing needs host-side address computation and cannot be
      // represented as a command.
      return false;
    }
    return config.enabled_commands.contains(DebugOptions::FUSION);
  }

  // Sort lowers to fusion-style kernels and is gated by FUSION. (Fixed:
  // the previous `auto* sort = DynCast<...>` bound an unused local.)
  if (DynCast<HloSortInstruction>(hlo) != nullptr) {
    return config.enabled_commands.contains(DebugOptions::FUSION);
  }

  if (hlo->opcode() == HloOpcode::kPartitionId ||
      hlo->opcode() == HloOpcode::kReplicaId) {
    return config.enabled_commands.contains(DebugOptions::FUSION);
  }

  if (auto* custom_call = DynCast<HloCustomCallInstruction>(hlo)) {
    return IsCommand(custom_call, config);
  }

  if (hlo->opcode() == HloOpcode::kWhile) {
    return IsCommand<HloOpcode::kWhile>(hlo, config);
  }

  if (hlo->opcode() == HloOpcode::kConditional) {
    return IsCommand<HloOpcode::kConditional>(hlo, config);
  }

  return false;
}
static bool IsCommand(const HloComputation* computation,
const CommandBufferConfig& config) {
return absl::c_all_of(
computation->instructions(), [&](const HloInstruction* inst) {
return IsNoOp(inst) || IsConstant(inst) || IsParameter(inst) ||
IsCommand(inst, config) || IsAsyncStartCommand(inst, config) ||
IsAsyncDoneCommand(inst, config);
});
}
// Strips no-op instructions from the tail of `seq` so that a command
// buffer does not end with pure bookkeeping ops.
static void RemoveTrailingNoOps(HloInstructionSequence& seq) {
  // Iterate over a snapshot: remove_instruction mutates the sequence.
  std::vector<HloInstruction*> snapshot = seq.instructions();
  for (auto it = snapshot.rbegin(); it != snapshot.rend(); ++it) {
    HloInstruction* inst = *it;
    if (!IsNoOp(inst)) {
      break;
    }
    seq.remove_instruction(inst);
  }
}
// Partitions the scheduled instruction stream into subsequences that can be
// outlined into command buffers. A subsequence is emitted only if it contains
// at least max(1, min_num_commands) real commands (no-ops do not count).
std::vector<HloInstructionSequence>
CommandBufferScheduling::CollectCommandBufferSequences(
    const HloInstructionSequence schedule, const CommandBufferConfig& config,
    int32_t min_num_commands) {
  std::vector<HloInstructionSequence> sequences;
  // Subsequence currently being grown, plus the number of counted commands
  // in it (no-ops are appended but never counted).
  HloInstructionSequence current_seq;
  int64_t num_commands_in_current_seq = 0;
  // Flushes `current_seq` into `sequences` when it is large enough, then
  // resets the accumulator. Trailing no-ops are stripped so a flushed
  // sequence always ends on a command.
  auto collect_current_seq = [&]() {
    if (num_commands_in_current_seq >= std::max(1, min_num_commands)) {
      RemoveTrailingNoOps(current_seq);
      sequences.push_back(std::move(current_seq));
    }
    current_seq = HloInstructionSequence();
    num_commands_in_current_seq = 0;
  };
  auto& instructions = schedule.instructions();
  // Returns the [start .. matching done] window for an async start. If more
  // async starts appear inside the window, it is extended to also cover
  // their done instructions.
  auto collect_async_region = [&](const HloInstruction* start) {
    auto get_index = [&](const HloInstruction* inst) -> size_t {
      auto it = std::find(instructions.begin(), instructions.end(), inst);
      return std::distance(instructions.begin(), it);
    };
    HloInstructionSequence seq;
    size_t done_index = get_index(FindAsyncDoneCommand(start));
    for (size_t i = get_index(start); i <= done_index; i++) {
      HloInstruction* inst = instructions.at(i);
      if (IsAsyncStartCommand(inst, config)) {
        const HloInstruction* done = FindAsyncDoneCommand(inst);
        done_index = std::max(done_index, get_index(done));
      }
      seq.push_back(inst);
    }
    return seq;
  };
  // An async region is admissible only if every instruction in it is
  // command-buffer compatible AND every async done has its matching start
  // earlier in the same region.
  auto check_async_region = [&](const HloInstructionSequence& seq) {
    if (!absl::c_all_of(seq.instructions(), [&](HloInstruction* inst) {
          return IsNoOp(inst) || IsCommand(inst, config) ||
                 IsAsyncStartCommand(inst, config) ||
                 IsAsyncDoneCommand(inst, config);
        })) {
      return false;
    }
    absl::flat_hash_set<HloInstruction*> done_instructions;
    for (const HloInstruction* inst : seq.instructions()) {
      if (IsAsyncStartCommand(inst, config)) {
        done_instructions.insert(FindAsyncDoneCommand(inst));
      }
      if (IsAsyncDoneCommand(inst, config)) {
        if (!done_instructions.contains(inst)) {
          return false;
        }
      }
    }
    return true;
  };
  for (size_t i = 0; i < instructions.size(); i++) {
    HloInstruction* inst = instructions.at(i);
    // A no-op only extends a sequence that already contains commands; it can
    // never start one.
    if (IsNoOp(inst) && num_commands_in_current_seq) {
      current_seq.push_back(inst);
      continue;
    }
    if (IsCommand(inst, config)) {
      num_commands_in_current_seq++;
      current_seq.push_back(inst);
      continue;
    }
    // Async starts are admitted only as a whole validated region; `i` skips
    // past the region so its instructions are not visited twice.
    if (IsAsyncStartCommand(inst, config)) {
      HloInstructionSequence seq = collect_async_region(inst);
      if (check_async_region(seq)) {
        num_commands_in_current_seq += seq.instructions().size();
        for (HloInstruction* inst : seq.instructions()) {
          current_seq.push_back(inst);
        }
        i += seq.instructions().size() - 1;
        continue;
      }
    }
    // Any non-command instruction terminates the current subsequence.
    collect_current_seq();
  }
  // Flush whatever remains at the end of the schedule.
  collect_current_seq();
  return sequences;
}
// Reorders the computation's schedule so that all parameters and constants
// come first, which lets later passes outline contiguous command regions.
// Control predecessors of a moved parameter/constant are forwarded to its
// users (the moved instruction can no longer honor them), then dropped.
// Returns true iff the instruction order actually changed.
absl::StatusOr<bool> CommandBufferScheduling::MoveParametersAndConstantsToFront(
    HloComputation* computation) {
  HloInstructionSequence new_sequence;
  HloSchedule& schedule = computation->parent()->schedule();
  HloInstructionSequence& sequence = schedule.GetOrCreateSequence(computation);
  // First pass: collect parameters/constants and rewire their control deps.
  for (HloInstruction* inst : sequence.instructions()) {
    if (IsParameter(inst) || IsConstant(inst)) {
      new_sequence.push_back(inst);
      for (HloInstruction* control_predecessor : inst->control_predecessors()) {
        for (HloInstruction* user : inst->users()) {
          TF_RETURN_IF_ERROR(control_predecessor->AddControlDependencyTo(user));
        }
      }
      TF_RETURN_IF_ERROR(inst->DropAllControlDeps());
    }
  }
  // Second pass: append everything else in its original relative order.
  for (HloInstruction* inst : sequence.instructions()) {
    if (!IsParameter(inst) && !IsConstant(inst)) {
      new_sequence.push_back(inst);
    }
  }
  // Detect reordering BEFORE installing the new sequence: `sequence` aliases
  // the schedule's stored sequence for `computation`, so comparing after
  // set_sequence() would compare `new_sequence` to itself element-wise and
  // this function would always report "no change".
  bool changed = false;
  for (auto [old_i, new_i] :
       llvm::zip(sequence.instructions(), new_sequence.instructions())) {
    if (old_i != new_i) {
      changed = true;
      break;
    }
  }
  schedule.set_sequence(computation, new_sequence);
  return changed;
}
// Builds (but does not install) a "command_buffer" computation that clones
// the instructions in `seq`. Operands defined outside `seq` become
// parameters; instructions with users outside `seq` (or the computation
// root) become results, tupled when there is more than one.
absl::StatusOr<CommandBuffer> CommandBufferScheduling::PrepareCommandBuffer(
    const HloInstructionSequence& seq, HloModule* module) {
  auto builder = HloComputation::Builder("command_buffer");
  absl::Span<HloInstruction* const> instructions =
      absl::MakeSpan(seq.instructions());
  // Fast membership test for "is this instruction part of the region".
  absl::flat_hash_set<HloInstruction*> in_command_buffer(instructions.begin(),
                                                         instructions.end());
  // Maps external operands to the parameters created for them, and every
  // original instruction to its clone inside the new computation.
  absl::flat_hash_map<HloInstruction*, HloParameterInstruction*> parameters;
  absl::flat_hash_map<HloInstruction*, HloInstruction*> inst_mapping;
  // Translates an instruction's operands through inst_mapping; operands with
  // no mapping (none should remain after parameter creation) are skipped.
  auto mapped_operands = [&](HloInstruction* instr) {
    absl::InlinedVector<HloInstruction*, 4> operands;
    for (HloInstruction* operand : instr->operands()) {
      if (auto it = inst_mapping.find(operand); it != inst_mapping.end())
        operands.push_back(it->second);
    }
    return operands;
  };
  // Create one parameter per distinct operand that comes from outside the
  // command buffer region; parameter numbers follow discovery order.
  for (HloInstruction* inst : instructions) {
    for (HloInstruction* operand : inst->operands()) {
      if (parameters.contains(operand)) continue;
      if (in_command_buffer.contains(operand)) continue;
      int64_t parameter_id = parameters.size();
      auto* parameter = Cast<HloParameterInstruction>(
          builder.AddInstruction(HloInstruction::CreateParameter(
              parameter_id, operand->shape(), "p")));
      parameter->UniquifyName(module);
      parameter->UniquifyId(module);
      inst_mapping[operand] = parameters[operand] = parameter;
    }
  }
  // Clone the region's instructions in schedule order. Async called
  // computations are unwrapped (RemoveAsyncStart) before being mapped to
  // themselves in the clone context, so the clones call them directly.
  for (HloInstruction* inst : seq.instructions()) {
    HloCloneContext ctx(inst->GetModule());
    for (HloComputation* called_computation : inst->called_computations()) {
      if (called_computation->IsAsyncComputation()) {
        called_computation->RemoveAsyncStart();
      }
      ctx.MapComputation(called_computation, called_computation);
    }
    inst_mapping[inst] = builder.AddInstruction(
        inst->CloneWithNewOperands(inst->shape(), mapped_operands(inst), &ctx));
    inst_mapping[inst]->UniquifyId(module);
  }
  // Arguments are the original external operands, indexed by the parameter
  // number each one was assigned above.
  std::vector<HloInstruction*> arguments(parameters.size());
  for (auto& [argument, parameter] : parameters) {
    arguments[parameter->parameter_number()] = argument;
  }
  // Results: original instructions whose value escapes the region, either as
  // the computation root or via a user outside the command buffer.
  std::vector<HloInstruction*> results;
  std::vector<HloInstruction*> returned;
  auto has_external_users = [&](HloInstruction* inst) {
    return inst->IsRoot() || absl::c_any_of(inst->users(), [&](auto* user) {
      return !in_command_buffer.contains(user);
    });
  };
  for (HloInstruction* inst : instructions) {
    if (has_external_users(inst)) {
      results.push_back(inst);
      returned.push_back(inst_mapping[inst]);
    }
  }
  // Multiple results are packed into a tuple, which becomes the root (the
  // builder roots the last added instruction).
  if (returned.size() > 1) {
    HloInstruction* inst =
        builder.AddInstruction(HloInstruction::CreateTuple(returned));
    inst->UniquifyName(module);
    inst->UniquifyId(module);
  }
  std::unique_ptr<HloComputation> comp = builder.Build();
  comp->UniquifyName(module);
  comp->SetUniqueId(comp->root_instruction()->unique_id());
  return CommandBuffer{std::move(arguments), std::move(results),
                       std::move(comp), std::move(inst_mapping)};
}
// Installs `command_buffer` into the module: adds its computation, replaces
// the outlined region in `parent` with a call to it, rewires uses, updates
// both schedules, forwards control dependencies, and erases the originals.
absl::StatusOr<HloComputation*> CommandBufferScheduling::RewriteCommandBuffer(
    HloComputation* parent, const HloInstructionSequence& seq,
    CommandBuffer command_buffer) {
  if (command_buffer.results.empty())
    return absl::InternalError("command buffer results must not be empty");
  // The call's shape is the single result's shape, or a tuple of all result
  // shapes when there is more than one.
  Shape cmd_buffer_result_shape;
  bool has_single_result = command_buffer.results.size() == 1;
  if (has_single_result) {
    cmd_buffer_result_shape = command_buffer.results[0]->shape();
  } else {
    absl::InlinedVector<Shape, 4> shapes;
    shapes.reserve(command_buffer.results.size());
    for (auto* res : command_buffer.results) shapes.push_back(res->shape());
    cmd_buffer_result_shape = ShapeUtil::MakeTupleShape(shapes);
  }
  HloComputation* computation =
      parent->parent()->AddComputation(std::move(command_buffer.computation),
                                       false);
  HloInstruction* call = parent->AddInstruction(HloInstruction::CreateCall(
      cmd_buffer_result_shape, command_buffer.arguments, computation));
  // External uses of the outlined results are redirected to the call (or to
  // get-tuple-element projections of it when there are multiple results).
  if (has_single_result) {
    TF_RETURN_IF_ERROR(command_buffer.results[0]->ReplaceAllUsesWith(call));
  } else {
    for (int i = 0; i < command_buffer.results.size(); i++) {
      TF_RETURN_IF_ERROR(
          command_buffer.results[i]->ReplaceAllUsesWith(parent->AddInstruction(
              HloInstruction::CreateGetTupleElement(call, i))));
    }
  }
  // In the parent schedule, the call takes the slot of the region's last
  // instruction (the rest of the region is removed below).
  HloSchedule& schedule = parent->parent()->schedule();
  HloInstructionSequence& sequence = schedule.GetOrCreateSequence(parent);
  sequence.replace_instruction(seq.instructions().back(), call);
  // Schedule for the new computation: parameters first (in argument order),
  // then the cloned instructions, then the tuple root if one was created.
  HloInstructionSequence cmd_buffer_schedule;
  for (auto* argument : command_buffer.arguments) {
    cmd_buffer_schedule.push_back(command_buffer.inst_mapping[argument]);
  }
  for (auto* inst : seq.instructions()) {
    cmd_buffer_schedule.push_back(command_buffer.inst_mapping[inst]);
  }
  if (!has_single_result) {
    cmd_buffer_schedule.push_back(computation->root_instruction());
  }
  schedule.set_sequence(computation, cmd_buffer_schedule);
  // Forward control dependencies:
  //  - predecessor inside the region -> dependency between the clones
  //    (unless the clone is a parameter, which cannot carry the edge; then
  //    the original predecessor constrains the call instead);
  //  - predecessor/successor outside the region -> edge moved to the call.
  auto& inst_mapping = command_buffer.inst_mapping;
  for (HloInstruction* inst : seq.instructions()) {
    HloInstruction* cmd_inst = inst_mapping[inst];
    for (HloInstruction* predecessor : inst->control_predecessors()) {
      if (auto it = inst_mapping.find(predecessor); it != inst_mapping.end()) {
        HloInstruction* cmd_predecessor = it->second;
        if (IsParameter(cmd_predecessor)) {
          TF_RETURN_IF_ERROR(predecessor->AddControlDependencyTo(call));
        } else {
          TF_RETURN_IF_ERROR(cmd_predecessor->AddControlDependencyTo(cmd_inst));
        }
      } else {
        TF_RETURN_IF_ERROR(predecessor->AddControlDependencyTo(call));
      }
    }
    for (HloInstruction* successor : inst->control_successors()) {
      if (auto it = inst_mapping.find(successor); it != inst_mapping.end()) {
        HloInstruction* cmd_successor = it->second;
        TF_RETURN_IF_ERROR(cmd_inst->AddControlDependencyTo(cmd_successor));
      } else {
        TF_RETURN_IF_ERROR(call->AddControlDependencyTo(successor));
      }
    }
    TF_RETURN_IF_ERROR(inst->DropAllControlDeps());
  }
  // Remove originals in reverse schedule order so each instruction is
  // user-free by the time it is deleted.
  for (int32_t i = seq.instructions().size() - 1; i >= 0; i--) {
    TF_RETURN_IF_ERROR(parent->RemoveInstruction(seq.instructions()[i]));
  }
  return computation;
}
// Captures the target device description used to decide which command types
// are supported during Run().
CommandBufferScheduling::CommandBufferScheduling(
    const se::DeviceDescription& device_description)
    : device_description_(device_description) {}
// Pass entry point. Requires a scheduled module. Builds the command buffer
// config from debug options, prunes command types unsupported by the GPU
// toolkit/driver, then outlines qualifying instruction sequences in every
// eligible computation. Returns true if the module changed.
absl::StatusOr<bool> CommandBufferScheduling::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  if (!module->has_schedule()) return Internal("module is not scheduled");
  const DebugOptions& debug_options = module->config().debug_options();
  // Enabled command types and legacy custom-call targets come straight from
  // the debug options.
  absl::flat_hash_set<DebugOptions::CommandBufferCmdType> commands;
  for (auto cmd_type : debug_options.xla_gpu_enable_command_buffer()) {
    commands.insert(static_cast<DebugOptions::CommandBufferCmdType>(cmd_type));
  }
  absl::flat_hash_set<std::string> legacy_custom_call_targets;
  for (const auto& target :
       debug_options.legacy_command_buffer_custom_call_targets()) {
    legacy_custom_call_targets.insert(target);
  }
  CommandBufferConfig config{std::move(commands),
                             std::move(legacy_custom_call_targets),
                             device_description_};
  // Command types that need newer GPU runtime/driver support: conditionals
  // on their own, and the tracing-based commands as a group.
  static constexpr auto kRequireConditionals = {DebugOptions::CONDITIONALS};
  static constexpr auto kRequireTracing = {
      DebugOptions::CUBLAS, DebugOptions::CUBLASLT, DebugOptions::CUDNN,
      DebugOptions::CUSTOM_CALL, DebugOptions::COLLECTIVES};
  // Removes the given command types from the config, logging why.
  auto erase = [&](absl::Span<const DebugOptions::CommandBufferCmdType> cmds) {
    for (auto cmd : cmds) {
      if (config.enabled_commands.erase(cmd)) {
        VLOG(1) << "Removed command buffer support for "
                << DebugOptions::CommandBufferCmdType_Name(cmd)
                << " as it's not supported with gpu toolkit version "
                << device_description_.runtime_version()
                << " and driver version "
                << device_description_.driver_version()
                << ". This might negatively impact peformance. To enable "
                << DebugOptions::CommandBufferCmdType_Name(cmd)
                << " support in command buffers use cuda-compat package: "
#if defined(PLATFORM_GOOGLE)
                << "set CUDA_COMPAT_LOAD=1 env variable.";
#else
                << "https:
#endif
      }
    }
  };
  // CUDA: tracing and conditional commands require CUDA >= 12.3 in both the
  // runtime and the driver. ROCm: conditionals are not supported.
  auto erase_cuda = [&](const se::CudaComputeCapability& cuda_comp) {
    if (std::min(device_description_.runtime_version(),
                 device_description_.driver_version()) <
        se::SemanticVersion{12, 3, 0}) {
      erase(kRequireTracing);
      erase(kRequireConditionals);
    }
  };
  auto erase_rocm = [&](const se::RocmComputeCapability& rocm_comp) {
    erase(kRequireConditionals);
  };
  std::visit(VariantVisitor{erase_cuda, erase_rocm},
             device_description_.gpu_compute_capability());
  // Visit computations in reverse post-order (callers before callees) so
  // computations embedded in freshly created command buffers can be skipped.
  auto order = module->MakeComputationPostOrder();
  std::reverse(order.begin(), order.end());
  absl::flat_hash_set<HloComputation*> processed_command_buffers;
  auto changed = false;
  for (HloComputation* comp : order) {
    // Fusion/async/custom-call computations are bodies of single commands,
    // not candidates for outlining themselves.
    if (comp->IsFusionComputation() || comp->IsAsyncComputation() ||
        comp->IsCustomCallComputation())
      continue;
    if (processed_command_buffers.contains(comp)) continue;
    TF_ASSIGN_OR_RETURN(bool changed_, MoveParametersAndConstantsToFront(comp));
    changed |= changed_;
    std::vector<HloInstructionSequence> sequences =
        CollectCommandBufferSequences(
            module->schedule().sequence(comp), config,
            debug_options.xla_gpu_graph_min_graph_size());
    for (const HloInstructionSequence& seq : sequences) {
      TF_ASSIGN_OR_RETURN(CommandBuffer command_buffer,
                          PrepareCommandBuffer(seq, comp->parent()));
      TF_ASSIGN_OR_RETURN(
          HloComputation * command_buffer_computation,
          RewriteCommandBuffer(comp, seq, std::move(command_buffer)));
      changed = true;
      // Everything embedded in the new command buffer is already handled.
      for (HloComputation* called :
           command_buffer_computation->MakeEmbeddedComputationsList()) {
        processed_command_buffers.insert(called);
      }
    }
  }
  TF_RETURN_IF_ERROR(module->schedule().Update());
  return changed;
}
} | #include "xla/service/gpu/transforms/command_buffer_scheduling.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/gpu_executable.h"
#include "xla/service/hlo_parser.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
// Test fixture: enables every command buffer type exercised by the tests and
// lowers the min graph size so small test graphs still get outlined.
class CommandBufferSchedulingTest : public HloTestBase {
 public:
  se::DeviceDescription device_desc() {
    return TestGpuDeviceInfo::CudaOrRocmDeviceInfo();
  }

  DebugOptions GetDebugOptionsForTest() override {
    auto opts = HloTestBase::GetDebugOptionsForTest();
    for (auto cmd :
         {DebugOptions::FUSION, DebugOptions::CONDITIONALS,
          DebugOptions::COLLECTIVES, DebugOptions::CUDNN,
          DebugOptions::CUBLASLT, DebugOptions::CUSTOM_CALL}) {
      opts.add_xla_gpu_enable_command_buffer(cmd);
    }
    opts.set_xla_gpu_graph_min_graph_size(2);
    return opts;
  }
};
using CommandBuffer = CommandBufferScheduling::CommandBuffer;
// Two fusions feeding a custom call: the fusions should be outlined into one
// command buffer.
TEST_F(CommandBufferSchedulingTest, SingleCommandBuffer) {
  const char* hlo = R"(
      HloModule TestModule, is_scheduled=true

      %fused_computation (param_0: s32[], param_1: s32[]) -> s32[] {
        %p0 = s32[] parameter(0)
        %p1 = s32[] parameter(1)
        ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
      }

      %fused_computation.1 (param_0: s32[], param_1: s32[]) -> s32[] {
        %p0 = s32[] parameter(0)
        %p1 = s32[] parameter(1)
        ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
      }

      ENTRY %main (a: s32[], b: s32[]) -> s32[] {
        %a = s32[] parameter(0)
        %b = s32[] parameter(1)
        %fusion = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation
        %fusion.1 = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation.1
        ROOT %custom-call = s32[] custom-call(s32[] %fusion, s32[] %fusion.1), custom_call_target="some target"
      })";

  // NOTE(review): the FileCheck pattern below appears truncated (the raw
  // string literal is never closed) — restore it from upstream.
  const char* expected = R"(
  RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()),
                            expected, [](HloModule* module) {
                              EXPECT_TRUE(module->has_schedule());
                              TF_CHECK_OK(module->schedule().Verify());
                            });
}
// A schedule interrupted by custom calls should be split into multiple
// command buffers.
TEST_F(CommandBufferSchedulingTest, MultipleCommandBuffers) {
  const char* hlo = R"(
      HloModule TestModule, is_scheduled=true

      %fused_computation(param_0: s32[], param_1: s32[]) -> s32[] {
        %p0 = s32[] parameter(0)
        %p1 = s32[] parameter(1)
        ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
      }

      %fused_computation.1(param_0: s32[], param_1: s32[]) -> s32[] {
        %p0 = s32[] parameter(0)
        %p1 = s32[] parameter(1)
        ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
      }

      %fused_computation.2(param_0: s32[], param_1: s32[]) -> s32[] {
        %p0 = s32[] parameter(0)
        %p1 = s32[] parameter(1)
        ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
      }

      %fused_computation.3(param_0: s32[], param_1: s32[]) -> s32[] {
        %p0 = s32[] parameter(0)
        %p1 = s32[] parameter(1)
        ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
      }

      ENTRY %main (a: s32[], b: s32[], c: (s32[], s32[])) -> s32[] {
        %a = s32[] parameter(0)
        %b = s32[] parameter(1)
        %c = (s32[], s32[]) parameter(2)
        %fusion = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation
        %d = s32[] get-tuple-element((s32[], s32[]) %c), index=0
        %fusion.1 = s32[] fusion(s32[] %fusion, s32[] %d), kind=kLoop, calls=%fused_computation.1
        %e = s32[] get-tuple-element((s32[], s32[]) %c), index=1
        %custom-call = s32[] custom-call(s32[] %fusion.1, s32[] %e), custom_call_target="some target"
        %fusion.2 = s32[] fusion(s32[] %custom-call, s32[] %a), kind=kLoop, calls=%fused_computation.2
        %fusion.3 = s32[] fusion(s32[] %custom-call, s32[] %fusion.2), kind=kLoop, calls=%fused_computation.3
        ROOT %custom-call.1 = s32[] custom-call(s32[] %fusion.3), custom_call_target="some target"
      })";

  // NOTE(review): the FileCheck pattern below appears truncated (the raw
  // string literal is never closed) — restore it from upstream.
  const char* expected = R"(
  RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()),
                            expected, [](HloModule* module) {
                              EXPECT_TRUE(module->has_schedule());
                              TF_CHECK_OK(module->schedule().Verify());
                            });
}
// A synchronous all-reduce start/done pair should be outlined into a single
// command buffer computation called from the entry.
TEST_F(CommandBufferSchedulingTest, AllReduceStartFollowedByDone) {
  const char* hlo = R"(
    HloModule TestModule, is_scheduled=true

    %add (p0: s32[4], p1: s32[4]) -> s32[4] {
      %p0 = s32[4] parameter(0)
      %p1 = s32[4] parameter(1)
      ROOT %add = s32[4] add(s32[4] %p0, s32[4] %p1)
    }

    ENTRY %main (a: s32[4]) -> s32[4] {
      %a = s32[4] parameter(0)
      %start = s32[4]{0} all-reduce-start(s32[4]{0} %a),
        replica_groups={{0,1}}, to_apply=%add,
        backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}
      ROOT %done = s32[4]{0} all-reduce-done(s32[4]{0} %start)
    })";

  const char* expected = R"(
    CHECK: %command_buffer ([[P0:.+]]: s32[4]) -> s32[4] {
    CHECK:   %[[P0]] = s32[4]{0} parameter(0)
    CHECK:   %[[START:.+]] = s32[4]{0} all-reduce-start(%[[P0]])
    CHECK:   ROOT %[[DONE:.+]] = s32[4]{0} all-reduce-done(%[[START]])
    CHECK: }

    CHECK: ENTRY %main (a: s32[4]) -> s32[4] {
    CHECK:   %[[A:.+]] = s32[4]{0} parameter(0)
    CHECK:   ROOT %[[CALL:.+]] = s32[4]{0} call(%[[A]]),
    CHECK:     to_apply=%command_buffer
    CHECK: })";

  RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()),
                            expected, [](HloModule* module) {
                              EXPECT_TRUE(module->has_schedule());
                              TF_CHECK_OK(module->schedule().Verify());
                            });
}
// Same as above for all-gather: the sync start/done pair is captured into a
// command buffer.
TEST_F(CommandBufferSchedulingTest, AllGatherStartFollowedByDone) {
  const char* hlo = R"(
    HloModule TestModule, is_scheduled=true

    ENTRY %main (a: s32[2]) -> s32[4] {
      %a = s32[2] parameter(0)

      %start = (s32[2]{0}, s32[4]{0}) all-gather-start(%a),
        channel_id=555, replica_groups={{0,1}}, dimensions={0},
        backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}

      ROOT %done = s32[4]{0} all-gather-done(%start)
    })";

  const char* expected = R"(
    CHECK: %command_buffer ([[P0:.+]]: s32[2]) -> s32[4] {
    CHECK:   %[[P0]] = s32[2]{0} parameter(0)
    CHECK:   %[[START:.+]] = {{.*}} all-gather-start(%[[P0]])
    CHECK:   ROOT %[[DONE:.+]] = s32[4]{0} all-gather-done(%[[START]])
    CHECK: }

    CHECK: ENTRY %main (a: s32[2]) -> s32[4] {
    CHECK:   %[[A:.+]] = s32[2]{0} parameter(0)
    CHECK:   ROOT %[[CALL:.+]] = s32[4]{0} call(%[[A]]),
    CHECK:     to_apply=%command_buffer
    CHECK: })";

  RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()),
                            expected, [](HloModule* module) {
                              EXPECT_TRUE(module->has_schedule());
                              TF_CHECK_OK(module->schedule().Verify());
                            });
}
// Same as above for reduce-scatter: the sync start/done pair is captured
// into a command buffer.
TEST_F(CommandBufferSchedulingTest, ReduceScatterStartFollowedByDone) {
  const char* hlo = R"(
    HloModule TestModule, is_scheduled=true

    %add (p0: s32[], p1: s32[]) -> s32[] {
      %p0 = s32[] parameter(0)
      %p1 = s32[] parameter(1)
      ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
    }

    ENTRY %main (a: s32[4]) -> s32[2] {
      %a = s32[4] parameter(0)

      %start = ((s32[4]{0}), s32[2]{0}) reduce-scatter-start(%a),
        channel_id=555, replica_groups={{0,1}}, dimensions={0}, to_apply=add,
        backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}

      ROOT %done = s32[2]{0} reduce-scatter-done(%start)
    })";

  const char* expected = R"(
    CHECK: %command_buffer ([[P0:.+]]: s32[4]) -> s32[2] {
    CHECK:   %[[P0]] = s32[4]{0} parameter(0)
    CHECK:   %[[START:.+]] = {{.*}} reduce-scatter-start(%[[P0]])
    CHECK:   ROOT %[[DONE:.+]] = s32[2]{0} reduce-scatter-done(%[[START]])
    CHECK: }

    CHECK: ENTRY %main (a: s32[4]) -> s32[2] {
    CHECK:   %[[A:.+]] = s32[4]{0} parameter(0)
    CHECK:   ROOT %[[CALL:.+]] = s32[2]{0} call(%[[A]]),
    CHECK:     to_apply=%command_buffer
    CHECK: })";

  RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()),
                            expected, [](HloModule* module) {
                              EXPECT_TRUE(module->has_schedule());
                              TF_CHECK_OK(module->schedule().Verify());
                            });
}
// A no-op (bitcast) between the async start and done should be captured into
// the same command buffer as the pair.
TEST_F(CommandBufferSchedulingTest, AllReduceStartFollowedByBitcast) {
  const char* hlo = R"(
    HloModule TestModule, is_scheduled=true

    %add (p0: s32[4], p1: s32[4]) -> s32[4] {
      %p0 = s32[4] parameter(0)
      %p1 = s32[4] parameter(1)
      ROOT %add = s32[4] add(s32[4] %p0, s32[4] %p1)
    }

    ENTRY %main (a: s32[4]) -> s32[4] {
      %a = s32[4] parameter(0)
      %start = s32[4]{0} all-reduce-start(s32[4]{0} %a),
        replica_groups={{0,1}}, to_apply=%add,
        backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}
      %bitcast = s32[4] bitcast(s32[4]{0} %a)
      ROOT %done = s32[4]{0} all-reduce-done(s32[4]{0} %start)
    })";

  const char* expected = R"(
    CHECK: %command_buffer ([[P0:.+]]: s32[4]) -> s32[4] {
    CHECK:   %[[P0]] = s32[4]{0} parameter(0)
    CHECK:   %[[START:.+]] = s32[4]{0} all-reduce-start(%[[P0]])
    CHECK:   %[[BITCAST:.+]] = s32[4]{0} bitcast(%[[P0]])
    CHECK:   ROOT %[[DONE:.+]] = s32[4]{0} all-reduce-done(%[[START]])
    CHECK: }

    CHECK: ENTRY %main (a: s32[4]) -> s32[4] {
    CHECK:   %[[A:.+]] = s32[4]{0} parameter(0)
    CHECK:   ROOT %[[CALL:.+]] = s32[4]{0} call(%[[A]]),
    CHECK:     to_apply=%command_buffer
    CHECK: })";

  RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()),
                            expected, [](HloModule* module) {
                              EXPECT_TRUE(module->has_schedule());
                              TF_CHECK_OK(module->schedule().Verify());
                            });
}
// Two overlapping async start/done pairs form one extended async region and
// are captured together into one command buffer.
TEST_F(CommandBufferSchedulingTest, AllReduceStartFollowedAllReduceStart) {
  const char* hlo = R"(
    HloModule TestModule, is_scheduled=true

    %add (p0: s32[4], p1: s32[4]) -> s32[4] {
      %p0 = s32[4] parameter(0)
      %p1 = s32[4] parameter(1)
      ROOT %add = s32[4] add(s32[4] %p0, s32[4] %p1)
    }

    ENTRY %main (a: s32[4]) -> s32[4] {
      %a = s32[4] parameter(0)
      %start1 = s32[4]{0} all-reduce-start(s32[4]{0} %a),
        replica_groups={{0,1}}, to_apply=%add,
        backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}
      %start2 = s32[4]{0} all-reduce-start(s32[4]{0} %a),
        replica_groups={{0,1}}, to_apply=%add,
        backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}
      %done1 = s32[4]{0} all-reduce-done(s32[4]{0} %start1)
      ROOT %done2 = s32[4]{0} all-reduce-done(s32[4]{0} %start2)
    })";

  const char* expected = R"(
    CHECK: %command_buffer ([[P0:.+]]: s32[4]) -> s32[4] {
    CHECK:   %[[P0]] = s32[4]{0} parameter(0)
    CHECK:   %[[START1:.+]] = s32[4]{0} all-reduce-start(%[[P0]])
    CHECK:   %[[START2:.+]] = s32[4]{0} all-reduce-start(%[[P0]])
    CHECK:   %[[DONE1:.+]] = s32[4]{0} all-reduce-done(%[[START1]])
    CHECK:   ROOT %[[DONE2:.+]] = s32[4]{0} all-reduce-done(%[[START2]])
    CHECK: }

    CHECK: ENTRY %main (a: s32[4]) -> s32[4] {
    CHECK:   %[[A:.+]] = s32[4]{0} parameter(0)
    CHECK:   ROOT %[[CALL:.+]] = s32[4]{0} call(%[[A]]),
    CHECK:     to_apply=%command_buffer
    CHECK: })";

  RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()),
                            expected, [](HloModule* module) {
                              EXPECT_TRUE(module->has_schedule());
                              TF_CHECK_OK(module->schedule().Verify());
                            });
}
// An async region interleaved with an unsupported custom call must not be
// captured; only the trailing fusions get a command buffer.
TEST_F(CommandBufferSchedulingTest, DoNotCaptureUnmatchedAsyncDone) {
  const char* hlo = R"(
    HloModule TestModule, is_scheduled=true

    %fused_computation(param_0: s32[], param_1: s32[]) -> s32[] {
      %p0 = s32[] parameter(0)
      %p1 = s32[] parameter(1)
      ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
    }

    %fused_computation.1(param_0: s32[], param_1: s32[]) -> s32[] {
      %p0 = s32[] parameter(0)
      %p1 = s32[] parameter(1)
      ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
    }

    %add (p0: s32[4], p1: s32[4]) -> s32[4] {
      %p0 = s32[4] parameter(0)
      %p1 = s32[4] parameter(1)
      ROOT %add = s32[4] add(s32[4] %p0, s32[4] %p1)
    }

    ENTRY %main (a: s32[4], b:s32[]) -> s32[] {
      %a = s32[4] parameter(0)
      %b = s32[] parameter(1)
      %start1 = s32[4]{0} all-reduce-start(s32[4]{0} %a),
        replica_groups={{0,1}}, to_apply=%add,
        backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}
      %c = s32[] custom-call(), custom_call_target="target"
      %start2 = s32[4]{0} all-reduce-start(s32[4]{0} %a),
        replica_groups={{0,1}}, to_apply=%add,
        backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}
      %done1 = s32[4]{0} all-reduce-done(s32[4]{0} %start1)
      %done2 = s32[4]{0} all-reduce-done(s32[4]{0} %start2)
      %fusion = s32[] fusion(s32[] %b, s32[] %c), kind=kLoop, calls=%fused_computation
      ROOT %fusion.1 = s32[] fusion(s32[] %b, s32[] %c), kind=kLoop, calls=%fused_computation.1
    })";

  const char* expected = R"(
    CHECK: %command_buffer ([[P0:.+]]: s32[], [[P1:.+]]: s32[]) -> s32[] {
    CHECK:   %[[P0]] = s32[] parameter(0)
    CHECK:   %[[P1]] = s32[] parameter(1)
    CHECK:   %fusion = s32[] fusion(%[[P0]], %[[P1]]), kind=kLoop, calls=%fused_computation
    CHECK:   ROOT %fusion.1 = s32[] fusion(%[[P0]], %[[P1]]), kind=kLoop, calls=%fused_computation.1
    CHECK: }

    CHECK: ENTRY %main (a: s32[4], b: s32[]) -> s32[] {
    CHECK:   %[[A:.+]] = s32[4]{0} parameter(0)
    CHECK:   %[[B:.+]] = s32[] parameter(1)
    CHECK:   %[[START1:.+]] = s32[4]{0} all-reduce-start(%[[A]])
    CHECK:   %[[C:.+]] = s32[] custom-call()
    CHECK:   %[[START2:.+]] = s32[4]{0} all-reduce-start(%[[A]])
    CHECK:   %[[DONE1:.+]] = s32[4]{0} all-reduce-done(%[[START1]])
    CHECK:   %[[DONE2:.+]] = s32[4]{0} all-reduce-done(%[[START2]])
    CHECK:   %call = s32[] call(%b, %c), to_apply=%command_buffer
    CHECK: })";

  RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()),
                            expected, [](HloModule* module) {
                              EXPECT_TRUE(module->has_schedule());
                              TF_CHECK_OK(module->schedule().Verify());
                            });
}
// Unit-level check of CollectCommandBufferSequences: an unsupported custom
// call splits the schedule into two fusion-only sequences.
TEST_F(CommandBufferSchedulingTest, CollectCommandBufferSequence) {
  const char* hlo = R"(
      HloModule TestModule, is_scheduled=true

      %fused_computation(param_0: s32[], param_1: s32[]) -> s32[] {
        %p0 = s32[] parameter(0)
        %p1 = s32[] parameter(1)
        ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
      }

      %fused_computation.1(param_0: s32[], param_1: s32[]) -> s32[] {
        %p0 = s32[] parameter(0)
        %p1 = s32[] parameter(1)
        ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
      }

      %fused_computation.2(param_0: s32[], param_1: s32[]) -> s32[] {
        %p0 = s32[] parameter(0)
        %p1 = s32[] parameter(1)
        ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
      }

      %fused_computation.3(param_0: s32[], param_1: s32[]) -> s32[] {
        %p0 = s32[] parameter(0)
        %p1 = s32[] parameter(1)
        ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
      }

      ENTRY %main (a: s32[], b: s32[], c: (s32[], s32[])) -> s32[] {
        %a = s32[] parameter(0)
        %b = s32[] parameter(1)
        %c = (s32[], s32[]) parameter(2)
        %fusion = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation
        %d = s32[] get-tuple-element((s32[], s32[]) %c), index=0
        %fusion.1 = s32[] fusion(s32[] %fusion, s32[] %d), kind=kLoop, calls=%fused_computation.1
        %e = s32[] get-tuple-element((s32[], s32[]) %c), index=1
        %custom-call = s32[] custom-call(s32[] %fusion.1, s32[] %e), custom_call_target="some target"
        %fusion.2 = s32[] fusion(s32[] %custom-call, s32[] %a), kind=kLoop, calls=%fused_computation.2
        ROOT %fusion.3 = s32[] fusion(s32[] %custom-call, s32[] %fusion.2), kind=kLoop, calls=%fused_computation.3
      })";

  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo));

  HloInstructionSequence seq;
  for (HloInstruction* x : module->entry_computation()->instructions()) {
    seq.push_back(x);
  }
  EXPECT_EQ(seq.size(), 10);

  CommandBufferScheduling::CommandBufferConfig config{
      {DebugOptions::FUSION}, {}, device_desc()};

  // min_num_commands is omitted — presumably it has a default in the header;
  // verify against the declaration.
  std::vector<HloInstructionSequence> command_buffer_sequences =
      CommandBufferScheduling::CollectCommandBufferSequences(seq, config);
  EXPECT_EQ(command_buffer_sequences.size(), 2);

  // First sequence: fusion, the glue GTE, and the dependent fusion.
  std::vector<HloInstruction*> seq_0 =
      command_buffer_sequences[0].instructions();
  EXPECT_EQ(seq_0.size(), 3);
  EXPECT_EQ(seq_0[0]->opcode(), HloOpcode::kFusion);
  EXPECT_EQ(seq_0[1]->opcode(), HloOpcode::kGetTupleElement);
  EXPECT_EQ(seq_0[2]->opcode(), HloOpcode::kFusion);

  // Second sequence: the two fusions after the custom call.
  std::vector<HloInstruction*> seq_1 =
      command_buffer_sequences[1].instructions();
  EXPECT_EQ(seq_1.size(), 2);
  EXPECT_EQ(seq_1[0]->opcode(), HloOpcode::kFusion);
  EXPECT_EQ(seq_1[1]->opcode(), HloOpcode::kFusion);
}
// Checks that MoveParametersAndConstantsToFront reorders the schedule so all
// parameters precede the fusions.
TEST_F(CommandBufferSchedulingTest, MoveParametersToFront) {
  const char* hlo = R"(
      HloModule TestModule, is_scheduled=true

      %fused_computation (param_0: s32[], param_1: s32[]) -> s32[] {
        %p0 = s32[] parameter(0)
        %p1 = s32[] parameter(1)
        ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
      }

      %fused_computation.1 (param_0: s32[], param_1: s32[]) -> s32[] {
        %p0 = s32[] parameter(0)
        %p1 = s32[] parameter(1)
        ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
      }

      ENTRY %main (a: s32[], b: s32[], c: s32[]) -> s32[] {
        %a = s32[] parameter(0)
        %b = s32[] parameter(1)
        %fusion = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation
        %c = s32[] parameter(2)
        ROOT %fusion.1 = s32[] fusion(s32[] %a, s32[] %c), kind=kLoop, calls=%fused_computation.1
      })";

  // NOTE(review): the FileCheck pattern below appears truncated (the raw
  // string literal is never closed) — restore it from upstream.
  const char* expected = R"(
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo));
  TF_ASSERT_OK(CommandBufferScheduling::MoveParametersAndConstantsToFront(
      module->entry_computation()));
  TF_ASSERT_OK_AND_ASSIGN(
      bool filecheck_matches,
      RunFileCheck(
          module->ToString(HloPrintOptions{}.set_print_operand_shape(false)),
          expected));
  EXPECT_TRUE(filecheck_matches);
}
// Unit-level check of PrepareCommandBuffer: verifies the built computation
// plus the recorded arguments (external operands) and results (instructions
// with external users).
TEST_F(CommandBufferSchedulingTest, PrepareCommandBuffer) {
  const char* hlo = R"(
      HloModule TestModule, is_scheduled=true

      %fused_computation(param_0: s32[], param_1: s32[]) -> (s32[], s32[]) {
        %p0 = s32[] parameter(0)
        %p1 = s32[] parameter(1)
        ROOT %tuple.1 = (s32[], s32[]) tuple(s32[] %p0, s32[] %p1)
      }

      %fused_computation.1(param_0: s32[], param_1: s32[]) -> s32[] {
        %p0 = s32[] parameter(0)
        %p1 = s32[] parameter(1)
        ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
      }

      ENTRY %main (a: s32[], b: s32[]) -> s32[] {
        %a = s32[] parameter(0)
        %b = s32[] custom-call(), custom_call_target="target"
        %fusion = (s32[], s32[]) fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation
        %d = s32[] get-tuple-element((s32[], s32[]) %fusion), index=0
        %fusion.1 = s32[] fusion(s32[] %a, s32[] %d), kind=kLoop, calls=%fused_computation.1
        ROOT %custom-call = s32[] custom-call(s32[] %fusion.1, s32[] %d), custom_call_target="some target"
      })";

  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(hlo));
  EXPECT_EQ(module->entry_computation()->instruction_count(), 6);

  // Outline only the fusions and the get-tuple-element between them.
  std::vector<HloInstruction*> instructions;
  HloInstructionSequence seq;
  for (HloInstruction* inst : module->entry_computation()->instructions()) {
    if (inst->opcode() == HloOpcode::kFusion ||
        inst->opcode() == HloOpcode::kGetTupleElement) {
      seq.push_back(inst);
    }
    instructions.push_back(inst);
  }

  TF_ASSERT_OK_AND_ASSIGN(
      CommandBuffer command_buffer,
      CommandBufferScheduling::PrepareCommandBuffer(seq, module.get()));
  HloComputation* computation = module->AddComputation(
      std::move(command_buffer.computation), false);

  // NOTE(review): the FileCheck pattern below appears truncated (the raw
  // string literal is never closed) — restore it from upstream.
  const char* expected = R"(
  TF_ASSERT_OK_AND_ASSIGN(
      bool filecheck_matches,
      RunFileCheck(computation->ToString(
                       HloPrintOptions{}.set_print_operand_shape(false)),
                   expected));
  EXPECT_TRUE(filecheck_matches);

  // Arguments are the entry parameter and the producing custom call; results
  // are the instructions whose values escape the outlined region.
  auto& arguments = command_buffer.arguments;
  ASSERT_EQ(arguments.size(), 2);
  EXPECT_EQ(arguments[0], instructions[0]);
  EXPECT_EQ(arguments[1], instructions[1]);

  auto& results = command_buffer.results;
  ASSERT_EQ(results.size(), 2);
  EXPECT_EQ(results[0], instructions[3]);
  EXPECT_EQ(results[1], instructions[4]);
}
TEST_F(CommandBufferSchedulingTest, ForwardControlDependencies) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%fused_computation (param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.1 (param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.2 (param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
ENTRY %main (a: s32[], b: s32[]) -> s32[] {
%a = s32[] parameter(0)
%b = s32[] parameter(1)
%custom-call = s32[] custom-call(), custom_call_target="some target"
%fusion = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation, control-predecessors={%custom-call}
%fusion.1 = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation.1, control-predecessors={%fusion}
%custom-call.1 = s32[] custom-call(), custom_call_target="some target"
%fusion.2 = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation.2, control-predecessors={%fusion.1}
ROOT %custom-call.2 = s32[] custom-call(s32[] %fusion.1, s32[] %fusion.2), custom_call_target="some target"
})";
const char* expected = R"(
CHECK: %command_buffer ([[P0:.+]]: s32[], [[P1:.+]]: s32[]) -> s32[] {
CHECK: %[[P0]] = s32[] parameter(0)
CHECK: %[[P1]] = s32[] parameter(1)
CHECK: %[[F0:.+]] = s32[] fusion(%[[P0]], %[[P1]])
CHECK: ROOT {{.*}} = s32[] fusion(%[[P0]], %[[P1]]), {{.*}} control-predecessors={%[[F0]]}
CHECK: }
CHECK: ENTRY %main (a: s32[], b: s32[]) -> s32[] {
CHECK: %a = s32[] parameter(0)
CHECK: %b = s32[] parameter(1)
CHECK: %custom-call = s32[] custom-call(), custom_call_target="some target"
CHECK: %call = s32[] call(%a, %b), to_apply=%command_buffer, control-predecessors={%custom-call}
CHECK: %custom-call.1 = s32[] custom-call(), custom_call_target="some target"
CHECK: %[[F3:.+]] = s32[] fusion(%a, %b), kind=kLoop, calls=%fused_computation.2, control-predecessors={%call}
CHECK: ROOT %custom-call.2 = s32[] custom-call(%call, %[[F3]]), custom_call_target="some target"
CHECK: })";
RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, ForwardControlDependenciesToParams) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%fused_computation.0 (p0: s32[], p1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.1 (p0: s32[], p1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
ENTRY %main (a: s32[], b: s32[]) -> s32[] {
%a = s32[] parameter(0)
%b = s32[] parameter(1)
%custom-call = s32[] custom-call(), custom_call_target="some target"
%fusion = s32[] fusion(s32[] %custom-call, s32[] %a), kind=kLoop, calls=%fused_computation.0, control-predecessors={%custom-call}
ROOT %fusion.1 = s32[] fusion(s32[] %fusion, s32[] %b), kind=kLoop, calls=%fused_computation.1
})";
const char* expected = R"(
CHECK: ENTRY %main (a: s32[], b: s32[]) -> s32[] {
CHECK: %a = s32[] parameter(0)
CHECK: %b = s32[] parameter(1)
CHECK: %[[CUSTOM_CALL:.+]] = s32[] custom-call(), custom_call_target="some target"
CHECK: ROOT {{.*}} call(%[[CUSTOM_CALL]], %a, %b), to_apply=%command_buffer, control-predecessors={%[[CUSTOM_CALL]]}
CHECK: })";
RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, WhileNotCommand) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%fused_computation (param_0: f32[1]) -> f32[1] {
%param_0 = f32[1]{0} parameter(0)
ROOT %copy.5 = f32[1]{0} copy(f32[1]{0} %param_0)
}
%fused_computation.1 (param_0.1: f32[1], param_1: f32[1]) -> f32[1] {
%param_0.1 = f32[1]{0} parameter(0)
%param_1 = f32[1]{0} parameter(1)
ROOT %add.2 = f32[1]{0} add(f32[1]{0} %param_0.1, f32[1]{0} %param_1)
}
%fused_computation.2 (param_0.2: f32[1], param_1.1: f32[1]) -> pred[1] {
%param_0.2 = f32[1]{0} parameter(0)
%param_1.1 = f32[1]{0} parameter(1)
ROOT %compare.3 = pred[1]{0} compare(f32[1]{0} %param_0.2, f32[1]{0} %param_1.1), direction=LT
}
%fused_computation.3 (param_0.1: f32[1], param_1: f32[1]) -> f32[1] {
%param_0.1 = f32[1]{0} parameter(0)
%param_1 = f32[1]{0} parameter(1)
ROOT %add.2 = f32[1]{0} add(f32[1]{0} %param_0.1, f32[1]{0} %param_1)
}
%body (Arg_.3: f32[1]) -> f32[1] {
%constant_4 = f32[1]{0} constant({1})
%Arg_.3 = f32[1]{0} parameter(0)
%custom-call = s32[] custom-call(), custom_call_target="some target"
%add = f32[1]{0} fusion(f32[1]{0} %Arg_.3, f32[1]{0} %constant_4), kind=kLoop, calls=%fused_computation.1, control-predecessors={%custom-call}
ROOT %wrapped_add.1 = f32[1]{0} fusion(f32[1]{0} %add, f32[1]{0} %constant_4), kind=kLoop, calls=%fused_computation.3, control-predecessors={%custom-call}
}
%cond (Arg_.11: f32[1]) -> pred[] {
%constant = f32[1]{0} constant({100})
%Arg_.11 = f32[1]{0} parameter(0)
%wrapped_compare.2 = pred[1]{0} fusion(f32[1]{0} %Arg_.11, f32[1]{0} %constant), kind=kLoop, calls=%fused_computation.2
ROOT %bitcast = pred[] bitcast(pred[1]{0} %wrapped_compare.2)
}
ENTRY %main.18 (Arg_0.1: f32[1]) -> f32[] {
%Arg_0.1 = f32[1]{0} parameter(0), sharding={replicated}
%wrapped_copy.4 = f32[1]{0} fusion(f32[1]{0} %Arg_0.1), kind=kLoop, calls=%fused_computation
%while.16 = f32[1]{0} while(f32[1]{0} %wrapped_copy.4), condition=%cond, body=%body
ROOT %bitcast.1 = f32[] bitcast(f32[1]{0} %while.16)
})";
const char* expected = R"(
CHECK: %command_buffer ([[P0:.+]]: f32[1], [[P1:.+]]: f32[1]) -> f32[1] {
CHECK: %[[P0]] = f32[1]{0} parameter(0)
CHECK: %[[P1]] = f32[1]{0} parameter(1)
CHECK: %[[ADD:.*]] = f32[1]{0} fusion(%[[P0]], %[[P1]]), kind=kLoop
CHECK: ROOT {{.*}} = f32[1]{0} fusion(%[[ADD]], %[[P1]]), kind=kLoop
CHECK: }
CHECK: %[[BODY:[a-z_0-9.]+]] ([[P0:.+]]: f32[1]) -> f32[1] {
CHECK: %[[C1:.*]] = f32[1]{0} constant({1})
CHECK: %[[P0]] = f32[1]{0} parameter(0)
CHECK: %[[CC:.*]] = s32[] custom-call(), custom_call_target="some target"
CHECK: ROOT %call = f32[1]{0} call(%[[P0]], %[[C1]]), to_apply=%command_buffer, control-predecessors={%[[CC]]}
CHECK: }
CHECK: ENTRY %[[MAIN:.+]] ([[ARG0:.+]]: f32[1]) -> f32[] {
CHECK: %[[ARG0]] = f32[1]{0} parameter(0)
CHECK: %[[COPY:.*]] = f32[1]{0} fusion(%[[ARG0]]), kind=kLoop
CHECK: %[[WHILE:.*]] = f32[1]{0} while(%[[COPY]]), condition=%[[COND:[a-z_0-9.]+]], body=%[[BODY]]
CHECK: ROOT %[[BC:.+]] = f32[] bitcast(%[[WHILE]])
CHECK: })";
RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, While) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%fused_computation (param_0: f32[1]) -> f32[1] {
%param_0 = f32[1]{0} parameter(0)
ROOT %copy.5 = f32[1]{0} copy(f32[1]{0} %param_0)
}
%fused_computation.1 (param_0.1: f32[1], param_1: f32[1]) -> f32[1] {
%param_0.1 = f32[1]{0} parameter(0)
%param_1 = f32[1]{0} parameter(1)
ROOT %add.2 = f32[1]{0} add(f32[1]{0} %param_0.1, f32[1]{0} %param_1)
}
%fused_computation.2 (param_0.2: f32[1], param_1.1: f32[1]) -> pred[1] {
%param_0.2 = f32[1]{0} parameter(0)
%param_1.1 = f32[1]{0} parameter(1)
ROOT %compare.3 = pred[1]{0} compare(f32[1]{0} %param_0.2, f32[1]{0} %param_1.1), direction=LT
}
%body (Arg_.3: f32[1]) -> f32[1] {
%constant_4 = f32[1]{0} constant({1})
%Arg_.3 = f32[1]{0} parameter(0)
ROOT %wrapped_add.1 = f32[1]{0} fusion(f32[1]{0} %Arg_.3, f32[1]{0} %constant_4), kind=kLoop, calls=%fused_computation.1
}
%cond (Arg_.11: f32[1]) -> pred[] {
%constant = f32[1]{0} constant({100})
%Arg_.11 = f32[1]{0} parameter(0)
%wrapped_compare.2 = pred[1]{0} fusion(f32[1]{0} %Arg_.11, f32[1]{0} %constant), kind=kLoop, calls=%fused_computation.2
ROOT %bitcast = pred[] bitcast(pred[1]{0} %wrapped_compare.2)
}
ENTRY %main.18 (Arg_0.1: f32[1]) -> f32[] {
%Arg_0.1 = f32[1]{0} parameter(0), sharding={replicated}
%wrapped_copy.4 = f32[1]{0} fusion(f32[1]{0} %Arg_0.1), kind=kLoop, calls=%fused_computation
%while.16 = f32[1]{0} while(f32[1]{0} %wrapped_copy.4), condition=%cond, body=%body
ROOT %bitcast.1 = f32[] bitcast(f32[1]{0} %while.16)
})";
const char* expected = R"(
CHECK: %command_buffer ([[P0:.+]]: f32[1]) -> f32[1] {
CHECK: %[[P0]] = f32[1]{0} parameter(0)
CHECK: %[[COPY:.*]] = f32[1]{0} fusion(%[[P0]]), kind=kLoop
CHECK: ROOT {{.*}} = f32[1]{0} while(%[[COPY]]), condition=%[[COND:[a-z_0-9.]+]], body=%[[BODY:[a-z_0-9.]+]]
CHECK: }
CHECK: ENTRY %[[MAIN:.+]] ([[ARG0:.+]]: f32[1]) -> f32[] {
CHECK: %[[ARG0]] = f32[1]{0} parameter(0)
CHECK: %call = f32[1]{0} call(%[[ARG0]]), to_apply=%command_buffer
CHECK: ROOT %[[BC:.+]] = f32[] bitcast(%call)
CHECK: })";
RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, Conditional) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%fused_computation.1 (param_0.2: s32[5]) -> s32[5] {
%param_0.2 = s32[5]{0} parameter(0)
ROOT %negate.2 = s32[5]{0} negate(s32[5]{0} %param_0.2)
}
%region_0.7 (Arg_.8: s32[5]) -> (s32[5]) {
%Arg_.8 = s32[5]{0} parameter(0)
%wrapped_negate.1 = s32[5]{0} fusion(s32[5]{0} %Arg_.8), kind=kLoop, calls=%fused_computation.1
ROOT %tuple.3 = (s32[5]{0}) tuple(s32[5]{0} %wrapped_negate.1)
}
%fused_computation.2 (param_0.3: s32[5]) -> s32[5] {
%param_0.3 = s32[5]{0} parameter(0)
ROOT %not.2 = s32[5]{0} not(s32[5]{0} %param_0.3)
}
%region_1.10 (Arg_.11: s32[5]) -> (s32[5]) {
%Arg_.11 = s32[5]{0} parameter(0)
%wrapped_not.1 = s32[5]{0} fusion(s32[5]{0} %Arg_.11), kind=kLoop, calls=%fused_computation.2
ROOT %tuple.4 = (s32[5]{0}) tuple(s32[5]{0} %wrapped_not.1)
}
%fused_computation.3 (param_0.4: s32[5]) -> s32[5] {
%param_0.4 = s32[5]{0} parameter(0)
ROOT %multiply.2 = s32[5]{0} multiply(s32[5]{0} %param_0.4, s32[5]{0} %param_0.4)
}
%region_2.13 (Arg_.14: s32[5]) -> (s32[5]) {
%Arg_.14 = s32[5]{0} parameter(0)
%wrapped_multiply.1 = s32[5]{0} fusion(s32[5]{0} %Arg_.14), kind=kLoop, calls=%fused_computation.3
ROOT %tuple.5 = (s32[5]{0}) tuple(s32[5]{0} %wrapped_multiply.1)
}
%fused_computation (param_0.1: s64[]) -> s32[] {
%constant_1 = s32[] constant(0)
%param_0.1 = s64[] parameter(0)
%convert.2 = s32[] convert(s64[] %param_0.1)
%constant_0 = s32[] constant(2)
ROOT %clamp.2 = s32[] clamp(s32[] %constant_1, s32[] %convert.2, s32[] %constant_0)
}
ENTRY %main.17 (Arg_0.1: s64[], Arg_1.2: s32[5]) -> s32[5] {
%Arg_0.1 = s64[] parameter(0), sharding={replicated}
%fusion = s32[] fusion(s64[] %Arg_0.1), kind=kLoop, calls=%fused_computation
%Arg_1.2 = s32[5]{0} parameter(1), sharding={replicated}
%conditional.16.clone = (s32[5]{0}) conditional(s32[] %fusion, s32[5]{0} %Arg_1.2, s32[5]{0} %Arg_1.2, s32[5]{0} %Arg_1.2), branch_computations={%region_0.7, %region_1.10, %region_2.13}
ROOT %get-tuple-element = s32[5]{0} get-tuple-element((s32[5]{0}) %conditional.16.clone), index=0
})";
const char* expected = R"(
CHECK: %command_buffer ([[P0:.+]]: s64[], [[P1:.+]]: s32[5]) -> (s32[5]) {
CHECK: %[[P0]] = s64[] parameter(0)
CHECK: %[[P1]] = s32[5]{0} parameter(1)
CHECK: %[[FUSION:.*]] = s32[] fusion(%[[P0]]), kind=kLoop
CHECK: ROOT {{.*}} = (s32[5]{0}) conditional(%[[FUSION]], %[[P1]], %[[P1]], %[[P1]]), branch_computations={%[[B1:[a-z_0-9.]+]], %[[B2:[a-z_0-9.]+]], %[[B3:[a-z_0-9.]+]]}
CHECK: }
CHECK: ENTRY %[[MAIN:.+]] ([[ARG0:.+]]: s64[], [[ARG1:.+]]: s32[5]) -> s32[5] {
CHECK: %[[ARG0]] = s64[] parameter(0)
CHECK: %[[ARG1]] = s32[5]{0} parameter(1)
CHECK: %call = (s32[5]{0}) call(%[[ARG0]], %[[ARG1]]), to_apply=%command_buffer
CHECK: ROOT %[[GEP:.+]] = s32[5]{0} get-tuple-element(%call)
CHECK: })";
RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, CuDnnFusionGraphCaptureWorks) {
const std::string kHloText = R"(
HloModule m, is_scheduled=true
fusion0 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
ROOT d = f32[64,64] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
fusion1 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
ROOT d = f32[64,64] dot(p0, p1),
lhs_contracting_dims={0}, rhs_contracting_dims={1}
}
fusion_a {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
ROOT a = f32[64,64] add(p0, p1)
}
ENTRY e {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
d0 = f32[64,64] fusion(p0, p1), kind=kCustom,
calls=fusion0,
backend_config={"fusion_backend_config": {"kind":"__cudnn$fusion"}}
a = f32[64,64] fusion(d0, d0), kind=kLoop, calls=fusion_a
ROOT d1 = f32[64,64] fusion(a, p1), kind=kCustom,
calls=fusion1,
backend_config={"fusion_backend_config": {"kind":"__cudnn$fusion"}}
})";
const std::string kExpected = R"(
; CHECK: ENTRY
; CHECK-NEXT: parameter
; CHECK-NEXT: parameter
; CHECK-NEXT: ROOT
; CHECK-SAME: call(
; CHECK-SAME: to_apply=%command_buffer
})";
RunAndFilecheckHloRewrite(kHloText, CommandBufferScheduling(device_desc()),
kExpected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, AsyncCustomCall) {
const char* hlo = R"(
HloModule m, is_scheduled=true
ENTRY %main (a: s32[], b: s32[]) -> f32[2,2] {
%p = f32[2,2]{1,0} parameter(0)
%start1 = ((f32[2,2], f32[2,2]), (f32[2,2], s8[4]), u32[]) custom-call-start(f32[2,2] %p, f32[2,2] %p), custom_call_target="__cublas$gemm"
%start2 = ((f32[2,2], f32[2,2]), (f32[2,2], s8[4]), u32[]) custom-call-start(f32[2,2] %p, f32[2,2] %p), custom_call_target="__cublas$gemm"
%done1 = (f32[2,2], s8[4]) custom-call-done(((f32[2,2], f32[2,2]), (f32[2,2], s8[4]), u32[]) %start1)
%done2 = (f32[2,2], s8[4]) custom-call-done(((f32[2,2], f32[2,2]), (f32[2,2], s8[4]), u32[]) %start2)
%result1 = f32[2,2] get-tuple-element((f32[2,2], s8[4]) %done1), index=0
%result2 = f32[2,2] get-tuple-element((f32[2,2], s8[4]) %done2), index=0
ROOT %sum = f32[2,2] add(f32[2,2] %result1, f32[2,2] %result2)
})";
const char* expected = R"(
CHECK: %command_buffer ([[P:.+]]: f32[2,2]) -> ((f32[2,2], s8[4]), (f32[2,2], s8[4])) {
CHECK: %[[P]] = f32[2,2]{1,0} parameter(0)
CHECK: %[[S1:.+]] = ((f32[2,2]{1,0}, f32[2,2]{1,0}), (f32[2,2]{1,0}, s8[4]{0}), u32[]) custom-call-start(%[[P]], %[[P]]), custom_call_target="__cublas$gemm"
CHECK: %[[S2:.+]] = ((f32[2,2]{1,0}, f32[2,2]{1,0}), (f32[2,2]{1,0}, s8[4]{0}), u32[]) custom-call-start(%[[P]], %[[P]]), custom_call_target="__cublas$gemm"
CHECK: %[[D1:.+]] = (f32[2,2]{1,0}, s8[4]{0}) custom-call-done(%[[S1]])
CHECK: %[[D2:.+]] = (f32[2,2]{1,0}, s8[4]{0}) custom-call-done(%[[S2]])
CHECK: ROOT %[[T:.+]] = ((f32[2,2]{1,0}, s8[4]{0}), (f32[2,2]{1,0}, s8[4]{0})) tuple(%[[D1]], %[[D2]])
CHECK: })";
RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, AsyncFusion) {
const char* hlo = R"(
HloModule m, is_scheduled=true
add0 {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
add1 {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
ENTRY main {
%a = s32[] parameter(0)
%b = s32[] parameter(1)
%start1 = ((s32[], s32[]), s32[], u32[]) fusion-start(%a, %b),
kind=kLoop, calls=add0
%start2 = ((s32[], s32[]), s32[], u32[]) fusion-start(%a, %b),
kind=kLoop, calls=add1
%done1 = s32[] fusion-done(%start1)
%done2 = s32[] fusion-done(%start2)
ROOT %tuple = (s32[], s32[]) tuple(%done1, %done2)
})";
const char* expected = R"(
CHECK: %command_buffer {{.*}} -> (s32[], s32[]) {
CHECK: %[[S1:.+]] = ((s32[], s32[]), s32[], u32[]) fusion-start
CHECK: %[[S2:.+]] = ((s32[], s32[]), s32[], u32[]) fusion-start
CHECK: %[[D1:.+]] = s32[] fusion-done(%[[S1]])
CHECK: %[[D2:.+]] = s32[] fusion-done(%[[S2]])
CHECK: ROOT {{.*}} = (s32[], s32[]) tuple(%[[D1]], %[[D2]])
CHECK: })";
RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, AsyncAlltoAll) {
const char* hlo = R"(
HloModule m, is_scheduled=true
async_computation.1 {
param.1 = f32[4,8,128]{2,1,0} parameter(0)
ROOT all-to-all.1 = f32[4,8,128]{2,1,0} all-to-all(param.1), channel_id=1, dimensions={1}
}
ENTRY main {
param.0 = f32[4,8,128]{2,1,0} parameter(0)
all-to-all-start = ((f32[4,8,128]{2,1,0}), f32[4,8,128]{2,1,0}) async-start(param.0), calls=async_computation.1
ROOT all-to-all-done = f32[4,8,128]{2,1,0} async-done(all-to-all-start)
})";
const char* expected = R"(
CHECK: %command_buffer ([[P:.+]]: f32[4,8,128]) -> f32[4,8,128] {
CHECK: %[[P]] = f32[4,8,128]{2,1,0} parameter(0)
CHECK: %[[S1:.+]] = ((f32[4,8,128]{2,1,0}), f32[4,8,128]{2,1,0}) all-to-all-start(%[[P]]), channel_id=1, replica_groups={}, dimensions={1}
CHECK: ROOT {{.*}} = f32[4,8,128]{2,1,0} all-to-all-done(%[[S1]])
CHECK: })";
RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, DynamicSliceFusionDynamicSlicing) {
if (backend().platform()->Name() == "Host") {
GTEST_SKIP() << "GPU support required for this test";
}
const char* hlo = R"(
HloModule jit_slice, replica_count=2
add {
a = s32[] parameter(0)
b = s32[] parameter(1)
ROOT add = add(a,b)
}
ENTRY main.9 {
p0 = s32[2,8,32]{2,1,0} parameter(0)
p1 = s32[8,32]{1,0} parameter(1)
c0 = s32[] constant(0)
c1 = s32[] constant(1)
slice = s32[1,8,32]{2,1,0} dynamic-slice(p0, c1, c0, c0), dynamic_slice_sizes={1,8,32}
input = s32[8,32]{1,0} reshape(slice)
rs = s32[4,32] reduce-scatter(input), channel_id=64, replica_groups={{0,1}}, use_global_device_ids=true, dimensions={0}, to_apply=add
ROOT dus = s32[8,32] dynamic-update-slice(p1, rs, c0, c0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, GetOptimizedModule(hlo));
HloModuleConfig config(m->config());
DebugOptions options(config.debug_options());
options.set_xla_gpu_graph_min_graph_size(0);
auto check = [&m, this](DebugOptions options) -> absl::Status {
auto m_clone = m->Clone();
HloModuleConfig config(m_clone->config());
config.set_debug_options(options);
m_clone->set_config(config);
TF_ASSIGN_OR_RETURN(auto exec, CreateExecutable(std::move(m_clone), false));
auto gpu_exec = std::unique_ptr<GpuExecutable>(
static_cast<GpuExecutable*>(exec.release()));
TF_RET_CHECK(llvm::any_of(gpu_exec->GetThunk().thunks(),
[](const std::unique_ptr<Thunk>& thunk) {
return thunk->kind() == Thunk::kDynamicSlice;
}));
return absl::OkStatus();
};
options.clear_xla_gpu_enable_command_buffer();
options.add_xla_gpu_enable_command_buffer(DebugOptions::FUSION);
options.add_xla_gpu_enable_command_buffer(DebugOptions::COLLECTIVES);
TF_ASSERT_OK(check(options));
options.clear_xla_gpu_enable_command_buffer();
TF_ASSERT_OK(check(options));
options.clear_xla_gpu_enable_command_buffer();
options.add_xla_gpu_enable_command_buffer(DebugOptions::COLLECTIVES);
TF_ASSERT_OK(check(options));
options.clear_xla_gpu_enable_command_buffer();
options.add_xla_gpu_enable_command_buffer(DebugOptions::FUSION);
TF_ASSERT_OK(check(options));
}
TEST_F(CommandBufferSchedulingTest, DynamicSliceFusionStaticSlicing) {
if (backend().platform()->Name() == "Host" || backend().device_count() < 2) {
GTEST_SKIP() << "Atleast two GPUs required for this test";
}
const char* hlo = R"(
HloModule jit_slice, replica_count=2
add {
a = s32[] parameter(0)
b = s32[] parameter(1)
ROOT add = add(a,b)
}
ENTRY main.9 {
p0 = s32[2,8,32]{2,1,0} parameter(0)
p1 = s32[8,32]{1,0} parameter(1)
c0 = s32[] constant(0)
c1 = s32[] constant(1)
slice = s32[1,8,32]{2,1,0} slice(p0), slice={[1:2], [0:8], [0:32]}
input = s32[8,32]{1,0} reshape(slice)
ROOT rs = s32[4,32] reduce-scatter(input), channel_id=64, replica_groups={{0,1}}, use_global_device_ids=true, dimensions={0}, to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, GetOptimizedModule(hlo));
HloModuleConfig config(m->config());
DebugOptions options(config.debug_options());
options.set_xla_gpu_graph_min_graph_size(0);
auto get_exec = [&m, this](DebugOptions options)
-> absl::StatusOr<std::unique_ptr<GpuExecutable>> {
auto m_clone = m->Clone();
HloModuleConfig config(m_clone->config());
config.set_debug_options(options);
m_clone->set_config(config);
TF_ASSIGN_OR_RETURN(auto exec, CreateExecutable(std::move(m_clone), false));
return std::unique_ptr<GpuExecutable>(
static_cast<GpuExecutable*>(exec.release()));
};
{
options.clear_xla_gpu_enable_command_buffer();
options.add_xla_gpu_enable_command_buffer(DebugOptions::FUSION);
options.add_xla_gpu_enable_command_buffer(DebugOptions::COLLECTIVES);
TF_ASSERT_OK_AND_ASSIGN(auto gpu_exec, get_exec(options));
Thunk* child = gpu_exec->GetThunk().thunks()[0].get();
ASSERT_EQ(child->kind(), Thunk::kCommandBuffer);
}
{
options.clear_xla_gpu_enable_command_buffer();
TF_ASSERT_OK_AND_ASSIGN(auto gpu_exec, get_exec(options));
Thunk* child = gpu_exec->GetThunk().thunks()[0].get();
ASSERT_NE(child->kind(), Thunk::kCommandBuffer);
}
{
options.clear_xla_gpu_enable_command_buffer();
options.add_xla_gpu_enable_command_buffer(DebugOptions::COLLECTIVES);
TF_ASSERT_OK_AND_ASSIGN(auto gpu_exec, get_exec(options));
Thunk* child = gpu_exec->GetThunk().thunks()[0].get();
ASSERT_EQ(child->kind(), Thunk::kCommandBuffer);
}
{
options.clear_xla_gpu_enable_command_buffer();
options.add_xla_gpu_enable_command_buffer(DebugOptions::FUSION);
TF_ASSERT_OK_AND_ASSIGN(auto gpu_exec, get_exec(options));
Thunk* child = gpu_exec->GetThunk().thunks()[0].get();
ASSERT_NE(child->kind(), Thunk::kCommandBuffer);
}
options.clear_xla_gpu_enable_command_buffer();
auto m_ref = m->Clone();
config.set_debug_options(options);
m_ref->set_config(config);
config.set_debug_options(GetDebugOptionsForTest());
m->set_config(config);
ASSERT_TRUE(RunAndCompareTwoModulesReplicated(std::move(m_ref), std::move(m),
false, true, std::nullopt));
}
TEST_F(CommandBufferSchedulingTest, ReturnFalseWhenNoChange) {
const char* hlo = R"(
HloModule module, is_scheduled=true
ENTRY main {
a = s32[8,8] parameter(0)
b = s32[8,8] parameter(1)
ROOT call = s32[8,8] custom-call(a,b), custom_call_target="__cublas$gemm"
}
)";
HloModuleConfig config;
DebugOptions options = GetDebugOptionsForTest();
options.clear_xla_gpu_enable_command_buffer();
options.add_xla_gpu_enable_command_buffer(DebugOptions::COLLECTIVES);
config.set_debug_options(options);
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo, config));
RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()),
std::nullopt);
}
TEST_F(CommandBufferSchedulingTest, ReturnTrueWhenOnlyParamMoved) {
const char* hlo = R"(
HloModule module, is_scheduled=true
ENTRY main {
a = s32[8,8] parameter(0)
b = s32[8,8] parameter(1)
call = s32[8,8] custom-call(a,b), custom_call_target="__cublas$gemm"
c = s32[8,8] parameter(2)
ROOT call2 = s32[8,8] custom-call(call, c), custom_call_target="__cublas$gemm"
}
)";
HloModuleConfig config;
DebugOptions options = GetDebugOptionsForTest();
options.clear_xla_gpu_enable_command_buffer();
options.add_xla_gpu_enable_command_buffer(DebugOptions::COLLECTIVES);
config.set_debug_options(options);
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo, config));
RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()), R"(
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/command_buffer_scheduling.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/command_buffer_scheduling_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8cfd8b81-c908-4911-ac60-3c75d154cc47 | cpp | google/quiche | qpack_decoder | quiche/quic/core/qpack/qpack_decoder.cc | quiche/quic/core/qpack/qpack_decoder_test.cc | #include "quiche/quic/core/qpack/qpack_decoder.h"
#include <memory>
#include <utility>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/qpack/qpack_index_conversions.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
QpackDecoder::QpackDecoder(
uint64_t maximum_dynamic_table_capacity, uint64_t maximum_blocked_streams,
EncoderStreamErrorDelegate* encoder_stream_error_delegate)
: encoder_stream_error_delegate_(encoder_stream_error_delegate),
encoder_stream_receiver_(this),
maximum_blocked_streams_(maximum_blocked_streams),
known_received_count_(0) {
QUICHE_DCHECK(encoder_stream_error_delegate_);
header_table_.SetMaximumDynamicTableCapacity(maximum_dynamic_table_capacity);
}
QpackDecoder::~QpackDecoder() {}
void QpackDecoder::OnStreamReset(QuicStreamId stream_id) {
if (header_table_.maximum_dynamic_table_capacity() > 0) {
decoder_stream_sender_.SendStreamCancellation(stream_id);
}
}
bool QpackDecoder::OnStreamBlocked(QuicStreamId stream_id) {
auto result = blocked_streams_.insert(stream_id);
QUICHE_DCHECK(result.second);
return blocked_streams_.size() <= maximum_blocked_streams_;
}
void QpackDecoder::OnStreamUnblocked(QuicStreamId stream_id) {
size_t result = blocked_streams_.erase(stream_id);
QUICHE_DCHECK_EQ(1u, result);
}
void QpackDecoder::OnDecodingCompleted(QuicStreamId stream_id,
uint64_t required_insert_count) {
if (required_insert_count > 0) {
decoder_stream_sender_.SendHeaderAcknowledgement(stream_id);
if (known_received_count_ < required_insert_count) {
known_received_count_ = required_insert_count;
}
}
if (known_received_count_ < header_table_.inserted_entry_count()) {
decoder_stream_sender_.SendInsertCountIncrement(
header_table_.inserted_entry_count() - known_received_count_);
known_received_count_ = header_table_.inserted_entry_count();
}
}
void QpackDecoder::OnInsertWithNameReference(bool is_static,
uint64_t name_index,
absl::string_view value) {
if (is_static) {
auto entry = header_table_.LookupEntry( true, name_index);
if (!entry) {
OnErrorDetected(QUIC_QPACK_ENCODER_STREAM_INVALID_STATIC_ENTRY,
"Invalid static table entry.");
return;
}
if (!header_table_.EntryFitsDynamicTableCapacity(entry->name(), value)) {
OnErrorDetected(QUIC_QPACK_ENCODER_STREAM_ERROR_INSERTING_STATIC,
"Error inserting entry with name reference.");
return;
}
header_table_.InsertEntry(entry->name(), value);
return;
}
uint64_t absolute_index;
if (!QpackEncoderStreamRelativeIndexToAbsoluteIndex(
name_index, header_table_.inserted_entry_count(), &absolute_index)) {
OnErrorDetected(QUIC_QPACK_ENCODER_STREAM_INSERTION_INVALID_RELATIVE_INDEX,
"Invalid relative index.");
return;
}
const QpackEntry* entry =
header_table_.LookupEntry( false, absolute_index);
if (!entry) {
OnErrorDetected(QUIC_QPACK_ENCODER_STREAM_INSERTION_DYNAMIC_ENTRY_NOT_FOUND,
"Dynamic table entry not found.");
return;
}
if (!header_table_.EntryFitsDynamicTableCapacity(entry->name(), value)) {
OnErrorDetected(QUIC_QPACK_ENCODER_STREAM_ERROR_INSERTING_DYNAMIC,
"Error inserting entry with name reference.");
return;
}
header_table_.InsertEntry(entry->name(), value);
}
void QpackDecoder::OnInsertWithoutNameReference(absl::string_view name,
absl::string_view value) {
if (!header_table_.EntryFitsDynamicTableCapacity(name, value)) {
OnErrorDetected(QUIC_QPACK_ENCODER_STREAM_ERROR_INSERTING_LITERAL,
"Error inserting literal entry.");
return;
}
header_table_.InsertEntry(name, value);
}
void QpackDecoder::OnDuplicate(uint64_t index) {
uint64_t absolute_index;
if (!QpackEncoderStreamRelativeIndexToAbsoluteIndex(
index, header_table_.inserted_entry_count(), &absolute_index)) {
OnErrorDetected(QUIC_QPACK_ENCODER_STREAM_DUPLICATE_INVALID_RELATIVE_INDEX,
"Invalid relative index.");
return;
}
const QpackEntry* entry =
header_table_.LookupEntry( false, absolute_index);
if (!entry) {
OnErrorDetected(QUIC_QPACK_ENCODER_STREAM_DUPLICATE_DYNAMIC_ENTRY_NOT_FOUND,
"Dynamic table entry not found.");
return;
}
if (!header_table_.EntryFitsDynamicTableCapacity(entry->name(),
entry->value())) {
OnErrorDetected(QUIC_INTERNAL_ERROR, "Error inserting duplicate entry.");
return;
}
header_table_.InsertEntry(entry->name(), entry->value());
}
void QpackDecoder::OnSetDynamicTableCapacity(uint64_t capacity) {
if (!header_table_.SetDynamicTableCapacity(capacity)) {
OnErrorDetected(QUIC_QPACK_ENCODER_STREAM_SET_DYNAMIC_TABLE_CAPACITY,
"Error updating dynamic table capacity.");
}
}
void QpackDecoder::OnErrorDetected(QuicErrorCode error_code,
absl::string_view error_message) {
encoder_stream_error_delegate_->OnEncoderStreamError(error_code,
error_message);
}
std::unique_ptr<QpackProgressiveDecoder> QpackDecoder::CreateProgressiveDecoder(
QuicStreamId stream_id,
QpackProgressiveDecoder::HeadersHandlerInterface* handler) {
return std::make_unique<QpackProgressiveDecoder>(stream_id, this, this,
&header_table_, handler);
}
void QpackDecoder::FlushDecoderStream() { decoder_stream_sender_.Flush(); }
} | #include "quiche/quic/core/qpack/qpack_decoder.h"
#include <algorithm>
#include <memory>
#include <string>
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/qpack/qpack_decoder_test_utils.h"
#include "quiche/quic/test_tools/qpack/qpack_test_utils.h"
using ::testing::_;
using ::testing::Eq;
using ::testing::Invoke;
using ::testing::Mock;
using ::testing::Sequence;
using ::testing::StrictMock;
using ::testing::Values;
namespace quic {
namespace test {
namespace {
const char* const kHeaderAcknowledgement = "\x81";
const uint64_t kMaximumDynamicTableCapacity = 1024;
const uint64_t kMaximumBlockedStreams = 1;
// Fixture for QpackDecoder tests. Parameterized on FragmentMode so every
// header block is fed to the decoder both as one chunk and octet by octet.
class QpackDecoderTest : public QuicTestWithParam<FragmentMode> {
 protected:
  QpackDecoderTest()
      : qpack_decoder_(kMaximumDynamicTableCapacity, kMaximumBlockedStreams,
                       &encoder_stream_error_delegate_),
        fragment_mode_(GetParam()) {
    qpack_decoder_.set_qpack_stream_sender_delegate(
        &decoder_stream_sender_delegate_);
  }
  ~QpackDecoderTest() override = default;
  void SetUp() override {
    // By default, destroy the progressive decoder on error.  This keeps
    // DecodeData()/EndDecoding() from feeding more input after a failure.
    ON_CALL(handler_, OnDecodingErrorDetected(_, _))
        .WillByDefault(Invoke([this](QuicErrorCode /*error_code*/,
                                     absl::string_view /*error_message*/) {
          progressive_decoder_.reset();
        }));
  }
  // Feeds |data| to the decoder's encoder stream receiver.
  void DecodeEncoderStreamData(absl::string_view data) {
    qpack_decoder_.encoder_stream_receiver()->Decode(data);
  }
  std::unique_ptr<QpackProgressiveDecoder> CreateProgressiveDecoder(
      QuicStreamId stream_id) {
    return qpack_decoder_.CreateProgressiveDecoder(stream_id, &handler_);
  }
  void FlushDecoderStream() { qpack_decoder_.FlushDecoderStream(); }
  // Begins decoding a header block on stream 1.
  void StartDecoding() {
    progressive_decoder_ = CreateProgressiveDecoder(/*stream_id=*/ 1);
  }
  // Feeds |data| in fragments sized by the FragmentMode test parameter.
  // Stops early if a decoding error destroyed the progressive decoder.
  void DecodeData(absl::string_view data) {
    auto fragment_size_generator =
        FragmentModeToFragmentSizeGenerator(fragment_mode_);
    while (progressive_decoder_ && !data.empty()) {
      size_t fragment_size = std::min(fragment_size_generator(), data.size());
      progressive_decoder_->Decode(data.substr(0, fragment_size));
      data = data.substr(fragment_size);
    }
  }
  // Signals end of the header block, unless decoding already failed.
  void EndDecoding() {
    if (progressive_decoder_) {
      progressive_decoder_->EndHeaderBlock();
    }
  }
  // Convenience wrapper: decodes |data| as one complete header block.
  void DecodeHeaderBlock(absl::string_view data) {
    StartDecoding();
    DecodeData(data);
    EndDecoding();
  }
  StrictMock<MockEncoderStreamErrorDelegate> encoder_stream_error_delegate_;
  StrictMock<MockQpackStreamSenderDelegate> decoder_stream_sender_delegate_;
  StrictMock<MockHeadersHandler> handler_;
 private:
  QpackDecoder qpack_decoder_;
  const FragmentMode fragment_mode_;
  std::unique_ptr<QpackProgressiveDecoder> progressive_decoder_;
};
// Run every test with both fragmentation strategies.
INSTANTIATE_TEST_SUITE_P(All, QpackDecoderTest,
                         Values(FragmentMode::kSingleChunk,
                                FragmentMode::kOctetByOctet));
// A header block must start with a complete two-integer prefix.
TEST_P(QpackDecoderTest, NoPrefix) {
  EXPECT_CALL(handler_,
              OnDecodingErrorDetected(QUIC_QPACK_DECOMPRESSION_FAILED,
                                      Eq("Incomplete header data prefix.")));
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("00", &input));
  DecodeHeaderBlock(input);
}
// Required Insert Count varint in the prefix overflows the integer decoder.
TEST_P(QpackDecoderTest, InvalidPrefix) {
  StartDecoding();
  EXPECT_CALL(handler_,
              OnDecodingErrorDetected(QUIC_QPACK_DECOMPRESSION_FAILED,
                                      Eq("Encoded integer too large.")));
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("ffffffffffffffffffffffffffff", &input));
  DecodeData(input);
}
// A prefix with no field lines decodes to an empty header list.
TEST_P(QpackDecoderTest, EmptyHeaderBlock) {
  EXPECT_CALL(handler_, OnDecodingCompleted());
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("0000", &input));
  DecodeHeaderBlock(input);
}
// Literal field line with an empty name.
TEST_P(QpackDecoderTest, LiteralEntryEmptyName) {
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq(""), Eq("foo")));
  EXPECT_CALL(handler_, OnDecodingCompleted());
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("00002003666f6f", &input));
  DecodeHeaderBlock(input);
}
// Literal field line with an empty value.
TEST_P(QpackDecoderTest, LiteralEntryEmptyValue) {
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("")));
  EXPECT_CALL(handler_, OnDecodingCompleted());
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("000023666f6f00", &input));
  DecodeHeaderBlock(input);
}
// Literal field line with empty name and empty value.
TEST_P(QpackDecoderTest, LiteralEntryEmptyNameAndValue) {
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq(""), Eq("")));
  EXPECT_CALL(handler_, OnDecodingCompleted());
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("00002000", &input));
  DecodeHeaderBlock(input);
}
// A single non-Huffman literal field line: "foo: bar".
TEST_P(QpackDecoderTest, SimpleLiteralEntry) {
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("bar")));
  EXPECT_CALL(handler_, OnDecodingCompleted());
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("000023666f6f03626172", &input));
  DecodeHeaderBlock(input);
}
// Several literal field lines, including a 127-byte value that needs a
// multi-byte length varint.
TEST_P(QpackDecoderTest, MultipleLiteralEntries) {
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("bar")));
  std::string str(127, 'a');
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foobaar"), absl::string_view(str)));
  EXPECT_CALL(handler_, OnDecodingCompleted());
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes(
      "0000"
      "23666f6f03626172"
      "2700666f6f62616172"
      "7f0061616161616161"
      "616161616161616161"
      "6161616161616161616161616161616161616161616161616161616161616161616161"
      "6161616161616161616161616161616161616161616161616161616161616161616161"
      "6161616161616161616161616161616161616161616161616161616161616161616161"
      "616161616161",
      &input));
  DecodeHeaderBlock(input);
}
// Name length varint overflows the integer decoder.
TEST_P(QpackDecoderTest, NameLenTooLargeForVarintDecoder) {
  EXPECT_CALL(handler_,
              OnDecodingErrorDetected(QUIC_QPACK_DECOMPRESSION_FAILED,
                                      Eq("Encoded integer too large.")));
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("000027ffffffffffffffffffff", &input));
  DecodeHeaderBlock(input);
}
// Name length decodes but exceeds the string literal limit.
TEST_P(QpackDecoderTest, NameLenExceedsLimit) {
  EXPECT_CALL(handler_,
              OnDecodingErrorDetected(QUIC_QPACK_DECOMPRESSION_FAILED,
                                      Eq("String literal too long.")));
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("000027ffff7f", &input));
  DecodeHeaderBlock(input);
}
// Value length varint overflows the integer decoder.
TEST_P(QpackDecoderTest, ValueLenTooLargeForVarintDecoder) {
  EXPECT_CALL(handler_,
              OnDecodingErrorDetected(QUIC_QPACK_DECOMPRESSION_FAILED,
                                      Eq("Encoded integer too large.")));
  std::string input;
  ASSERT_TRUE(
      absl::HexStringToBytes("000023666f6f7fffffffffffffffffffff", &input));
  DecodeHeaderBlock(input);
}
// Value length decodes but exceeds the string literal limit.
TEST_P(QpackDecoderTest, ValueLenExceedsLimit) {
  EXPECT_CALL(handler_,
              OnDecodingErrorDetected(QUIC_QPACK_DECOMPRESSION_FAILED,
                                      Eq("String literal too long.")));
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("000023666f6f7fffff7f", &input));
  DecodeHeaderBlock(input);
}
// Control characters inside a value are passed through unmodified.
TEST_P(QpackDecoderTest, LineFeedInValue) {
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("ba\nr")));
  EXPECT_CALL(handler_, OnDecodingCompleted());
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("000023666f6f0462610a72", &input));
  DecodeHeaderBlock(input);
}
// Header block ends in the middle of a field line.
TEST_P(QpackDecoderTest, IncompleteHeaderBlock) {
  EXPECT_CALL(handler_,
              OnDecodingErrorDetected(QUIC_QPACK_DECOMPRESSION_FAILED,
                                      Eq("Incomplete header block.")));
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("00002366", &input));
  DecodeHeaderBlock(input);
}
// Huffman-encoded literal name and value.
TEST_P(QpackDecoderTest, HuffmanSimple) {
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("custom-key"), Eq("custom-value")));
  EXPECT_CALL(handler_, OnDecodingCompleted());
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes(
      "00002f0125a849e95ba97d7f8925a849e95bb8e8b4bf", &input));
  DecodeHeaderBlock(input);
}
// Huffman and non-Huffman strings may be mixed freely within one block.
TEST_P(QpackDecoderTest, AlternatingHuffmanNonHuffman) {
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("custom-key"), Eq("custom-value")))
      .Times(4);
  EXPECT_CALL(handler_, OnDecodingCompleted());
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes(
      "0000"
      "2f0125a849e95ba97d7f"
      "8925a849e95bb8e8b4bf"
      "2703637573746f6d2d6b6579"
      "0c637573746f6d2d76616c7565"
      "2f0125a849e95ba97d7f"
      "0c637573746f6d2d76616c7565"
      "2703637573746f6d2d6b6579"
      "8925a849e95bb8e8b4bf",
      &input));
  DecodeHeaderBlock(input);
}
// Huffman name lacking the required EOS padding bits fails decoding.
TEST_P(QpackDecoderTest, HuffmanNameDoesNotHaveEOSPrefix) {
  EXPECT_CALL(handler_,
              OnDecodingErrorDetected(QUIC_QPACK_DECOMPRESSION_FAILED,
                                      Eq("Error in Huffman-encoded string.")));
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes(
      "00002f0125a849e95ba97d7e8925a849e95bb8e8b4bf", &input));
  DecodeHeaderBlock(input);
}
// Huffman value lacking the required EOS padding bits fails decoding.
TEST_P(QpackDecoderTest, HuffmanValueDoesNotHaveEOSPrefix) {
  EXPECT_CALL(handler_,
              OnDecodingErrorDetected(QUIC_QPACK_DECOMPRESSION_FAILED,
                                      Eq("Error in Huffman-encoded string.")));
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes(
      "00002f0125a849e95ba97d7f8925a849e95bb8e8b4be", &input));
  DecodeHeaderBlock(input);
}
// Huffman name with more than seven bits of EOS padding fails decoding.
TEST_P(QpackDecoderTest, HuffmanNameEOSPrefixTooLong) {
  EXPECT_CALL(handler_,
              OnDecodingErrorDetected(QUIC_QPACK_DECOMPRESSION_FAILED,
                                      Eq("Error in Huffman-encoded string.")));
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes(
      "00002f0225a849e95ba97d7fff8925a849e95bb8e8b4bf", &input));
  DecodeHeaderBlock(input);
}
// Huffman value with more than seven bits of EOS padding fails decoding.
TEST_P(QpackDecoderTest, HuffmanValueEOSPrefixTooLong) {
  EXPECT_CALL(handler_,
              OnDecodingErrorDetected(QUIC_QPACK_DECOMPRESSION_FAILED,
                                      Eq("Error in Huffman-encoded string.")));
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes(
      "00002f0125a849e95ba97d7f8a25a849e95bb8e8b4bfff", &input));
  DecodeHeaderBlock(input);
}
// Indexed and literal-with-name-reference field lines against the static
// table.
TEST_P(QpackDecoderTest, StaticTable) {
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq(":method"), Eq("GET")));
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq(":method"), Eq("POST")));
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq(":method"), Eq("TRACE")));
  EXPECT_CALL(handler_,
              OnHeaderDecoded(Eq("accept-encoding"), Eq("gzip, deflate, br")));
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("accept-encoding"), Eq("compress")));
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("accept-encoding"), Eq("")));
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("location"), Eq("")));
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("location"), Eq("foo")));
  EXPECT_CALL(handler_, OnDecodingCompleted());
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes(
      "0000d1dfccd45f108621e9aec2a11f5c8294e75f000554524143455f1000", &input));
  DecodeHeaderBlock(input);
}
// A static table index past the end of the table is an error; the valid
// field line before it is still delivered.
TEST_P(QpackDecoderTest, TooHighStaticTableIndex) {
  EXPECT_CALL(handler_,
              OnHeaderDecoded(Eq("x-frame-options"), Eq("sameorigin")));
  EXPECT_CALL(handler_,
              OnDecodingErrorDetected(QUIC_QPACK_DECOMPRESSION_FAILED,
                                      Eq("Static table entry not found.")));
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("0000ff23ff24", &input));
  DecodeHeaderBlock(input);
}
// Dynamic table references via relative and post-base indices, with the
// table populated through encoder stream instructions; repeated with
// different Base values.
TEST_P(QpackDecoderTest, DynamicTable) {
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes(
      "3fe107"
      "6294e703626172"
      "80035a5a5a"
      "cf8294e7"
      "01",
      &input));
  DecodeEncoderStreamData(input);
  Sequence s;
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("bar"))).InSequence(s);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("ZZZ"))).InSequence(s);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq(":method"), Eq("foo")))
      .InSequence(s);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("ZZZ"))).InSequence(s);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq(":method"), Eq("ZZ"))).InSequence(s);
  EXPECT_CALL(handler_, OnDecodingCompleted()).InSequence(s);
  EXPECT_CALL(decoder_stream_sender_delegate_,
              WriteStreamData(Eq(kHeaderAcknowledgement)))
      .InSequence(s);
  ASSERT_TRUE(absl::HexStringToBytes(
      "0500"
      "83"
      "82"
      "81"
      "80"
      "41025a5a",
      &input));
  DecodeHeaderBlock(input);
  FlushDecoderStream();
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("bar"))).InSequence(s);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("ZZZ"))).InSequence(s);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq(":method"), Eq("foo")))
      .InSequence(s);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("ZZZ"))).InSequence(s);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq(":method"), Eq("ZZ"))).InSequence(s);
  EXPECT_CALL(handler_, OnDecodingCompleted()).InSequence(s);
  EXPECT_CALL(decoder_stream_sender_delegate_,
              WriteStreamData(Eq(kHeaderAcknowledgement)))
      .InSequence(s);
  ASSERT_TRUE(absl::HexStringToBytes(
      "0502"
      "85"
      "84"
      "83"
      "82"
      "43025a5a",
      &input));
  DecodeHeaderBlock(input);
  FlushDecoderStream();
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("bar"))).InSequence(s);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("ZZZ"))).InSequence(s);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq(":method"), Eq("foo")))
      .InSequence(s);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("ZZZ"))).InSequence(s);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq(":method"), Eq("ZZ"))).InSequence(s);
  EXPECT_CALL(handler_, OnDecodingCompleted()).InSequence(s);
  EXPECT_CALL(decoder_stream_sender_delegate_,
              WriteStreamData(Eq(kHeaderAcknowledgement)))
      .InSequence(s);
  ASSERT_TRUE(absl::HexStringToBytes(
      "0582"
      "80"
      "10"
      "11"
      "12"
      "01025a5a",
      &input));
  DecodeHeaderBlock(input);
  FlushDecoderStream();
}
// Shrinking the dynamic table capacity evicts entries; referencing an
// evicted entry afterwards is an error.
TEST_P(QpackDecoderTest, DecreasingDynamicTableCapacityEvictsEntries) {
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("3fe107", &input));
  DecodeEncoderStreamData(input);
  ASSERT_TRUE(absl::HexStringToBytes("6294e703626172", &input));
  DecodeEncoderStreamData(input);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("bar")));
  EXPECT_CALL(handler_, OnDecodingCompleted());
  EXPECT_CALL(decoder_stream_sender_delegate_,
              WriteStreamData(Eq(kHeaderAcknowledgement)));
  ASSERT_TRUE(absl::HexStringToBytes(
      "0200"
      "80",
      &input));
  DecodeHeaderBlock(input);
  ASSERT_TRUE(absl::HexStringToBytes("3f01", &input));
  DecodeEncoderStreamData(input);
  EXPECT_CALL(handler_, OnDecodingErrorDetected(
                            QUIC_QPACK_DECOMPRESSION_FAILED,
                            Eq("Dynamic table entry already evicted.")));
  ASSERT_TRUE(absl::HexStringToBytes(
      "0200"
      "80",
      &input));
  DecodeHeaderBlock(input);
  FlushDecoderStream();
}
// Inserting an entry larger than the dynamic table capacity is an encoder
// stream error.
TEST_P(QpackDecoderTest, EncoderStreamErrorEntryTooLarge) {
  std::string input;
  EXPECT_CALL(
      encoder_stream_error_delegate_,
      OnEncoderStreamError(QUIC_QPACK_ENCODER_STREAM_ERROR_INSERTING_LITERAL,
                           Eq("Error inserting literal entry.")));
  ASSERT_TRUE(absl::HexStringToBytes("3f03", &input));
  DecodeEncoderStreamData(input);
  ASSERT_TRUE(absl::HexStringToBytes("6294e703626172", &input));
  DecodeEncoderStreamData(input);
}
// Insert-with-name-reference pointing past the static table.
TEST_P(QpackDecoderTest, EncoderStreamErrorInvalidStaticTableEntry) {
  EXPECT_CALL(
      encoder_stream_error_delegate_,
      OnEncoderStreamError(QUIC_QPACK_ENCODER_STREAM_INVALID_STATIC_ENTRY,
                           Eq("Invalid static table entry.")));
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("ff2400", &input));
  DecodeEncoderStreamData(input);
}
// Insert-with-name-reference with an out-of-range dynamic relative index.
TEST_P(QpackDecoderTest, EncoderStreamErrorInvalidDynamicTableEntry) {
  EXPECT_CALL(encoder_stream_error_delegate_,
              OnEncoderStreamError(
                  QUIC_QPACK_ENCODER_STREAM_INSERTION_INVALID_RELATIVE_INDEX,
                  Eq("Invalid relative index.")));
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes(
      "3fe107"
      "6294e703626172"
      "8100",
      &input));
  DecodeEncoderStreamData(input);
}
// Duplicate instruction with an out-of-range relative index.
TEST_P(QpackDecoderTest, EncoderStreamErrorDuplicateInvalidEntry) {
  EXPECT_CALL(encoder_stream_error_delegate_,
              OnEncoderStreamError(
                  QUIC_QPACK_ENCODER_STREAM_DUPLICATE_INVALID_RELATIVE_INDEX,
                  Eq("Invalid relative index.")));
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes(
      "3fe107"
      "6294e703626172"
      "01",
      &input));
  DecodeEncoderStreamData(input);
}
// Oversized varint on the encoder stream.
TEST_P(QpackDecoderTest, EncoderStreamErrorTooLargeInteger) {
  EXPECT_CALL(encoder_stream_error_delegate_,
              OnEncoderStreamError(QUIC_QPACK_ENCODER_STREAM_INTEGER_TOO_LARGE,
                                   Eq("Encoded integer too large.")));
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("3fffffffffffffffffffff", &input));
  DecodeEncoderStreamData(input);
}
// Any dynamic relative index is invalid when Base is zero.
TEST_P(QpackDecoderTest, InvalidDynamicEntryWhenBaseIsZero) {
  EXPECT_CALL(handler_, OnDecodingErrorDetected(QUIC_QPACK_DECOMPRESSION_FAILED,
                                                Eq("Invalid relative index.")));
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("3fe107", &input));
  DecodeEncoderStreamData(input);
  ASSERT_TRUE(absl::HexStringToBytes("6294e703626172", &input));
  DecodeEncoderStreamData(input);
  ASSERT_TRUE(absl::HexStringToBytes(
      "0280"
      "80",
      &input));
  DecodeHeaderBlock(input);
}
// Delta Base that would make Base negative is an error.
TEST_P(QpackDecoderTest, InvalidNegativeBase) {
  EXPECT_CALL(handler_, OnDecodingErrorDetected(QUIC_QPACK_DECOMPRESSION_FAILED,
                                                Eq("Error calculating Base.")));
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("0281", &input));
  DecodeHeaderBlock(input);
}
// Relative indices beyond the inserted entries are invalid, both for
// indexed field lines and for name references.
TEST_P(QpackDecoderTest, InvalidDynamicEntryByRelativeIndex) {
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("3fe107", &input));
  DecodeEncoderStreamData(input);
  ASSERT_TRUE(absl::HexStringToBytes("6294e703626172", &input));
  DecodeEncoderStreamData(input);
  EXPECT_CALL(handler_, OnDecodingErrorDetected(QUIC_QPACK_DECOMPRESSION_FAILED,
                                                Eq("Invalid relative index.")));
  ASSERT_TRUE(absl::HexStringToBytes(
      "0200"
      "81",
      &input));
  DecodeHeaderBlock(input);
  EXPECT_CALL(handler_, OnDecodingErrorDetected(QUIC_QPACK_DECOMPRESSION_FAILED,
                                                Eq("Invalid relative index.")));
  ASSERT_TRUE(absl::HexStringToBytes(
      "0200"
      "4100",
      &input));
  DecodeHeaderBlock(input);
}
// References to entries that have been evicted fail, via every index form:
// indexed, indexed name reference, post-base indexed, post-base name ref.
TEST_P(QpackDecoderTest, EvictedDynamicTableEntry) {
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("3f61", &input));
  DecodeEncoderStreamData(input);
  ASSERT_TRUE(absl::HexStringToBytes("6294e703626172", &input));
  DecodeEncoderStreamData(input);
  ASSERT_TRUE(absl::HexStringToBytes("00000000", &input));
  DecodeEncoderStreamData(input);
  EXPECT_CALL(handler_, OnDecodingErrorDetected(
                            QUIC_QPACK_DECOMPRESSION_FAILED,
                            Eq("Dynamic table entry already evicted.")));
  ASSERT_TRUE(absl::HexStringToBytes(
      "0500"
      "82",
      &input));
  DecodeHeaderBlock(input);
  EXPECT_CALL(handler_, OnDecodingErrorDetected(
                            QUIC_QPACK_DECOMPRESSION_FAILED,
                            Eq("Dynamic table entry already evicted.")));
  ASSERT_TRUE(absl::HexStringToBytes(
      "0500"
      "4200",
      &input));
  DecodeHeaderBlock(input);
  EXPECT_CALL(handler_, OnDecodingErrorDetected(
                            QUIC_QPACK_DECOMPRESSION_FAILED,
                            Eq("Dynamic table entry already evicted.")));
  ASSERT_TRUE(absl::HexStringToBytes(
      "0380"
      "10",
      &input));
  DecodeHeaderBlock(input);
  EXPECT_CALL(handler_, OnDecodingErrorDetected(
                            QUIC_QPACK_DECOMPRESSION_FAILED,
                            Eq("Dynamic table entry already evicted.")));
  ASSERT_TRUE(absl::HexStringToBytes(
      "0380"
      "0000",
      &input));
  DecodeHeaderBlock(input);
}
// Capacity updates above the configured maximum are rejected.
TEST_P(QpackDecoderTest, TableCapacityMustNotExceedMaximum) {
  EXPECT_CALL(
      encoder_stream_error_delegate_,
      OnEncoderStreamError(QUIC_QPACK_ENCODER_STREAM_SET_DYNAMIC_TABLE_CAPACITY,
                           Eq("Error updating dynamic table capacity.")));
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("3fe10f", &input));
  DecodeEncoderStreamData(input);
}
// A valid capacity update succeeds (no delegate calls expected).
TEST_P(QpackDecoderTest, SetDynamicTableCapacity) {
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("3f61", &input));
  DecodeEncoderStreamData(input);
}
// An undecodable encoded Required Insert Count fails.
TEST_P(QpackDecoderTest, InvalidEncodedRequiredInsertCount) {
  EXPECT_CALL(handler_, OnDecodingErrorDetected(
                            QUIC_QPACK_DECOMPRESSION_FAILED,
                            Eq("Error decoding Required Insert Count.")));
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("4100", &input));
  DecodeHeaderBlock(input);
}
// Payload after an invalid Required Insert Count does not mask the error.
TEST_P(QpackDecoderTest, DataAfterInvalidEncodedRequiredInsertCount) {
  EXPECT_CALL(handler_, OnDecodingErrorDetected(
                            QUIC_QPACK_DECOMPRESSION_FAILED,
                            Eq("Error decoding Required Insert Count.")));
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("410000", &input));
  DecodeHeaderBlock(input);
}
// Required Insert Count encoding wraps modulo 2 * MaxEntries; large entries
// force enough insertions for the wrap to occur.
TEST_P(QpackDecoderTest, WrappedRequiredInsertCount) {
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("3fe107", &input));
  DecodeEncoderStreamData(input);
  ASSERT_TRUE(
      absl::HexStringToBytes("6294e7"
                             "7fd903",
                             &input));
  DecodeEncoderStreamData(input);
  std::string header_value(600, 'Z');
  DecodeEncoderStreamData(header_value);
  DecodeEncoderStreamData(std::string(200, '\x00'));
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq(header_value)));
  EXPECT_CALL(handler_, OnDecodingCompleted());
  EXPECT_CALL(decoder_stream_sender_delegate_,
              WriteStreamData(Eq(kHeaderAcknowledgement)));
  ASSERT_TRUE(absl::HexStringToBytes(
      "0a00"
      "80",
      &input));
  DecodeHeaderBlock(input);
  FlushDecoderStream();
}
// Required Insert Count larger than the number of insertions is an error,
// even when the block only references the static table.
TEST_P(QpackDecoderTest, NonZeroRequiredInsertCountButNoDynamicEntries) {
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("3fe107", &input));
  DecodeEncoderStreamData(input);
  ASSERT_TRUE(absl::HexStringToBytes("6294e703626172", &input));
  DecodeEncoderStreamData(input);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq(":method"), Eq("GET")));
  EXPECT_CALL(handler_,
              OnDecodingErrorDetected(QUIC_QPACK_DECOMPRESSION_FAILED,
                                      Eq("Required Insert Count too large.")));
  ASSERT_TRUE(absl::HexStringToBytes(
      "0200"
      "d1",
      &input));
  DecodeHeaderBlock(input);
}
// Field lines must not reference entries at or above the Required Insert
// Count, in any of the four dynamic index forms.
TEST_P(QpackDecoderTest, AddressEntryNotAllowedByRequiredInsertCount) {
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("3fe107", &input));
  DecodeEncoderStreamData(input);
  ASSERT_TRUE(absl::HexStringToBytes("6294e703626172", &input));
  DecodeEncoderStreamData(input);
  EXPECT_CALL(
      handler_,
      OnDecodingErrorDetected(
          QUIC_QPACK_DECOMPRESSION_FAILED,
          Eq("Absolute Index must be smaller than Required Insert Count.")));
  ASSERT_TRUE(absl::HexStringToBytes(
      "0201"
      "80",
      &input));
  DecodeHeaderBlock(input);
  EXPECT_CALL(
      handler_,
      OnDecodingErrorDetected(
          QUIC_QPACK_DECOMPRESSION_FAILED,
          Eq("Absolute Index must be smaller than Required Insert Count.")));
  ASSERT_TRUE(absl::HexStringToBytes(
      "0201"
      "4000",
      &input));
  DecodeHeaderBlock(input);
  EXPECT_CALL(
      handler_,
      OnDecodingErrorDetected(
          QUIC_QPACK_DECOMPRESSION_FAILED,
          Eq("Absolute Index must be smaller than Required Insert Count.")));
  ASSERT_TRUE(absl::HexStringToBytes(
      "0200"
      "10",
      &input));
  DecodeHeaderBlock(input);
  EXPECT_CALL(
      handler_,
      OnDecodingErrorDetected(
          QUIC_QPACK_DECOMPRESSION_FAILED,
          Eq("Absolute Index must be smaller than Required Insert Count.")));
  ASSERT_TRUE(absl::HexStringToBytes(
      "0200"
      "0000",
      &input));
  DecodeHeaderBlock(input);
}
// Required Insert Count larger than the largest entry actually referenced
// is an error, detected at end of the header block.
TEST_P(QpackDecoderTest, PromisedRequiredInsertCountLargerThanActual) {
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("3fe107", &input));
  DecodeEncoderStreamData(input);
  ASSERT_TRUE(absl::HexStringToBytes("6294e703626172", &input));
  DecodeEncoderStreamData(input);
  ASSERT_TRUE(absl::HexStringToBytes("00", &input));
  DecodeEncoderStreamData(input);
  DecodeEncoderStreamData(input);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("bar")));
  EXPECT_CALL(handler_,
              OnDecodingErrorDetected(QUIC_QPACK_DECOMPRESSION_FAILED,
                                      Eq("Required Insert Count too large.")));
  ASSERT_TRUE(absl::HexStringToBytes(
      "0300"
      "81",
      &input));
  DecodeHeaderBlock(input);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("")));
  EXPECT_CALL(handler_,
              OnDecodingErrorDetected(QUIC_QPACK_DECOMPRESSION_FAILED,
                                      Eq("Required Insert Count too large.")));
  ASSERT_TRUE(absl::HexStringToBytes(
      "0300"
      "4100",
      &input));
  DecodeHeaderBlock(input);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("bar")));
  EXPECT_CALL(handler_,
              OnDecodingErrorDetected(QUIC_QPACK_DECOMPRESSION_FAILED,
                                      Eq("Required Insert Count too large.")));
  ASSERT_TRUE(absl::HexStringToBytes(
      "0481"
      "10",
      &input));
  DecodeHeaderBlock(input);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("")));
  EXPECT_CALL(handler_,
              OnDecodingErrorDetected(QUIC_QPACK_DECOMPRESSION_FAILED,
                                      Eq("Required Insert Count too large.")));
  ASSERT_TRUE(absl::HexStringToBytes(
      "0481"
      "0000",
      &input));
  DecodeHeaderBlock(input);
}
// A header block referencing a not-yet-received insertion blocks; it is
// decoded once the encoder stream delivers the entry.
TEST_P(QpackDecoderTest, BlockedDecoding) {
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes(
      "0200"
      "80",
      &input));
  DecodeHeaderBlock(input);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("bar")));
  EXPECT_CALL(handler_, OnDecodingCompleted());
  EXPECT_CALL(decoder_stream_sender_delegate_,
              WriteStreamData(Eq(kHeaderAcknowledgement)));
  ASSERT_TRUE(absl::HexStringToBytes("3fe107", &input));
  DecodeEncoderStreamData(input);
  ASSERT_TRUE(absl::HexStringToBytes("6294e703626172", &input));
  DecodeEncoderStreamData(input);
  FlushDecoderStream();
}
// A stream can unblock mid-header-block and continue decoding the rest of
// the block as more data arrives.
TEST_P(QpackDecoderTest, BlockedDecodingUnblockedBeforeEndOfHeaderBlock) {
  std::string input;
  StartDecoding();
  ASSERT_TRUE(absl::HexStringToBytes(
      "0200"
      "80"
      "d1",
      &input));
  DecodeData(input);
  ASSERT_TRUE(absl::HexStringToBytes("3fe107", &input));
  DecodeEncoderStreamData(input);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("bar")));
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq(":method"), Eq("GET")));
  ASSERT_TRUE(absl::HexStringToBytes("6294e703626172", &input));
  DecodeEncoderStreamData(input);
  Mock::VerifyAndClearExpectations(&handler_);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("bar")));
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq(":scheme"), Eq("https")));
  ASSERT_TRUE(absl::HexStringToBytes(
      "80"
      "d7",
      &input));
  DecodeData(input);
  Mock::VerifyAndClearExpectations(&handler_);
  EXPECT_CALL(handler_, OnDecodingCompleted());
  EXPECT_CALL(decoder_stream_sender_delegate_,
              WriteStreamData(Eq(kHeaderAcknowledgement)));
  EndDecoding();
  FlushDecoderStream();
}
// Unblocking can immediately surface an error that was buffered while the
// stream was blocked.
TEST_P(QpackDecoderTest,
       BlockedDecodingUnblockedAndErrorBeforeEndOfHeaderBlock) {
  std::string input;
  StartDecoding();
  ASSERT_TRUE(absl::HexStringToBytes(
      "0200"
      "80"
      "81",
      &input));
  DecodeData(input);
  ASSERT_TRUE(absl::HexStringToBytes("3fe107", &input));
  DecodeEncoderStreamData(input);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("bar")));
  EXPECT_CALL(handler_, OnDecodingErrorDetected(QUIC_QPACK_DECOMPRESSION_FAILED,
                                                Eq("Invalid relative index.")));
  ASSERT_TRUE(absl::HexStringToBytes("6294e703626172", &input));
  DecodeEncoderStreamData(input);
}
// A blocked stream's reference stays valid even if the entry would have
// been evicted by the time the stream unblocks.
TEST_P(QpackDecoderTest, BlockedDecodingAndEvictedEntries) {
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes("3f61", &input));
  DecodeEncoderStreamData(input);
  ASSERT_TRUE(absl::HexStringToBytes(
      "0700"
      "80",
      &input));
  DecodeHeaderBlock(input);
  ASSERT_TRUE(absl::HexStringToBytes("6294e703626172", &input));
  DecodeEncoderStreamData(input);
  ASSERT_TRUE(absl::HexStringToBytes("00000000", &input));
  DecodeEncoderStreamData(input);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("baz")));
  EXPECT_CALL(handler_, OnDecodingCompleted());
  EXPECT_CALL(decoder_stream_sender_delegate_,
              WriteStreamData(Eq(kHeaderAcknowledgement)));
  ASSERT_TRUE(absl::HexStringToBytes("6294e70362617a", &input));
  DecodeEncoderStreamData(input);
  FlushDecoderStream();
}
// With kMaximumBlockedStreams == 1, a second blocked stream is an error.
TEST_P(QpackDecoderTest, TooManyBlockedStreams) {
  std::string data;
  ASSERT_TRUE(absl::HexStringToBytes("0200", &data));
  auto progressive_decoder1 = CreateProgressiveDecoder(/*stream_id=*/ 1);
  progressive_decoder1->Decode(data);
  EXPECT_CALL(handler_,
              OnDecodingErrorDetected(
                  QUIC_QPACK_DECOMPRESSION_FAILED,
                  Eq("Limit on number of blocked streams exceeded.")));
  auto progressive_decoder2 = CreateProgressiveDecoder(/*stream_id=*/ 2);
  progressive_decoder2->Decode(data);
}
// A non-blocked block still advances the known insert count; the decoder
// sends Header Acknowledgement followed by Insert Count Increment.
TEST_P(QpackDecoderTest, InsertCountIncrement) {
  std::string input;
  ASSERT_TRUE(absl::HexStringToBytes(
      "3fe107"
      "6294e703626172"
      "00",
      &input));
  DecodeEncoderStreamData(input);
  EXPECT_CALL(handler_, OnHeaderDecoded(Eq("foo"), Eq("bar")));
  EXPECT_CALL(handler_, OnDecodingCompleted());
  std::string expected_data;
  ASSERT_TRUE(absl::HexStringToBytes(
      "81"
      "01",
      &expected_data));
  EXPECT_CALL(decoder_stream_sender_delegate_,
              WriteStreamData(Eq(expected_data)));
  ASSERT_TRUE(absl::HexStringToBytes(
      "0200"
      "80",
      &input));
  DecodeHeaderBlock(input);
  FlushDecoderStream();
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_decoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_decoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
be4b5433-ca85-4f8f-92b6-37a1dd883c50 | cpp | tensorflow/tensorflow | graph_rewriters | tensorflow/core/data/service/graph_rewriters.cc | tensorflow/core/data/service/graph_rewriters_test.cc | #include "tensorflow/core/data/service/graph_rewriters.h"
#include <cstdlib>
#include <iterator>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/strings/match.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "absl/types/optional.h"
#include "tensorflow/core/data/rewrite_utils.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/url.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/grappler_item_builder.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer.h"
#include "tensorflow/core/grappler/optimizers/data/auto_shard.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
#include "tensorflow/core/grappler/optimizers/data/remove_compression_map.h"
#include "tensorflow/core/kernels/data/experimental/auto_shard_dataset_op.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/device_properties.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::data::experimental::AutoShardDatasetOp;
// Passed to GetGrapplerItem: only the targeted tf.data rewrite is applied;
// general grappler optimizations are skipped.
constexpr bool kApplyGeneralGrapplerOptimizations = false;
// Returns true if `address` has a templated dynamic port of the form
// "%port...%" (e.g. "/worker/task/0:%port_worker%").
bool HasDynamicPort(absl::string_view address) {
  URL url(address);
  if (!url.has_port()) {
    return false;
  }
  const bool starts_like_template = absl::StartsWith(url.port(), "%port");
  const bool ends_like_template = absl::EndsWith(url.port(), "%");
  return starts_like_template && ends_like_template;
}
// Returns true if the configured address should have its port replaced by
// the worker's port: the hosts match, the worker address carries a port,
// and the configured address has either no port or a dynamic ("%port...%")
// port template.
bool ShouldReplaceDynamicPort(absl::string_view config_address,
                              absl::string_view worker_address) {
  URL config_url(config_address), worker_url(worker_address);
  const bool config_port_is_open =
      !config_url.has_port() || HasDynamicPort(config_address);
  return config_port_is_open && worker_url.has_port() &&
         config_url.host() == worker_url.host();
}
}
// Runs the grappler `RemoveCompressionMap` rewrite over `graph_def` and
// returns the rewritten graph. Fails if the dataset output node cannot be
// located or the optimizer reports an error.
absl::StatusOr<GraphDef>
RemoveCompressionMapRewriter::ApplyRemoveCompressionMapRewrite(
    const GraphDef& graph_def) {
  grappler::RemoveCompressionMap remove_compression_map;
  tensorflow::RewriterConfig::CustomGraphOptimizer config = GetRewriteConfig();
  TF_RETURN_IF_ERROR(remove_compression_map.Init(&config));
  // Copy the input: the graph is handed to GetGrapplerItem by mutable
  // pointer below.
  GraphDef input_graph = graph_def;
  TF_ASSIGN_OR_RETURN(std::string dataset_node, GetDatasetNode(input_graph));
  std::unique_ptr<tensorflow::grappler::GrapplerItem> grappler_item =
      GetGrapplerItem(&input_graph, &dataset_node, false,
                      kApplyGeneralGrapplerOptimizations);
  GraphDef rewritten_graph;
  // An empty device map suffices; no real devices are needed for a tf.data
  // graph rewrite.
  std::unordered_map<std::string, tensorflow::DeviceProperties> device_map;
  tensorflow::grappler::VirtualCluster cluster(device_map);
  grappler::AutoShard::OptimizationStats stats;
  TF_RETURN_IF_ERROR(remove_compression_map.OptimizeAndCollectStats(
      &cluster, *grappler_item, &rewritten_graph, &stats));
  return rewritten_graph;
}
// Builds the optimizer config selecting the remove-compression-map rewrite;
// this rewrite takes no parameters.
tensorflow::RewriterConfig::CustomGraphOptimizer
RemoveCompressionMapRewriter::GetRewriteConfig() const {
  tensorflow::RewriterConfig::CustomGraphOptimizer config;
  config.set_name("tf-data-service-remove-compression-map");
  return config;
}
// Builds an AutoShardRewriter from `task_def`, translating the task's
// sharding policy into an AutoShardPolicy. Returns an error if the policy
// cannot be converted.
absl::StatusOr<AutoShardRewriter> AutoShardRewriter::Create(
    const TaskDef& task_def) {
  TF_ASSIGN_OR_RETURN(
      AutoShardPolicy auto_shard_policy,
      ToAutoShardPolicy(task_def.processing_mode_def().sharding_policy()));
  return AutoShardRewriter(auto_shard_policy, task_def.num_workers(),
                           task_def.worker_index());
}
// Applies the grappler auto-shard rewrite to `graph_def` using the policy,
// worker count, and worker index this rewriter was created with. Returns
// the input unchanged when sharding is OFF.
absl::StatusOr<GraphDef> AutoShardRewriter::ApplyAutoShardRewrite(
    const GraphDef& graph_def) {
  if (auto_shard_policy_ == AutoShardPolicy::OFF) {
    return graph_def;
  }
  VLOG(2) << "Applying auto-shard policy "
          << AutoShardPolicy_Name(auto_shard_policy_)
          << ". Number of workers: " << num_workers_
          << "; worker index: " << worker_index_ << ".";
  grappler::AutoShard autoshard;
  tensorflow::RewriterConfig::CustomGraphOptimizer config = GetRewriteConfig();
  TF_RETURN_IF_ERROR(autoshard.Init(&config));
  // Copy the input: the graph is handed to GetGrapplerItem by mutable
  // pointer below.
  GraphDef input_graph = graph_def;
  TF_ASSIGN_OR_RETURN(std::string dataset_node, GetDatasetNode(input_graph));
  std::unique_ptr<tensorflow::grappler::GrapplerItem> grappler_item =
      GetGrapplerItem(&input_graph, &dataset_node, false,
                      kApplyGeneralGrapplerOptimizations);
  GraphDef rewritten_graph;
  // An empty device map suffices; no real devices are needed for a tf.data
  // graph rewrite.
  std::unordered_map<std::string, tensorflow::DeviceProperties> device_map;
  tensorflow::grappler::VirtualCluster cluster(device_map);
  grappler::AutoShard::OptimizationStats stats;
  TF_RETURN_IF_ERROR(autoshard.OptimizeAndCollectStats(
      &cluster, *grappler_item, &rewritten_graph, &stats));
  return rewritten_graph;
}
// Private constructor; use AutoShardRewriter::Create, which validates the
// sharding policy first.
AutoShardRewriter::AutoShardRewriter(AutoShardPolicy auto_shard_policy,
                                     int64_t num_workers, int64_t worker_index)
    : auto_shard_policy_(auto_shard_policy),
      num_workers_(num_workers),
      worker_index_(worker_index) {}
// Builds the auto-shard optimizer config, forwarding the worker count, this
// worker's index, and the sharding policy; the replica count is fixed at 1.
tensorflow::RewriterConfig::CustomGraphOptimizer
AutoShardRewriter::GetRewriteConfig() const {
  tensorflow::RewriterConfig::CustomGraphOptimizer config;
  config.set_name("tf-data-service-auto-shard");
  (*config.mutable_parameter_map())[AutoShardDatasetOp::kNumWorkers].set_i(
      num_workers_);
  (*config.mutable_parameter_map())[AutoShardDatasetOp::kIndex].set_i(
      worker_index_);
  (*config.mutable_parameter_map())[AutoShardDatasetOp::kAutoShardPolicy].set_i(
      auto_shard_policy_);
  (*config.mutable_parameter_map())[AutoShardDatasetOp::kNumReplicas].set_i(1);
  return config;
}
// Checks whether `worker_address` is acceptable given the configured worker
// list. Any worker is accepted when no list is configured. Otherwise the
// address must match a configured entry exactly, or match a configured
// entry's host whose port is absent or a dynamic template.
Status WorkerIndexResolver::ValidateWorker(
    absl::string_view worker_address) const {
  if (worker_addresses_.empty()) {
    return absl::OkStatus();
  }
  const bool is_configured = absl::c_any_of(
      worker_addresses_, [worker_address](absl::string_view config_address) {
        return config_address == worker_address ||
               ShouldReplaceDynamicPort(config_address, worker_address);
      });
  if (is_configured) {
    return absl::OkStatus();
  }
  return errors::FailedPrecondition(absl::Substitute(
      "Failed to assign an index for worker $0. Configured workers list: [$1]. "
      "The worker's address is not configured, or other workers are already "
      "running at the configured host. If your worker has restarted, make sure "
      "it runs at the same address and port.",
      worker_address, absl::StrJoin(worker_addresses_, ", ")));
}
// Records `worker_address` against the configured worker list. The list is
// scanned in order and the first matching entry wins: an exact match is a
// no-op; otherwise the first entry whose (absent or dynamic) port should be
// replaced is overwritten with the worker's full address. An address that
// matches no entry is silently ignored (ValidateWorker reports that case).
// NOTE: the per-element check order is load-bearing — an earlier dynamic-
// port entry takes precedence over a later exact match.
void WorkerIndexResolver::AddWorker(absl::string_view worker_address) {
  for (std::string& config_address : worker_addresses_) {
    if (config_address == worker_address) {
      return;
    }
    if (ShouldReplaceDynamicPort(config_address, worker_address)) {
      config_address = std::string(worker_address);
      return;
    }
  }
}
// Returns the position of `worker_address` in the configured worker list
// (the first match), or NotFound if the address is not in the list.
absl::StatusOr<int64_t> WorkerIndexResolver::GetWorkerIndex(
    absl::string_view worker_address) const {
  for (int64_t index = 0;
       index < static_cast<int64_t>(worker_addresses_.size()); ++index) {
    if (worker_addresses_[index] == worker_address) {
      return index;
    }
  }
  return errors::NotFound(absl::Substitute(
      "Failed to shard dataset in tf.data service: Worker $0 is not in the "
      "workers list. Got workers list $1.",
      worker_address, absl::StrJoin(worker_addresses_, ",")));
}
}
} | #include "tensorflow/core/data/service/graph_rewriters.h"
#include <string>
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::data::testing::EqualsProto;
using ::tensorflow::data::testing::RangeDatasetWithShardHint;
using ::tensorflow::data::testing::RangeSquareDataset;
using ::tensorflow::testing::IsOkAndHolds;
using ::tensorflow::testing::StatusIs;
using ::testing::HasSubstr;
using ::testing::SizeIs;
// Returns a copy of the node named `name` from `graph_def`, or NotFound
// when no node with that name exists.
absl::StatusOr<NodeDef> GetNode(const GraphDef& graph_def,
                                absl::string_view name) {
  for (const NodeDef& node : graph_def.node()) {
    if (node.name() == name) {
      return node;
    }
  }
  return errors::NotFound(absl::Substitute("Node $0 not found in graph $1.",
                                           name, graph_def.ShortDebugString()));
}
// Returns the first int64 element of the "value" tensor attribute of the
// node named `name`, or NotFound when the node does not exist.
// NOTE(review): assumes the node carries an int64 "value" attr (e.g. a
// Const node) — callers pass ShardDataset operand names; confirm if reused.
absl::StatusOr<int64_t> GetValue(const GraphDef& graph_def,
                                 absl::string_view name) {
  for (const NodeDef& node : graph_def.node()) {
    if (node.name() == name) {
      return node.attr().at("value").tensor().int64_val()[0];
    }
  }
  return errors::NotFound(absl::Substitute("Node $0 not found in graph $1.",
                                           name, graph_def.ShortDebugString()));
}
// Builds a TaskDef describing a sharded task: which sharding policy to use,
// how many workers participate, and which shard this worker owns.
TaskDef GetTaskDef(const ProcessingModeDef::ShardingPolicy sharding_policy,
                   const int64_t num_workers, const int64_t worker_index) {
  TaskDef result;
  result.set_worker_index(worker_index);
  result.set_num_workers(num_workers);
  result.mutable_processing_mode_def()->set_sharding_policy(sharding_policy);
  return result;
}
// FILE_OR_DATA sharding with (num_workers=3, index=1) should insert a
// ShardDataset node whose 2nd and 3rd inputs carry those two values.
TEST(AutoShardRewriterTest, AutoShard) {
  TaskDef task_def = GetTaskDef(ProcessingModeDef::FILE_OR_DATA,
                                3, 1);
  TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
                          AutoShardRewriter::Create(task_def));
  DatasetDef dataset = RangeSquareDataset(10);
  TF_ASSERT_OK_AND_ASSIGN(GraphDef rewritten_graph,
                          rewriter.ApplyAutoShardRewrite(dataset.graph()));
  TF_ASSERT_OK_AND_ASSIGN(NodeDef shard_node,
                          GetNode(rewritten_graph, "ShardDataset"));
  ASSERT_THAT(shard_node.input(), SizeIs(3));
  EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(1)), IsOkAndHolds(3));
  EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(2)), IsOkAndHolds(1));
}
TEST(AutoShardRewriterTest, ShardByData) {
TaskDef task_def = GetTaskDef(ProcessingModeDef::DATA, 3,
1);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeSquareDataset(10);
TF_ASSERT_OK_AND_ASSIGN(GraphDef rewritten_graph,
rewriter.ApplyAutoShardRewrite(dataset.graph()));
TF_ASSERT_OK_AND_ASSIGN(NodeDef shard_node,
GetNode(rewritten_graph, "ShardDataset"));
ASSERT_THAT(shard_node.input(), SizeIs(3));
EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(1)), IsOkAndHolds(3));
EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(2)), IsOkAndHolds(1));
}
TEST(AutoShardRewriterTest, ShardByFile) {
TaskDef task_def = GetTaskDef(ProcessingModeDef::FILE, 3,
1);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeSquareDataset(10);
EXPECT_THAT(rewriter.ApplyAutoShardRewrite(dataset.graph()),
StatusIs(error::NOT_FOUND,
HasSubstr("Found an unshardable source dataset")));
}
TEST(AutoShardRewriterTest, ShardByHint) {
TaskDef task_def = GetTaskDef(ProcessingModeDef::HINT, 3,
1);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeDatasetWithShardHint(10);
TF_ASSERT_OK_AND_ASSIGN(GraphDef rewritten_graph,
rewriter.ApplyAutoShardRewrite(dataset.graph()));
TF_ASSERT_OK_AND_ASSIGN(NodeDef shard_node,
GetNode(rewritten_graph, "ShardDataset"));
ASSERT_THAT(shard_node.input(), SizeIs(3));
EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(1)), IsOkAndHolds(3));
EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(2)), IsOkAndHolds(1));
}
TEST(AutoShardRewriterTest, NoShard) {
TaskDef task_def =
GetTaskDef(ProcessingModeDef::OFF, 3, 1);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeSquareDataset(10);
EXPECT_THAT(rewriter.ApplyAutoShardRewrite(dataset.graph()),
IsOkAndHolds(EqualsProto(dataset.graph())));
}
TEST(AutoShardRewriterTest, EmptyDataset) {
TaskDef task_def =
GetTaskDef(ProcessingModeDef::FILE_OR_DATA, 3,
1);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeSquareDataset(0);
TF_ASSERT_OK_AND_ASSIGN(GraphDef rewritten_graph,
rewriter.ApplyAutoShardRewrite(dataset.graph()));
TF_ASSERT_OK_AND_ASSIGN(NodeDef shard_node,
GetNode(rewritten_graph, "ShardDataset"));
ASSERT_THAT(shard_node.input(), SizeIs(3));
EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(1)), IsOkAndHolds(3));
EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(2)), IsOkAndHolds(1));
}
TEST(AutoShardRewriterTest, NoWorkers) {
TaskDef task_def =
GetTaskDef(ProcessingModeDef::FILE_OR_DATA, 0,
0);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeSquareDataset(10);
EXPECT_THAT(rewriter.ApplyAutoShardRewrite(dataset.graph()),
StatusIs(error::INVALID_ARGUMENT,
"num_workers should be >= 1, currently 0"));
}
TEST(AutoShardRewriterTest, NoWorkersWhenShardIsOff) {
TaskDef task_def =
GetTaskDef(ProcessingModeDef::OFF, 0, 0);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeSquareDataset(10);
EXPECT_THAT(rewriter.ApplyAutoShardRewrite(dataset.graph()),
IsOkAndHolds(EqualsProto(dataset.graph())));
}
TEST(AutoShardRewriterTest, WorkerIndexOutOfRange) {
TaskDef task_def =
GetTaskDef(ProcessingModeDef::FILE_OR_DATA, 2,
5);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeSquareDataset(10);
EXPECT_THAT(rewriter.ApplyAutoShardRewrite(dataset.graph()),
StatusIs(error::INVALID_ARGUMENT,
"index should be >= 0 and < 2, currently 5"));
}
TEST(WorkerIndexResolverTest, AddOneWorker) {
WorkerIndexResolver resolver(std::vector<std::string>{"localhost"});
EXPECT_THAT(resolver.GetWorkerIndex("localhost:12345"),
StatusIs(error::NOT_FOUND));
TF_EXPECT_OK(resolver.ValidateWorker("localhost:12345"));
resolver.AddWorker("localhost:12345");
EXPECT_THAT(resolver.GetWorkerIndex("localhost:12345"), IsOkAndHolds(0));
}
TEST(WorkerIndexResolverTest, AddMultipleWorkers) {
WorkerIndexResolver resolver(std::vector<std::string>{
"/worker/task/0", "/worker/task/1", "/worker/task/2"});
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/2:12345"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/1:23456"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/0:34567"));
resolver.AddWorker("/worker/task/2:12345");
resolver.AddWorker("/worker/task/1:23456");
resolver.AddWorker("/worker/task/0:34567");
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:34567"), IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:23456"), IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:12345"), IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, NamedPorts) {
WorkerIndexResolver resolver(
std::vector<std::string>{"/worker/task/0:worker", "/worker/task/1:worker",
"/worker/task/2:worker"});
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/2:worker"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/1:worker"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/0:worker"));
resolver.AddWorker("/worker/task/2:worker");
resolver.AddWorker("/worker/task/1:worker");
resolver.AddWorker("/worker/task/0:worker");
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:worker"),
IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:worker"),
IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:worker"),
IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, DynamicPorts) {
WorkerIndexResolver resolver(std::vector<std::string>{
"/worker/task/0:%port_worker%", "/worker/task/1:%port_worker%",
"/worker/task/2:%port_worker%"});
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/2:worker"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/1:worker"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/0:worker"));
resolver.AddWorker("/worker/task/2:worker");
resolver.AddWorker("/worker/task/1:worker");
resolver.AddWorker("/worker/task/0:worker");
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:worker"),
IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:worker"),
IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:worker"),
IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, AnonymousPorts) {
WorkerIndexResolver resolver(
std::vector<std::string>{"/worker/task/0:%port%", "/worker/task/1:%port%",
"/worker/task/2:%port%"});
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/2:10000"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/1:10001"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/0:10002"));
resolver.AddWorker("/worker/task/2:10000");
resolver.AddWorker("/worker/task/1:10001");
resolver.AddWorker("/worker/task/0:10002");
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:10002"), IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:10001"), IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:10000"), IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, NumericPorts) {
WorkerIndexResolver resolver(std::vector<std::string>{
"/worker/task/0:12345", "/worker/task/1:23456", "/worker/task/2:34567"});
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:12345"), IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:23456"), IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:34567"), IsOkAndHolds(2));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/2:34567"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/1:23456"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/0:12345"));
resolver.AddWorker("/worker/task/2:34567");
resolver.AddWorker("/worker/task/1:23456");
resolver.AddWorker("/worker/task/0:12345");
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:12345"), IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:23456"), IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:34567"), IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, IPv6Addresses) {
WorkerIndexResolver resolver(std::vector<std::string>{
"[1080:0:0:0:8:800:200C:417A]", "[1080:0:0:0:8:800:200C:417B]",
"[1080:0:0:0:8:800:200C:417C]"});
TF_EXPECT_OK(resolver.ValidateWorker("[1080:0:0:0:8:800:200C:417A]:12345"));
TF_EXPECT_OK(resolver.ValidateWorker("[1080:0:0:0:8:800:200C:417B]:23456"));
TF_EXPECT_OK(resolver.ValidateWorker("[1080:0:0:0:8:800:200C:417C]:34567"));
resolver.AddWorker("[1080:0:0:0:8:800:200C:417A]:12345");
resolver.AddWorker("[1080:0:0:0:8:800:200C:417B]:23456");
resolver.AddWorker("[1080:0:0:0:8:800:200C:417C]:34567");
EXPECT_THAT(resolver.GetWorkerIndex("[1080:0:0:0:8:800:200C:417A]:12345"),
IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("[1080:0:0:0:8:800:200C:417B]:23456"),
IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("[1080:0:0:0:8:800:200C:417C]:34567"),
IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, IPv6AddressesWithDynamicPort) {
WorkerIndexResolver resolver(
std::vector<std::string>{"[1080:0:0:0:8:800:200C:417A]:%port%",
"[1080:0:0:0:8:800:200C:417B]:%port%",
"[1080:0:0:0:8:800:200C:417C]:%port%"});
TF_EXPECT_OK(resolver.ValidateWorker("[1080:0:0:0:8:800:200C:417A]:12345"));
TF_EXPECT_OK(resolver.ValidateWorker("[1080:0:0:0:8:800:200C:417B]:23456"));
TF_EXPECT_OK(resolver.ValidateWorker("[1080:0:0:0:8:800:200C:417C]:34567"));
resolver.AddWorker("[1080:0:0:0:8:800:200C:417A]:12345");
resolver.AddWorker("[1080:0:0:0:8:800:200C:417B]:23456");
resolver.AddWorker("[1080:0:0:0:8:800:200C:417C]:34567");
EXPECT_THAT(resolver.GetWorkerIndex("[1080:0:0:0:8:800:200C:417A]:12345"),
IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("[1080:0:0:0:8:800:200C:417B]:23456"),
IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("[1080:0:0:0:8:800:200C:417C]:34567"),
IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, AddressesWithProtocols) {
WorkerIndexResolver resolver(std::vector<std::string>{
"http:
TF_EXPECT_OK(resolver.ValidateWorker("http:
TF_EXPECT_OK(resolver.ValidateWorker("http:
TF_EXPECT_OK(resolver.ValidateWorker("http:
resolver.AddWorker("http:
resolver.AddWorker("http:
resolver.AddWorker("http:
EXPECT_THAT(resolver.GetWorkerIndex("http:
IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("http:
IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("http:
IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, AddressesWithProtocolsAndDynamicPorts) {
WorkerIndexResolver resolver(std::vector<std::string>{
"http:
"http:
TF_EXPECT_OK(resolver.ValidateWorker("http:
TF_EXPECT_OK(resolver.ValidateWorker("http:
TF_EXPECT_OK(resolver.ValidateWorker("http:
resolver.AddWorker("http:
resolver.AddWorker("http:
resolver.AddWorker("http:
EXPECT_THAT(resolver.GetWorkerIndex("http:
IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("http:
IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("http:
IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, HostNameHasColons) {
WorkerIndexResolver resolver(
std::vector<std::string>{":worker:task:0:%port%", ":worker:task:1:%port%",
":worker:task:2:34567"});
TF_EXPECT_OK(resolver.ValidateWorker(":worker:task:0:12345"));
TF_EXPECT_OK(resolver.ValidateWorker(":worker:task:1:23456"));
TF_EXPECT_OK(resolver.ValidateWorker(":worker:task:2:34567"));
resolver.AddWorker(":worker:task:0:12345");
resolver.AddWorker(":worker:task:1:23456");
resolver.AddWorker(":worker:task:2:34567");
EXPECT_THAT(resolver.GetWorkerIndex(":worker:task:0:12345"), IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex(":worker:task:1:23456"), IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex(":worker:task:2:34567"), IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, ChangeWorkerPort) {
WorkerIndexResolver resolver(std::vector<std::string>{
"/worker/task/0", "/worker/task/1", "/worker/task/2"});
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/2:12345"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/1:23456"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/0:34567"));
resolver.AddWorker("/worker/task/2:12345");
resolver.AddWorker("/worker/task/1:23456");
resolver.AddWorker("/worker/task/0:34567");
EXPECT_THAT(resolver.ValidateWorker("/worker/task/0:99999"),
StatusIs(error::FAILED_PRECONDITION,
HasSubstr("already running at the configured host")));
EXPECT_THAT(resolver.ValidateWorker("/worker/task/1:99999"),
StatusIs(error::FAILED_PRECONDITION,
HasSubstr("already running at the configured host")));
EXPECT_THAT(resolver.ValidateWorker("/worker/task/2:99999"),
StatusIs(error::FAILED_PRECONDITION,
HasSubstr("already running at the configured host")));
}
TEST(WorkerIndexResolverTest, WorkerNotFound) {
WorkerIndexResolver resolver(std::vector<std::string>{
"/worker/task/0", "/worker/task/1", "/worker/task/2"});
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:34567"),
StatusIs(error::NOT_FOUND));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:23456"),
StatusIs(error::NOT_FOUND));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:12345"),
StatusIs(error::NOT_FOUND));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/3:45678"),
StatusIs(error::NOT_FOUND));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/2:12345"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/1:23456"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/0:34567"));
EXPECT_THAT(resolver.ValidateWorker("/worker/task/3:45678"),
StatusIs(error::FAILED_PRECONDITION,
HasSubstr("The worker's address is not configured")));
resolver.AddWorker("/worker/task/3:45678");
resolver.AddWorker("/worker/task/2:12345");
resolver.AddWorker("/worker/task/1:23456");
resolver.AddWorker("/worker/task/0:34567");
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:34567"), IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:23456"), IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:12345"), IsOkAndHolds(2));
EXPECT_THAT(
resolver.GetWorkerIndex("/worker/task/3:45678"),
StatusIs(error::NOT_FOUND,
HasSubstr(
"Worker /worker/task/3:45678 is not in the workers list.")));
}
TEST(WorkerIndexResolverTest, MultipleWorkersInOneHost) {
WorkerIndexResolver resolver(
std::vector<std::string>{"localhost", "localhost", "localhost"});
TF_EXPECT_OK(resolver.ValidateWorker("localhost:12345"));
resolver.AddWorker("localhost:12345");
TF_EXPECT_OK(resolver.ValidateWorker("localhost:23456"));
resolver.AddWorker("localhost:23456");
TF_EXPECT_OK(resolver.ValidateWorker("localhost:34567"));
resolver.AddWorker("localhost:34567");
EXPECT_THAT(resolver.GetWorkerIndex("localhost:12345"), IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("localhost:23456"), IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("localhost:34567"), IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, MoreWorkersThanConfigured) {
WorkerIndexResolver resolver(std::vector<std::string>{
"localhost:%port%", "localhost:%port%", "localhost:%port%"});
TF_EXPECT_OK(resolver.ValidateWorker("localhost:12345"));
resolver.AddWorker("localhost:12345");
TF_EXPECT_OK(resolver.ValidateWorker("localhost:23456"));
resolver.AddWorker("localhost:23456");
TF_EXPECT_OK(resolver.ValidateWorker("localhost:34567"));
resolver.AddWorker("localhost:34567");
TF_EXPECT_OK(resolver.ValidateWorker("localhost:12345"));
resolver.AddWorker("localhost:12345");
TF_EXPECT_OK(resolver.ValidateWorker("localhost:23456"));
resolver.AddWorker("localhost:23456");
TF_EXPECT_OK(resolver.ValidateWorker("localhost:34567"));
resolver.AddWorker("localhost:34567");
EXPECT_THAT(resolver.ValidateWorker("localhost:45678"),
StatusIs(error::FAILED_PRECONDITION,
HasSubstr("already running at the configured host")));
EXPECT_THAT(resolver.ValidateWorker("localhost:56789"),
StatusIs(error::FAILED_PRECONDITION,
HasSubstr("already running at the configured host")));
}
TEST(WorkerIndexResolverTest, WorkerNotConfigured) {
WorkerIndexResolver resolver(std::vector<std::string>{""});
EXPECT_THAT(resolver.GetWorkerIndex("localhost:12345"),
StatusIs(error::NOT_FOUND));
EXPECT_THAT(resolver.ValidateWorker("localhost:12345"),
StatusIs(error::FAILED_PRECONDITION,
HasSubstr("The worker's address is not configured")));
resolver.AddWorker("localhost:12345");
EXPECT_THAT(resolver.GetWorkerIndex("localhost:12345"),
StatusIs(error::NOT_FOUND));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/graph_rewriters.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/graph_rewriters_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
747b49b9-8414-4351-bedf-958d67aaa510 | cpp | tensorflow/tensorflow | convert | tensorflow/lite/delegates/gpu/common/convert.cc | third_party/xla/xla/tests/convert_test.cc | #include "tensorflow/lite/delegates/gpu/common/convert.h"
#include <stdint.h>
#include <string.h>
#include <string>
#include <vector>
#include "fp16.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
namespace tflite {
namespace gpu {
namespace {
constexpr int kPhwc4ChannelsInPlane = 4;
constexpr int kPhwo4i4ChannelsInPlane = 4;
constexpr int kPiohw4ChannelsInPlane = 4;
// Repacks OHWI-ordered weights into PHWO4I4 layout: planes of 4 output
// channels; within each (plane, y, x, input-slice) cell a 4x4 block is
// written, iterating output channel (co) in the outer loop and input
// channel (ci) innermost. Positions beyond the real O or I extent are
// zero-filled. When `reverse_space` is true the H and W coordinates are
// mirrored (used by the "Transposed" wrapper below).
absl::Status ConvertToPHWO4I4(absl::Span<const float> in, const OHWI& shape,
                              absl::Span<float> out, bool reverse_space) {
  if (in.size() != shape.DimensionsProduct()) {
    return absl::InvalidArgumentError(absl::StrCat(
        "ConvertToPHWO4I4: Input data size does not match expected size: ",
        in.size(), " != ", shape.DimensionsProduct()));
  }
  if (out.size() != GetElementsSizeForPHWO4I4(shape)) {
    return absl::InvalidArgumentError(absl::StrCat(
        "ConvertToPHWO4I4: Output data size does not match expected size: ",
        out.size(), " != ", GetElementsSizeForPHWO4I4(shape)));
  }
  float* output = out.data();
  // p: plane index over groups of 4 output channels.
  for (int p = 0; p < DivideRoundUp(shape.o, kPhwo4i4ChannelsInPlane); ++p) {
    for (int h = 0; h < shape.h; ++h) {
      for (int w = 0; w < shape.w; ++w) {
        // c: slice index over groups of 4 input channels.
        for (int c = 0; c < DivideRoundUp(shape.i, kPhwo4i4ChannelsInPlane);
             ++c) {
          for (int co = 0; co < kPhwo4i4ChannelsInPlane; ++co) {
            for (int ci = 0; ci < kPhwo4i4ChannelsInPlane; ++ci) {
              float value = 0;
              // Zero-pad slots past the real input/output channel counts.
              if (c * kPhwo4i4ChannelsInPlane + ci < shape.i &&
                  p * kPhwo4i4ChannelsInPlane + co < shape.o) {
                int tensor_o = p * kPhwo4i4ChannelsInPlane + co;
                int tensor_i = c * kPhwo4i4ChannelsInPlane + ci;
                // Mirror spatial coordinates when requested.
                const int in_h = reverse_space ? shape.h - 1 - h : h;
                const int in_w = reverse_space ? shape.w - 1 - w : w;
                value = in[shape.LinearIndex({tensor_o, in_h, in_w, tensor_i})];
              }
              (*output++) = value;
            }
          }
        }
      }
    }
  }
  return absl::OkStatus();
}
}
// Float count for OHWI weights stored as PHWO4I4: both channel axes are
// rounded up to a multiple of 4; the spatial extent is unchanged.
uint32_t GetElementsSizeForPHWO4I4(const OHWI& shape) {
  const uint32_t aligned_i = AlignByN(shape.i, kPhwo4i4ChannelsInPlane);
  const uint32_t aligned_o = AlignByN(shape.o, kPhwo4i4ChannelsInPlane);
  return aligned_i * aligned_o * shape.h * shape.w;
}
// Float count for IHWO weights stored as PHWO4I4: both channel axes are
// rounded up to a multiple of 4; the spatial extent is unchanged.
uint32_t GetElementsSizeForPHWO4I4(const IHWO& shape) {
  const uint32_t aligned_i = AlignByN(shape.i, kPhwo4i4ChannelsInPlane);
  const uint32_t aligned_o = AlignByN(shape.o, kPhwo4i4ChannelsInPlane);
  return aligned_i * aligned_o * shape.h * shape.w;
}
// Converts OHWI float weights into a freshly allocated PHWO4I4 buffer.
// Size mismatches cannot occur (the buffer is sized from the same shape),
// so the status is deliberately discarded.
std::vector<float> ConvertToPHWO4I4(
    const Tensor<OHWI, DataType::FLOAT32>& tensor) {
  std::vector<float> result(GetElementsSizeForPHWO4I4(tensor.shape));
  ConvertToPHWO4I4(tensor.data, tensor.shape, absl::MakeSpan(result),
                   /*reverse_space=*/false)
      .IgnoreError();
  return result;
}
// Same as ConvertToPHWO4I4(Tensor<OHWI>) but with H and W mirrored
// (reverse_space=true). The status is discarded because the buffer is
// always correctly sized.
std::vector<float> ConvertToPHWO4I4Transposed(
    const Tensor<OHWI, DataType::FLOAT32>& tensor) {
  std::vector<float> result(GetElementsSizeForPHWO4I4(tensor.shape));
  ConvertToPHWO4I4(tensor.data, tensor.shape, absl::MakeSpan(result),
                   /*reverse_space=*/true)
      .IgnoreError();
  return result;
}
// 3D extents for PHWO4I4 weights: x = input channels padded to 4,
// y = flattened spatial size, z = number of 4-wide output-channel planes.
uint3 Get3DSizeForPHWO4I4(const OHWI& shape) {
  const int size_x = AlignByN(shape.i, 4);
  const int size_y = shape.h * shape.w;
  const int size_z = DivideRoundUp(shape.o, 4);
  return uint3(size_x, size_y, size_z);
}
// Repacks IHWO-ordered weights into PHWO4I4 layout: planes (f) of 4
// destination (o) channels; each cell stores a 4x4 block iterating co
// outer and ci inner, zero-padded past the real channel extents. Note the
// source is indexed as {src_channel, y, x, dst_channel} per IHWO layout.
absl::Status ConvertToPHWO4I4(absl::Span<const float> in, const IHWO& shape,
                              absl::Span<float> out) {
  if (in.size() != shape.DimensionsProduct()) {
    return absl::InvalidArgumentError(absl::StrCat(
        "ConvertToPHWO4I4: Input data size does not match expected size: ",
        in.size(), " != ", shape.DimensionsProduct()));
  }
  if (out.size() != GetElementsSizeForPHWO4I4(shape)) {
    return absl::InvalidArgumentError(absl::StrCat(
        "ConvertToPHWO4I4: Output data size does not match expected size: ",
        out.size(), " != ", GetElementsSizeForPHWO4I4(shape)));
  }
  // Plane counts over destination (o) and source (i) channel groups of 4.
  const int dst_depth = DivideRoundUp(shape.o, 4);
  const int src_depth = DivideRoundUp(shape.i, 4);
  float* output = out.data();
  for (int f = 0; f < dst_depth; ++f) {
    for (int y = 0; y < shape.h; ++y) {
      for (int x = 0; x < shape.w; ++x) {
        for (int ch = 0; ch < src_depth; ++ch) {
          for (int co = 0; co < 4; ++co) {
            for (int ci = 0; ci < 4; ++ci) {
              const int src_channel = ch * 4 + ci;
              const int dst_channel = f * 4 + co;
              float value = 0;
              // Zero-pad slots past the real channel extents.
              if (src_channel < shape.i && dst_channel < shape.o) {
                value = in[shape.LinearIndex({src_channel, y, x, dst_channel})];
              }
              (*output++) = value;
            }
          }
        }
      }
    }
  }
  return absl::OkStatus();
}
// Converts IHWO float weights into a freshly allocated PHWO4I4 buffer.
// The status is discarded because the buffer is always correctly sized.
std::vector<float> ConvertToPHWO4I4(
    const Tensor<IHWO, DataType::FLOAT32>& tensor) {
  std::vector<float> result(GetElementsSizeForPHWO4I4(tensor.shape));
  ConvertToPHWO4I4(tensor.data, tensor.shape, absl::MakeSpan(result))
      .IgnoreError();
  return result;
}
// Float count for OHWI weights stored as PIOHW4: the combined O*I channel
// dimension is rounded up to a multiple of 4.
uint32_t GetElementsSizeForPIOHW4(const OHWI& shape) {
  const uint32_t aligned_channels =
      AlignByN(shape.o * shape.i, kPiohw4ChannelsInPlane);
  return aligned_channels * shape.h * shape.w;
}
// Repacks OHWI weights into PIOHW4: the combined O*I channel dimension is
// split into planes of 4. Plane channel `output_c` maps back to
// (o = output_c % O, i = output_c / O); slots past O*I are zero-filled.
absl::Status ConvertToPIOHW4(absl::Span<const float> in, const OHWI& shape,
                             absl::Span<float> out) {
  if (in.size() != shape.DimensionsProduct()) {
    return absl::InvalidArgumentError(absl::StrCat(
        "ConvertToPIOHW4: Input data size does not match expected size: ",
        in.size(), " != ", shape.DimensionsProduct()));
  }
  if (out.size() != GetElementsSizeForPIOHW4(shape)) {
    return absl::InvalidArgumentError(absl::StrCat(
        "ConvertToPIOHW4: Output data size does not match expected size: ",
        out.size(), " != ", GetElementsSizeForPIOHW4(shape)));
  }
  int32_t output_channels = shape.o * shape.i;
  int32_t num_planes = DivideRoundUp(output_channels, kPiohw4ChannelsInPlane);
  float* output = out.data();
  for (int p = 0; p < num_planes; ++p) {
    for (int h = 0; h < shape.h; ++h) {
      for (int w = 0; w < shape.w; ++w) {
        for (int c = 0; c < kPiohw4ChannelsInPlane; ++c) {
          int output_c = p * kPiohw4ChannelsInPlane + c;
          // Zero-pad past the real O*I extent; otherwise unflatten output_c.
          (*output++) = output_c >= output_channels
                            ? 0
                            : in[shape.LinearIndex({output_c % shape.o, h, w,
                                                    output_c / shape.o})];
        }
      }
    }
  }
  return absl::OkStatus();
}
// Converts OHWI float weights into a freshly allocated PIOHW4 buffer.
// The status is discarded because the buffer is always correctly sized.
std::vector<float> ConvertToPIOHW4(
    const Tensor<OHWI, DataType::FLOAT32>& tensor) {
  std::vector<float> result(GetElementsSizeForPIOHW4(tensor.shape));
  ConvertToPIOHW4(tensor.data, tensor.shape, absl::MakeSpan(result))
      .IgnoreError();
  return result;
}
// Checks that `in` matches the dense BHWC element count and `out` matches
// the channel-padded PHWC4 element count. Input is checked first so that,
// when both mismatch, the input error is the one reported.
template <typename T>
absl::Status ValidateConvertToPHWC4(absl::Span<const float> in,
                                    const BHWC& shape, absl::Span<T> out) {
  const size_t expected_input_size = shape.DimensionsProduct();
  if (in.size() != expected_input_size) {
    return absl::InvalidArgumentError(absl::StrCat(
        "ConvertToPHWC4: Input data size does not match expected size: ",
        in.size(), " != ", expected_input_size));
  }
  const size_t expected_output_size = GetElementsSizeForPHWC4(shape);
  if (out.size() != expected_output_size) {
    return absl::InvalidArgumentError(absl::StrCat(
        "ConvertToPHWC4: Output data size does not match expected size: ",
        out.size(), " != ", expected_output_size));
  }
  return absl::OkStatus();
}
// Repacks a dense BHWC tensor into PHWC4: per batch, channels are grouped
// into planes of 4, with the trailing partial plane zero-padded.
absl::Status ConvertToPHWC4(absl::Span<const float> in, const BHWC& shape,
                            absl::Span<float> out) {
  RETURN_IF_ERROR(ValidateConvertToPHWC4(in, shape, out));
  // Exactly 4 channels: the two layouts coincide, so copy verbatim.
  if (shape.c == 4) {
    std::memcpy(out.data(), in.data(),
                shape.DimensionsProduct() * sizeof(float));
    return absl::OkStatus();
  }
  int num_planes = DivideRoundUp(shape.c, kPhwc4ChannelsInPlane);
  const int num_pixels = shape.h * shape.w;
  // Planes fully populated with real channels (no padding needed).
  const int num_full_planes = shape.c / kPhwc4ChannelsInPlane;
  for (int b = 0; b < shape.b; b++) {
    float* dest =
        out.data() + b * num_pixels * num_planes * kPhwc4ChannelsInPlane;
    for (int p = 0; p < num_full_planes; p++) {
      const float* src =
          in.data() + shape.LinearIndex({b, 0, 0, p * kPhwc4ChannelsInPlane});
      for (int i = 0; i < num_pixels; i++) {
        // Copy 4 consecutive channels; `src` strides by the full channel
        // count, `dest` by one plane cell.
        std::memcpy(dest, src, kPhwc4ChannelsInPlane * sizeof(float));
        src += shape.c;
        dest += kPhwc4ChannelsInPlane;
      }
    }
  }
  // Trailing partial plane: copy the 1-3 real channels and zero the rest.
  const int padded_size = num_pixels * num_planes * kPhwc4ChannelsInPlane;
  const int remaining_channels =
      shape.c - num_full_planes * kPhwc4ChannelsInPlane;
  if (remaining_channels == 0) {
    return absl::OkStatus();
  }
  for (int b = 0; b < shape.b; b++) {
    const float* src =
        in.data() +
        shape.LinearIndex({b, 0, 0, num_full_planes * kPhwc4ChannelsInPlane});
    float* dest = out.data() + b * padded_size +
                  num_pixels * num_full_planes * kPhwc4ChannelsInPlane;
    for (int p = 0; p < num_pixels; p++) {
      std::memcpy(dest, src, remaining_channels * sizeof(float));
      std::memset(dest + remaining_channels, 0,
                  (4 - remaining_channels) * sizeof(float));
      src += shape.c;
      dest += kPhwc4ChannelsInPlane;
    }
  }
  return absl::OkStatus();
}
// Same repacking as ConvertToPHWC4 but converting each float to IEEE fp16
// bits on the fly. The trailing partial plane is handled by a switch over
// the 1-3 remaining channels, zero-filling unused fp16 slots.
absl::Status ConvertToPHWC4Half(absl::Span<const float> in, const BHWC& shape,
                                absl::Span<HalfBits> out) {
  RETURN_IF_ERROR(ValidateConvertToPHWC4(in, shape, out));
  int num_planes = DivideRoundUp(shape.c, kPhwc4ChannelsInPlane);
  const int num_pixels = shape.h * shape.w;
  // Planes fully populated with real channels.
  const int num_full_planes = shape.c / kPhwc4ChannelsInPlane;
  for (int b = 0; b < shape.b; b++) {
    HalfBits* dest =
        out.data() + b * num_pixels * num_planes * kPhwc4ChannelsInPlane;
    for (int p = 0; p < num_full_planes; p++) {
      const float* src =
          in.data() + shape.LinearIndex({b, 0, 0, p * kPhwc4ChannelsInPlane});
      for (int i = 0; i < num_pixels; i++) {
        // Convert 4 consecutive channels per pixel.
        dest[0] = fp16_ieee_from_fp32_value(src[0]);
        dest[1] = fp16_ieee_from_fp32_value(src[1]);
        dest[2] = fp16_ieee_from_fp32_value(src[2]);
        dest[3] = fp16_ieee_from_fp32_value(src[3]);
        src += shape.c;
        dest += kPhwc4ChannelsInPlane;
      }
    }
  }
  // Trailing partial plane: convert the real channels, zero the rest.
  const int padded_size = num_pixels * num_planes * kPhwc4ChannelsInPlane;
  const int remaining_channels =
      shape.c - num_full_planes * kPhwc4ChannelsInPlane;
  if (remaining_channels == 0) {
    return absl::OkStatus();
  }
  for (int b = 0; b < shape.b; b++) {
    const float* src =
        in.data() +
        shape.LinearIndex({b, 0, 0, num_full_planes * kPhwc4ChannelsInPlane});
    HalfBits* dest = out.data() + b * padded_size +
                     num_pixels * num_full_planes * kPhwc4ChannelsInPlane;
    switch (remaining_channels) {
      case 1:
        for (int p = 0; p < num_pixels; p++) {
          dest[0] = fp16_ieee_from_fp32_value(src[0]);
          dest[1] = 0;
          dest[2] = 0;
          dest[3] = 0;
          src += shape.c;
          dest += kPhwc4ChannelsInPlane;
        }
        break;
      case 2:
        for (int p = 0; p < num_pixels; p++) {
          dest[0] = fp16_ieee_from_fp32_value(src[0]);
          dest[1] = fp16_ieee_from_fp32_value(src[1]);
          dest[2] = 0;
          dest[3] = 0;
          src += shape.c;
          dest += kPhwc4ChannelsInPlane;
        }
        break;
      case 3:
        for (int p = 0; p < num_pixels; p++) {
          dest[0] = fp16_ieee_from_fp32_value(src[0]);
          dest[1] = fp16_ieee_from_fp32_value(src[1]);
          dest[2] = fp16_ieee_from_fp32_value(src[2]);
          dest[3] = 0;
          src += shape.c;
          dest += kPhwc4ChannelsInPlane;
        }
        break;
      default:
        // remaining_channels is in [1, 3] by construction; defensive only.
        return absl::UnimplementedError(
            "ConvertToPHWC4Half: Unsupported channels per planes count.");
    }
  }
  return absl::OkStatus();
}
// Converts a batched BHWC tensor into a freshly allocated PHWC4 buffer.
// The status is discarded because the buffer is always correctly sized.
std::vector<float> ConvertToPHWC4(
    const Tensor<BHWC, DataType::FLOAT32>& tensor) {
  std::vector<float> result(GetElementsSizeForPHWC4(tensor.shape));
  ConvertToPHWC4(tensor.data, tensor.shape, absl::MakeSpan(result))
      .IgnoreError();
  return result;
}
std::vector<float> ConvertToPHWC4(
const Tensor<HWC, DataType::FLOAT32>& tensor) {
const BHWC batched_shape =
BHWC(1, tensor.shape.h, tensor.shape.w, tensor.shape.c);
std::vector<float> transposed(GetElementsSizeForPHWC4(batched_shape));
ConvertToPHWC4(tensor.data, batched_shape,
absl::MakeSpan(transposed.data(), transposed.size()))
.IgnoreError();
return transposed;
}
// Float count for a BHWC tensor stored as PHWC4: channels are rounded up
// to a multiple of 4.
uint32_t GetElementsSizeForPHWC4(const BHWC& shape) {
  const uint32_t padded_channels = AlignByN(shape.c, kPhwc4ChannelsInPlane);
  return shape.b * shape.h * shape.w * padded_channels;
}
// Checks that `in` holds a channel-padded PHWC4 buffer for `shape` and
// that `out` has room for the dense BHWC element count. Input is checked
// first so that, when both mismatch, the input error is the one reported.
template <typename T>
absl::Status ValidateConvertFromPHWC4(absl::Span<const T> in, const BHWC& shape,
                                      absl::Span<float> out) {
  const size_t expected_input_size = GetElementsSizeForPHWC4(shape);
  if (in.size() != expected_input_size) {
    return absl::InvalidArgumentError(absl::StrCat(
        "ConvertFromPHWC4: Input data size does not match expected size: ",
        in.size(), " != ", expected_input_size));
  }
  const size_t expected_output_size = shape.DimensionsProduct();
  if (out.size() != expected_output_size) {
    return absl::InvalidArgumentError(absl::StrCat(
        "ConvertFromPHWC4: Output data size does not match expected size: ",
        out.size(), " != ", expected_output_size));
  }
  return absl::OkStatus();
}
// Inverse of ConvertToPHWC4: unpacks a channel-padded PHWC4 buffer back
// into a dense BHWC tensor, discarding the zero padding of the last plane.
absl::Status ConvertFromPHWC4(absl::Span<const float> in, const BHWC& shape,
                              absl::Span<float> out) {
  RETURN_IF_ERROR(ValidateConvertFromPHWC4(in, shape, out));
  // Exactly 4 channels: the two layouts coincide, so copy verbatim.
  if (shape.c == 4) {
    std::memcpy(out.data(), in.data(),
                shape.DimensionsProduct() * sizeof(float));
    return absl::OkStatus();
  }
  int num_planes = DivideRoundUp(shape.c, kPhwc4ChannelsInPlane);
  const int num_pixels = shape.h * shape.w;
  const int padded_size = num_pixels * num_planes * kPhwc4ChannelsInPlane;
  // Planes fully populated with real channels.
  const int num_full_planes = shape.c / kPhwc4ChannelsInPlane;
  for (int b = 0; b < shape.b; b++) {
    const float* src = in.data() + b * padded_size;
    for (int p = 0; p < num_full_planes; p++) {
      float* dest =
          out.data() + shape.LinearIndex({b, 0, 0, p * kPhwc4ChannelsInPlane});
      for (int i = 0; i < num_pixels; i++) {
        // Copy 4 consecutive channels; `src` strides by one plane cell,
        // `dest` by the full channel count.
        std::memcpy(dest, src, kPhwc4ChannelsInPlane * sizeof(float));
        src += kPhwc4ChannelsInPlane;
        dest += shape.c;
      }
    }
  }
  // Trailing partial plane: copy only the real channels, skip the padding.
  const int remaining_channels =
      shape.c - num_full_planes * kPhwc4ChannelsInPlane;
  if (remaining_channels == 0) {
    return absl::OkStatus();
  }
  for (int b = 0; b < shape.b; b++) {
    const float* src = in.data() + b * padded_size +
                       num_pixels * num_full_planes * kPhwc4ChannelsInPlane;
    float* dest =
        out.data() +
        shape.LinearIndex({b, 0, 0, num_full_planes * kPhwc4ChannelsInPlane});
    for (int p = 0; p < num_pixels; p++) {
      std::memcpy(dest, src, remaining_channels * sizeof(float));
      src += kPhwc4ChannelsInPlane;
      dest += shape.c;
    }
  }
  return absl::OkStatus();
}
// Converts half-precision PHWC4 data back into a dense BHWC float32 layout,
// widening each element via fp16_ieee_to_fp32_value. Mirrors the structure
// of ConvertFromPHWC4 above: full 4-channel planes first, then the partial
// plane with 1-3 remaining channels. Sizes are validated up front.
absl::Status ConvertFromPHWC4Half(absl::Span<const HalfBits> in,
                                  const BHWC& shape, absl::Span<float> out) {
  RETURN_IF_ERROR(ValidateConvertFromPHWC4(in, shape, out));
  int num_planes = DivideRoundUp(shape.c, kPhwc4ChannelsInPlane);
  const int num_pixels = shape.h * shape.w;
  // Elements per batch in the padded input.
  const int padded_size = num_pixels * num_planes * kPhwc4ChannelsInPlane;
  const int num_full_planes = shape.c / kPhwc4ChannelsInPlane;
  // Pass 1: all complete 4-channel planes; each half is widened to float.
  for (int b = 0; b < shape.b; b++) {
    const HalfBits* src = in.data() + b * padded_size;
    for (int p = 0; p < num_full_planes; p++) {
      float* dest =
          out.data() + shape.LinearIndex({b, 0, 0, p * kPhwc4ChannelsInPlane});
      for (int i = 0; i < num_pixels; i++) {
        dest[0] = fp16_ieee_to_fp32_value(src[0]);
        dest[1] = fp16_ieee_to_fp32_value(src[1]);
        dest[2] = fp16_ieee_to_fp32_value(src[2]);
        dest[3] = fp16_ieee_to_fp32_value(src[3]);
        src += kPhwc4ChannelsInPlane;
        dest += shape.c;
      }
    }
  }
  const int remaining_channels =
      shape.c - num_full_planes * kPhwc4ChannelsInPlane;
  if (remaining_channels == 0) {
    return absl::OkStatus();
  }
  // Pass 2: the final partial plane — only the valid leading channels of
  // each padded 4-channel group are converted.
  for (int b = 0; b < shape.b; b++) {
    const HalfBits* src = in.data() + b * padded_size +
                          num_pixels * num_full_planes * kPhwc4ChannelsInPlane;
    float* dest =
        out.data() +
        shape.LinearIndex({b, 0, 0, num_full_planes * kPhwc4ChannelsInPlane});
    switch (remaining_channels) {
      case 1:
        for (int p = 0; p < num_pixels; p++) {
          dest[0] = fp16_ieee_to_fp32_value(src[0]);
          src += kPhwc4ChannelsInPlane;
          dest += shape.c;
        }
        break;
      case 2:
        for (int p = 0; p < num_pixels; p++) {
          dest[0] = fp16_ieee_to_fp32_value(src[0]);
          dest[1] = fp16_ieee_to_fp32_value(src[1]);
          src += kPhwc4ChannelsInPlane;
          dest += shape.c;
        }
        break;
      case 3:
        for (int p = 0; p < num_pixels; p++) {
          dest[0] = fp16_ieee_to_fp32_value(src[0]);
          dest[1] = fp16_ieee_to_fp32_value(src[1]);
          dest[2] = fp16_ieee_to_fp32_value(src[2]);
          src += kPhwc4ChannelsInPlane;
          dest += shape.c;
        }
        break;
      default:
        // Unreachable given remaining_channels is in [1, 3], kept defensive.
        // Fixed: the old message named "ConvertToPHWC4Half" although this is
        // the From-PHWC4 direction.
        return absl::UnimplementedError(
            "ConvertFromPHWC4Half: Unsupported channels per planes count.");
    }
  }
  return absl::OkStatus();
}
}
} | #include <array>
#include <cmath>
#include <cstdint>
#include <limits>
#include <memory>
#include <random>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/casts.h"
#include "xla/client/local_client.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape_util.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
// Base fixture for element-type conversion tests. Disables the HLO passes
// that could fold or simplify conversions ("algsimp", "inline",
// "simplify-fp-conversions") so the Convert ops under test actually execute,
// and forbids excess precision so results are bit-exact.
class ConvertTest : public ClientLibraryTestBase {
 public:
  explicit ConvertTest(se::Platform* platform = nullptr)
      : ClientLibraryTestBase(platform) {
    mutable_debug_options()->add_xla_disable_hlo_passes("algsimp");
    mutable_debug_options()->add_xla_disable_hlo_passes("inline");
    mutable_debug_options()->add_xla_disable_hlo_passes(
        "simplify-fp-conversions");
    mutable_debug_options()->set_xla_allow_excess_precision(false);
  }
};
// Typed fixture: runs each TYPED_TEST once per floating-point type below
// (all FP8 variants plus half, bfloat16, float and double).
template <typename T>
class ConvertTestT : public ConvertTest {
 public:
  using ConvertTest::ConvertTest;
};
using FloatingPointTypeList =
    ::testing::Types<tsl::float8_e5m2, tsl::float8_e4m3, tsl::float8_e4m3fn,
                     tsl::float8_e5m2fnuz, tsl::float8_e4m3fnuz,
                     tsl::float8_e3m4, Eigen::half, bfloat16, float, double>;
TYPED_TEST_SUITE(ConvertTestT, FloatingPointTypeList);
// Typed fixture restricted to the two 16-bit float types.
template <typename T>
class ConvertTestF16 : public ConvertTest {
 public:
  using ConvertTest::ConvertTest;
};
using F16TypeList = ::testing::Types<Eigen::half, bfloat16>;
TYPED_TEST_SUITE(ConvertTestF16, F16TypeList);
// Identity conversion: S32 -> S32 must pass values through unchanged.
TEST_F(ConvertTest, ConvertR1S32ToR1S32) {
  XlaBuilder builder(TestName());
  auto a = ConstantR1<int32_t>(&builder, {42, 64});
  ConvertElementType(a, S32);
  std::vector<int32_t> expected = {42, 64};
  ComputeAndCompareR1<int32_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1S32ToR1U32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<int32_t>(&builder, {42, 64});
ConvertElementType(a, U32);
std::vector<uint32_t> expected = {42, 64};
ComputeAndCompareR1<uint32_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1S32ToR1PRED) {
XlaBuilder builder(TestName());
auto a = ConstantR1<int32_t>(&builder, {42, 0, -64});
ConvertElementType(a, PRED);
std::array<bool, 3> expected = {true, false, true};
ComputeAndCompareR1<bool>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1U32ToR1U32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<uint32_t>(&builder, {42, 64});
ConvertElementType(a, U32);
std::vector<uint32_t> expected = {42, 64};
ComputeAndCompareR1<uint32_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1U32ToR1S32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<uint32_t>(&builder, {42, 64});
ConvertElementType(a, S32);
std::vector<int32_t> expected = {42, 64};
ComputeAndCompareR1<int32_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1U32ToR1PRED) {
XlaBuilder builder(TestName());
auto a = ConstantR1<uint32_t>(&builder, {42, 0, 64});
ConvertElementType(a, PRED);
std::array<bool, 3> expected = {true, false, true};
ComputeAndCompareR1<bool>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1F32ToR1F32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<float>(&builder, {42.0f, 64.0f});
ConvertElementType(a, F32);
std::vector<float> expected = {42.0f, 64.0f};
ComputeAndCompareR1<float>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1F32ToR1PRED) {
XlaBuilder builder(TestName());
auto a = ConstantR1<float>(&builder, {42.0f, 0.0f, 64.0f});
ConvertElementType(a, PRED);
std::array<bool, 3> expected = {true, false, true};
ComputeAndCompareR1<bool>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1S32ToR1F32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<int32_t>(&builder, {42, 64});
ConvertElementType(a, F32);
std::vector<float> expected = {42.0f, 64.0f};
ComputeAndCompareR1<float>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1PREDToR1S32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<bool>(&builder, {true, false, true});
ConvertElementType(a, S32);
std::vector<int32_t> expected = {1, 0, 1};
ComputeAndCompareR1<int32_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1PREDToR1U32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<bool>(&builder, {true, false, true});
ConvertElementType(a, U32);
std::vector<uint32_t> expected = {1, 0, 1};
ComputeAndCompareR1<uint32_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1PREDToR1F32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<bool>(&builder, {true, false, true});
ConvertElementType(a, F32);
std::vector<float> expected = {1., 0., 1.};
ComputeAndCompareR1<float>(&builder, expected, {});
}
// Edge case: converting an empty (size-0) S32 array yields an empty F32
// array rather than failing.
XLA_TEST_F(ConvertTest, ConvertR1S0S32ToR1S0F32) {
  XlaBuilder builder(TestName());
  auto a = ConstantR1<int32_t>(&builder, {});
  ConvertElementType(a, F32);
  std::vector<float> expected = {};
  ComputeAndCompareR1<float>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1F32ToR1S32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<float>(&builder, {42.6, 64.4});
ConvertElementType(a, S32);
std::vector<int32_t> expected = {42, 64};
ComputeAndCompareR1<int32_t>(&builder, expected, {});
}
XLA_TEST_F(ConvertTest, ConvertR1S64ToR1F32) {
XlaBuilder builder(TestName());
std::vector<int64_t> arg{
-9223371216516022272,
-2,
-1,
-0x7FFFFFFF,
-0x80000000,
0,
1,
2,
1073742145,
1073742656,
0x7FFFFFFF,
0x80000000,
826720496944058148,
4296062029846194332,
0x0007FB72E4000000LL,
0x0007FB72E4000001LL,
0x0007FB72E6000000LL,
0x0007FB72E7000000LL,
0x0007FB72E7FFFFFFLL,
0x0007FB72E8000000LL,
0x0007FB72E8000001LL,
0x0007FB72EA000000LL,
0x0007FB72EB000000LL,
0x0007FB72EBFFFFFFLL,
0x0007FB72EC000000LL,
0x7FFFFF0000000000LL,
0x7FFFFF8000000000LL,
0x7FFFFFFFFFFFFF00,
static_cast<int64_t>(0xFFFFFFFFFFFFFFFF),
static_cast<int64_t>(0x0000f234e67e0001LL),
static_cast<int64_t>(0x8000000000000000),
static_cast<int64_t>(0x8000000000000000LL),
static_cast<int64_t>(0x8000000000000001LL),
static_cast<int64_t>(0x8000008000000000LL),
static_cast<int64_t>(0x8000010000000000LL),
};
Literal arg_literal = LiteralUtil::CreateR1<int64_t>({arg});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, F32);
std::vector<float> expected(arg.size());
for (int64_t i = 0; i < arg.size(); ++i) {
expected[i] = static_cast<float>(arg[i]);
}
ComputeAndCompareR1<float>(&builder, expected, {arg_data.get()});
}
XLA_TEST_F(ConvertTest, ConvertR1U32ToR1F32) {
XlaBuilder builder(TestName());
std::vector<uint32_t> arg{0, 1, 0x1000, 0x7fffffff,
0x80000000, 0x80000001, 0x80000002, 0x80000003,
0x80000080, 0x80000081, 0x80000082, 0xFFFFFFFF};
Literal arg_literal = LiteralUtil::CreateR1<uint32_t>({arg});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, F32);
std::vector<float> expected(arg.size());
for (int64_t i = 0; i < arg.size(); ++i) {
expected[i] = static_cast<float>(arg[i]);
}
ComputeAndCompareR1<float>(&builder, expected, {arg_data.get()});
}
XLA_TEST_F(ConvertTest, ConvertR1F32ToR1U32) {
XlaBuilder builder(TestName());
std::vector<float> arg{0.0f, 1.0f, 16777216.0f,
16777218.0f, 2147483647.0f, 4294967040.0f};
Literal arg_literal = LiteralUtil::CreateR1<float>({arg});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, U32);
std::vector<uint32_t> expected(arg.size());
for (int64_t i = 0; i < arg.size(); ++i) {
expected[i] = static_cast<uint32_t>(arg[i]);
}
ComputeAndCompareR1<uint32_t>(&builder, expected, {arg_data.get()});
}
XLA_TEST_F(ConvertTest, ConvertR1U32ToR1S64) {
XlaBuilder builder(TestName());
std::vector<uint32_t> arg{0, 1, 0x1000, 0x7fffffff, 0x80000082, 0xFFFFFFFF};
Literal arg_literal = LiteralUtil::CreateR1<uint32_t>({arg});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, S64);
std::vector<int64_t> expected(arg.size());
for (int64_t i = 0; i < arg.size(); ++i) {
expected[i] = static_cast<int64_t>(arg[i]);
}
ComputeAndCompareR1<int64_t>(&builder, expected, {arg_data.get()});
}
XLA_TEST_F(ConvertTest, ConvertR1S32ToR1S64) {
XlaBuilder builder(TestName());
std::vector<int32_t> arg{0, 1, 0x1000, -1, -0x1000};
Literal arg_literal = LiteralUtil::CreateR1<int32_t>({arg});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, S64);
std::vector<int64_t> expected(arg.size());
for (int64_t i = 0; i < arg.size(); ++i) {
expected[i] = static_cast<int64_t>(arg[i]);
}
ComputeAndCompareR1<int64_t>(&builder, expected, {arg_data.get()});
}
XLA_TEST_F(ConvertTest, ConvertR1F32ToR1S64) {
XlaBuilder builder(TestName());
std::vector<float> arg{0.0f,
0.5f,
0.99f,
1.0f,
1.5f,
1.99f,
2.0f,
2.01f,
2147483648.f,
-0.5f,
-0.99f,
-1.0f,
-1.5f,
-1.99f,
-2.0f,
-2.01f,
9223371487098961920.f,
9223370937343148032.f,
-9223371487098961920.f,
-9223370937343148032.f};
Literal arg_literal = LiteralUtil::CreateR1<float>({arg});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, S64);
std::vector<int64_t> expected(arg.size());
for (int64_t i = 0; i < arg.size(); ++i) {
expected[i] = static_cast<int64_t>(arg[i]);
}
ComputeAndCompareR1<int64_t>(&builder, expected, {arg_data.get()});
}
XLA_TEST_F(ConvertTest, ConvertR1U8ToR1F32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<uint8_t>(&builder, {32, 64});
ConvertElementType(a, F32);
std::vector<float> expected = {32.0, 64.0};
ComputeAndCompareR1<float>(&builder, expected, {});
}
XLA_TEST_F(ConvertTest, ConvertR1U8ToR1S32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<uint8_t>(&builder, {32, 64});
ConvertElementType(a, S32);
std::vector<int32_t> expected = {32, 64};
ComputeAndCompareR1<int32_t>(&builder, expected, {});
}
XLA_TEST_F(ConvertTest, ConvertR1U8ToR1U32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<uint8_t>(&builder, {32, 64});
ConvertElementType(a, U32);
std::vector<uint32_t> expected = {32, 64};
ComputeAndCompareR1<uint32_t>(&builder, expected, {});
}
XLA_TEST_F(ConvertTest, ConvertR1F32ToR1F64) {
XlaBuilder builder(TestName());
auto a = ConstantR1<float>(&builder, {32.0f, 64.0f});
ConvertElementType(a, F64);
std::vector<double> expected = {32.0, 64.0};
ComputeAndCompareR1<double>(&builder, expected, {});
}
XLA_TEST_F(ConvertTest, ConvertR1F64ToR1F32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<double>(&builder, {32.0, 64.0});
ConvertElementType(a, F32);
std::vector<float> expected = {32.0f, 64.0f};
ComputeAndCompareR1<float>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertS32Extremes) {
XlaBuilder builder(TestName());
auto a = ConstantR1<int32_t>(&builder, {std::numeric_limits<int32_t>::min(),
std::numeric_limits<int32_t>::max()});
ConvertElementType(a, F32);
std::vector<float> expected = {
static_cast<float>(std::numeric_limits<int32_t>::min()),
static_cast<float>(std::numeric_limits<int32_t>::max())};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
}
TEST_F(ConvertTest, ConvertMapToS32) {
XlaBuilder builder(TestName());
auto b = builder.CreateSubBuilder("convert");
auto param = Parameter(b.get(), 0, ShapeUtil::MakeShape(F32, {}), "in");
ConvertElementType(param, S32);
auto a = ConstantR1<float>(&builder, {42.0f, 64.0f});
Map(&builder, {a}, b->BuildAndNoteError(), {0});
std::vector<int32_t> expected = {42, 64};
ComputeAndCompareR1<int32_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertMapToF32) {
XlaBuilder builder(TestName());
auto b = builder.CreateSubBuilder("convert");
auto param = Parameter(b.get(), 0, ShapeUtil::MakeShape(S32, {}), "in");
ConvertElementType(param, F32);
auto a = ConstantR1<int32_t>(&builder, {42, 64});
Map(&builder, {a}, b->BuildAndNoteError(), {0});
std::vector<float> expected = {42.0f, 64.0f};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
}
TEST_F(ConvertTest, ConvertReshape) {
XlaBuilder builder(TestName());
auto input = ConstantR1<int32_t>(&builder, {42});
auto reshape = Reshape(input, {0}, {});
ConvertElementType(reshape, F32);
ComputeAndCompareR0<float>(&builder, 42.0f, {}, ErrorSpec(0.0001));
}
// Builds a list of f32 values that sit on f16 boundaries: infinities, the
// largest finite f16 (65504), the smallest positive normal/subnormal, values
// just past the f16 range, and signed zeros — each in both signs.
std::vector<float> GetInterestingF16ConversionTestCases() {
  const float infinity = std::numeric_limits<float>::infinity();
  const float half_max = 65504.0f;
  const float half_min_positive_normal =
      absl::bit_cast<float, uint32_t>(0x38800000);
  const float half_max_subnormal = absl::bit_cast<float, uint32_t>(0x387fc000);
  const float half_min_positive_subnormal =
      absl::bit_cast<float, uint32_t>(0x33800000);
  return std::vector<float>(
      {-infinity, -(half_max * 2 + 1), -half_max, -42.0f, -1.0f,
       -half_min_positive_subnormal, -half_max_subnormal,
       -half_min_positive_normal, -0.0f, 0.0f, half_min_positive_subnormal,
       half_max_subnormal, half_min_positive_normal, 1.0f, 42.0f, half_max,
       (half_max * 2 + 1), infinity});
}
XLA_TEST_F(ConvertTest, ConvertR1F16ToR1F32) {
std::vector<float> test_cases = GetInterestingF16ConversionTestCases();
std::vector<half> input;
absl::c_transform(test_cases, std::back_inserter(input),
[](float f) { return Eigen::half(f); });
std::vector<float> expected_output;
absl::c_transform(input, std::back_inserter(expected_output),
[](Eigen::half h) { return static_cast<float>(h); });
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GlobalData> dot_lhs_handle,
client_->TransferToServer(LiteralUtil::CreateR1<half>(input)));
XlaBuilder builder(TestName());
ConvertElementType(
Parameter(&builder, 0,
ShapeUtil::MakeShape(F16, {static_cast<int64_t>(input.size())}),
"param"),
F32);
ComputeAndCompareR1<float>(&builder, expected_output, {dot_lhs_handle.get()});
}
XLA_TEST_F(ConvertTest, ConvertR1F32ToR1F16) {
std::vector<float> input = GetInterestingF16ConversionTestCases();
std::vector<half> expected_output;
absl::c_transform(input, std::back_inserter(expected_output),
[](float f) { return Eigen::half(f); });
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GlobalData> dot_lhs_handle,
client_->TransferToServer(LiteralUtil::CreateR1<float>(input)));
XlaBuilder builder(TestName());
ConvertElementType(
Parameter(&builder, 0,
ShapeUtil::MakeShape(F32, {static_cast<int64_t>(input.size())}),
"param"),
F16);
ComputeAndCompareR1<half>(&builder, expected_output, {dot_lhs_handle.get()});
}
XLA_TEST_F(ConvertTest, ConvertC64ToC64) {
XlaBuilder builder(TestName());
std::vector<complex64> x = {{42.0f, 64.0f}};
ConvertElementType(ConstantR1<complex64>(&builder, x), C64);
ComputeAndCompareR1<complex64>(&builder, x, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(ConvertTest, ConvertS64S64) {
XlaBuilder builder(TestName());
std::vector<int64_t> x = {{-42, 64}};
ConvertElementType(ConstantR1<int64_t>(&builder, x), S64);
ComputeAndCompareR1<int64_t>(&builder, x, {});
}
XLA_TEST_F(ConvertTest, ConvertU64U64) {
XlaBuilder builder(TestName());
std::vector<uint64_t> x = {{42, 64}};
ConvertElementType(ConstantR1<uint64_t>(&builder, x), U64);
ComputeAndCompareR1<uint64_t>(&builder, x, {});
}
XLA_TEST_F(ConvertTest, ConvertU64S64) {
XlaBuilder builder(TestName());
std::vector<uint64_t> unsigned_x = {{42, UINT64_MAX}};
ConvertElementType(ConstantR1<uint64_t>(&builder, unsigned_x), S64);
std::vector<int64_t> signed_x = {{42, -1}};
ComputeAndCompareR1<int64_t>(&builder, signed_x, {});
}
XLA_TEST_F(ConvertTest, ConvertS64U64) {
XlaBuilder builder(TestName());
std::vector<int64_t> signed_x = {{42, -1, INT64_MIN}};
ConvertElementType(ConstantR1<int64_t>(&builder, signed_x), U64);
std::vector<uint64_t> unsigned_x = {{42, UINT64_MAX, IPow<uint64_t>(2, 63)}};
ComputeAndCompareR1<uint64_t>(&builder, unsigned_x, {});
}
TEST_F(ConvertTest, ConvertR1S4ToR1S8) {
XlaBuilder builder(TestName());
auto a = ConstantR1<s4>(&builder, {s4(0), s4(1), s4(2), s4(-8)});
ConvertElementType(a, S8);
std::vector<int8_t> expected = {0, 1, 2, -8};
ComputeAndCompareR1<int8_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1S4ParameterToR1S8) {
XlaBuilder builder(TestName());
Literal arg_literal =
LiteralUtil::CreateR1<s4>({s4(0), s4(1), s4(2), s4(-8)});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, S8);
std::vector<int8_t> expected = {0, 1, 2, -8};
ComputeAndCompareR1<int8_t>(&builder, expected, {arg_data.get()});
}
TEST_F(ConvertTest, ConvertR1U4ToR1U8) {
XlaBuilder builder(TestName());
auto a = ConstantR1<u4>(&builder, {u4(0), u4(1), u4(2), u4(15)});
ConvertElementType(a, U8);
std::vector<uint8_t> expected = {0, 1, 2, 15};
ComputeAndCompareR1<uint8_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1U4ParameterToR1U8) {
XlaBuilder builder(TestName());
Literal arg_literal =
LiteralUtil::CreateR1<u4>({u4(0), u4(1), u4(2), u4(15)});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, U8);
std::vector<uint8_t> expected = {0, 1, 2, 15};
ComputeAndCompareR1<uint8_t>(&builder, expected, {arg_data.get()});
}
TEST_F(ConvertTest, ConvertR1S8ToR1S4) {
XlaBuilder builder(TestName());
auto a = ConstantR1<int8_t>(&builder, {0, 1, 2, -8});
ConvertElementType(a, S4);
std::vector<s4> expected = {s4(0), s4(1), s4(2), s4(-8)};
ComputeAndCompareR1<s4>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1U8ToR1U4) {
XlaBuilder builder(TestName());
auto a = ConstantR1<uint8_t>(&builder, {0, 1, 2, 15});
ConvertElementType(a, U4);
std::vector<u4> expected = {u4(0), u4(1), u4(2), u4(15)};
ComputeAndCompareR1<u4>(&builder, expected, {});
}
// S8 -> S4 -> S8 round trip. Values outside the s4 range [-8, 7] wrap
// modulo 16 (e.g. 8 -> -8, -9 -> 7, 127 -> -1, -128 -> 0); in-range values
// survive unchanged.
TEST_F(ConvertTest, ConvertR1S8ToR1S4Roundtrip) {
  XlaBuilder builder(TestName());
  auto a = ConstantR1<int8_t>(&builder, {0, 8, -8, -9, 127, -128});
  auto b = ConvertElementType(a, S4);
  ConvertElementType(b, S8);
  std::vector<int8_t> expected = {0, -8, -8, 7, -1, 0};
  ComputeAndCompareR1<int8_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1F32ToR1S4) {
XlaBuilder builder(TestName());
auto a = ConstantR1<float>(&builder, {0., 2.5, -2.5});
ConvertElementType(a, S4);
std::vector<s4> expected = {s4(0), s4(2), s4(-2)};
ComputeAndCompareR1<s4>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1S4ToR1F32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<s4>(&builder, {s4(0), s4(1), s4(2), s4(-8)});
ConvertElementType(a, F32);
std::vector<float> expected = {0, 1, 2, -8};
ComputeAndCompareR1<float>(&builder, expected, {});
}
// Exhaustively converts all 2^16 bf16 bit patterns to f32 and bitcasts the
// results to U32. The widened f32 should carry the bf16 bits in its top half
// (pattern i -> i << 16), with two tolerated deviations: subnormal inputs may
// flush to a same-signed zero, and NaN payloads may change (only NaN-ness is
// required to be preserved).
XLA_TEST_F(ConvertTest, ConvertBF16F32) {
  XlaBuilder builder(TestName());
  std::vector<bfloat16> all_bfloats(1 << 16);
  for (int i = 0; i < all_bfloats.size(); ++i) {
    all_bfloats[i] =
        Eigen::numext::bit_cast<bfloat16>(static_cast<uint16_t>(i));
  }
  std::vector<uint32_t> expected(all_bfloats.size());
  for (int i = 0; i < expected.size(); ++i) {
    // bf16 -> f32 widening appends 16 zero mantissa bits: bits become i << 16.
    expected[i] = (1U << 16) * i;
  }
  xla::XlaOp all_bfloats_bf16 = ConstantR1<bfloat16>(&builder, all_bfloats);
  xla::XlaOp all_bfloats_f32 = ConvertElementType(all_bfloats_bf16, F32);
  BitcastConvertType(all_bfloats_f32, U32);
  TF_ASSERT_OK_AND_ASSIGN(const auto results, ExecuteAndTransfer(&builder, {}));
  for (int i = 0; i < expected.size(); ++i) {
    const auto result = results.Get<uint32_t>({i});
    const auto correct = expected[i];
    if (all_bfloats[i] != 0.0f &&
        all_bfloats[i] < std::numeric_limits<float>::min()) {
      // Subnormal input: accept either the exact widening or a flush to
      // zero of the same sign.
      const float same_signed_zero =
          Eigen::numext::signbit(all_bfloats[i]) ? -0.0f : 0.0f;
      if (result != correct) {
        EXPECT_EQ(result, absl::bit_cast<uint32_t>(same_signed_zero));
      }
    } else if (Eigen::numext::isnan(all_bfloats[i])) {
      // NaN input: only require the output to also be a NaN.
      ASSERT_TRUE(std::isnan(absl::bit_cast<float>(correct)));
      EXPECT_TRUE(std::isnan(absl::bit_cast<float>(result)));
    } else {
      EXPECT_EQ(result, correct);
    }
  }
}
// Converts a batch of random f32 values (some with bit 15 forced on to
// exercise rounding at the bf16 truncation boundary, plus explicit
// quiet/signaling NaNs) to bf16 and compares the bit patterns against the
// host's static_cast<bfloat16>. Subnormal inputs may flush to a same-signed
// zero; NaN results need only stay NaN (sign checked on the Host platform).
XLA_TEST_F(ConvertTest, ConvertF32BF16) {
  XlaBuilder builder(TestName());
  std::vector<float> floats(100);
  std::minstd_rand0 generator;
  for (int i = 0; i < floats.size(); ++i) {
    floats[i] = generator();
    if (i < 10) {
      // Force the highest bit that bf16 truncation drops, so rounding
      // behavior at the halfway point is exercised.
      auto val = absl::bit_cast<uint32_t>(floats[i]);
      val |= 1 << 15;
      floats[i] = absl::bit_cast<float>(val);
    }
  }
  floats.push_back(std::numeric_limits<float>::quiet_NaN());
  floats.push_back(-std::numeric_limits<float>::quiet_NaN());
  // Signaling-NaN bit patterns in both signs.
  floats.push_back(absl::bit_cast<float>(0x7F800001));
  floats.push_back(absl::bit_cast<float>(0xFF800001));
  std::vector<bfloat16> expected(floats.size());
  for (int i = 0; i < expected.size(); ++i) {
    expected[i] = static_cast<bfloat16>(floats[i]);
  }
  xla::XlaOp lit_f32 = ConstantR1<float>(&builder, floats);
  xla::XlaOp lit_bf16 = ConvertElementType(lit_f32, BF16);
  BitcastConvertType(lit_bf16, U16);
  TF_ASSERT_OK_AND_ASSIGN(const auto results, ExecuteAndTransfer(&builder, {}));
  for (int i = 0; i < expected.size(); ++i) {
    const auto result = results.Get<uint16_t>({i});
    const auto correct = absl::bit_cast<uint16_t>(expected[i]);
    if (floats[i] != 0.0f && floats[i] < std::numeric_limits<float>::min()) {
      // Subnormal input: accept a flush to zero of the same sign.
      const bfloat16 same_signed_zero =
          bfloat16(std::signbit(floats[i]) ? -0.0f : 0.0f);
      if (result != correct) {
        EXPECT_EQ(result, absl::bit_cast<uint16_t>(same_signed_zero));
      }
    } else if (std::isnan(floats[i])) {
      // NaN input: require NaN output; on Host additionally check the sign bit.
      ASSERT_TRUE(std::isnan(absl::bit_cast<bfloat16>(correct)));
      EXPECT_TRUE(std::isnan(absl::bit_cast<bfloat16>(result)));
      if (client_->platform()->Name() == "Host") {
        EXPECT_EQ(result >> 15, correct >> 15);
      }
    } else {
      EXPECT_EQ(result, correct);
    }
  }
}
XLA_TYPED_TEST(ConvertTestT, ConvertFPToPred) {
XlaBuilder builder(this->TestName());
using FP = TypeParam;
auto a = ConstantR1<FP>(&builder, {FP{0.0}, FP{0.25}, FP{2.0}, FP{-0.0}});
ConvertElementType(a, PRED);
std::array<bool, 4> expected = {false, true, true, false};
this->template ComputeAndCompareR1<bool>(&builder, expected, {});
}
XLA_TEST_F(ConvertTest, ConvertF16F8e5m2Roundtrip) {
XlaBuilder builder(TestName());
float nan = std::numeric_limits<float>::quiet_NaN();
float inf = std::numeric_limits<float>::infinity();
struct TestCase {
float input;
float expected_roundtrip;
} test_cases[] = {
{0.0, 0.0},
{-0.0, 0.0},
{1.0, 1.0},
{-1.0, -1.0},
{nan, nan},
{inf, inf},
{0x1.2p0, 0x1p0},
{0x1.6p0, 0x1.8p0},
{0x1.Cp15, 0x1.Cp15},
{0x1.DFCp15, 0x1.Cp15},
{0x1.Ep15, inf},
{0x1p16, inf},
{0x1p-14, 0x1p-14},
{0x1.Cp-15, 0x1p-14},
{0x0.8p-14, 0x0.8p-14},
{0x0.Ap-14, 0x0.8p-14},
{0x0.Ep-14, 0x1.0p-14},
{0x0.98p-14, 0x0.8p-14},
{0x0.A8p-14, 0x0.Cp-14},
{0x0.2p-14, 0},
{0x0.204p-14, 0x0.4p-14},
{0x0.DFCp-14, 0x0.Cp-14},
};
std::vector<Eigen::half> inputs;
std::vector<Eigen::half> expected_roundtrip;
for (auto test_case : test_cases) {
inputs.push_back(Eigen::half{test_case.input});
expected_roundtrip.push_back(Eigen::half{test_case.expected_roundtrip});
}
auto f8 =
ConvertElementType(ConstantR1<Eigen::half>(&builder, inputs), F8E5M2);
ConvertElementType(f8, F16);
ComputeAndCompareR1<Eigen::half>(&builder, expected_roundtrip, {},
ErrorSpec(0.));
}
XLA_TEST_F(ConvertTest, DISABLED_ON_CPU(ConvertF32F8e5m2Roundtrip)) {
XlaBuilder builder(TestName());
float nan = std::numeric_limits<float>::quiet_NaN();
float inf = std::numeric_limits<float>::infinity();
struct TestCase {
float input;
float expected_roundtrip;
} test_cases[] = {
{0.0, 0.0},
{-0.0, -0.0},
{1.0, 1.0},
{-1.0, -1.0},
{nan, nan},
{inf, inf},
{0x1.2p0, 0x1p0},
{0x1.6p0, 0x1.8p0},
{0x1.Cp15, 0x1.Cp15},
{0x1.DFFFFEp15, 0x1.Cp15},
{0x1.Ep15, inf},
{0x1p16, inf},
{0x1p-14, 0x1p-14},
{0x1.Cp-15, 0x1p-14},
{0x1.0p-15, 0x0.8p-14},
{0x1.4p-15, 0x0.8p-14},
{0x1.Cp-15, 0x1.0p-14},
{0x1.3p-15, 0x0.8p-14},
{0x1.5p-15, 0x0.Cp-14},
{0x1p-17, 0},
{0x1.000002p-17, 0x0.4p-14},
{0x1.BFFFFEp-15, 0x0.Cp-14},
};
std::vector<float> inputs;
std::vector<float> expected_roundtrip;
for (auto test_case : test_cases) {
inputs.push_back(test_case.input);
expected_roundtrip.push_back(test_case.expected_roundtrip);
}
auto f8 = ConvertElementType(ConstantR1<float>(&builder, inputs), F8E5M2);
ConvertElementType(f8, F32);
ComputeAndCompareR1<float>(&builder, expected_roundtrip, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e5m2RoundtripExhaustive) {
XlaBuilder builder(this->TestName());
using From = tsl::float8_e5m2;
std::vector<From> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(i)));
}
xla::XlaOp all_f8_as_fp =
ConvertElementType(ConstantR1<From>(&builder, all_f8),
primitive_util::NativeToPrimitiveType<TypeParam>());
ConvertElementType(all_f8_as_fp, F8E5M2);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
// Casts every e5m2 bit pattern to TypeParam and converts back to F8E5M2;
// the round trip must be exact (ErrorSpec(0.)). E3M4 is skipped pending an
// ml_dtypes release with the required fix.
XLA_TYPED_TEST(ConvertTestT, ConvertF8e5m2RoundtripExhaustive2) {
  XlaBuilder builder(this->TestName());
  if constexpr (std::is_same_v<TypeParam, tsl::float8_e3m4>) {
    // Fixed: the skip message contained an unterminated string literal (a URL
    // truncated at "https:"), which does not compile. NOTE(review): restore
    // the exact upstream issue URL if known.
    GTEST_SKIP() << "Skipping test for E3M4 as it requires an ml_dtypes "
                    "release with the fix from "
                    "https://github.com/jax-ml/ml_dtypes included.";
  } else {
    std::vector<TypeParam> all_f8;
    for (int i = 0; i < 256; i++) {
      all_f8.push_back(static_cast<TypeParam>(
          Eigen::numext::bit_cast<tsl::float8_e5m2>(static_cast<uint8_t>(i))));
    }
    ConvertElementType(ConstantR1<TypeParam>(&builder, all_f8), F8E5M2);
    this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
  }
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e5m2RoundtripExhaustive3) {
XlaBuilder builder(this->TestName());
using From = tsl::float8_e5m2;
std::vector<From> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(i)));
}
ConvertElementType(ConstantR1<From>(&builder, all_f8),
primitive_util::NativeToPrimitiveType<TypeParam>());
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestF16, ConvertF8e5m2F16RoundtripExhaustive4) {
XlaBuilder builder(this->TestName());
std::vector<TypeParam> inputs;
for (int i = 0; i < 65536; i++) {
inputs.push_back(
Eigen::numext::bit_cast<TypeParam>(static_cast<uint16_t>(i)));
}
xla::XlaOp all_f16_to_f8 = ConstantR1<TypeParam>(&builder, inputs);
ConvertElementType(all_f16_to_f8, F8E5M2);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TEST_F(ConvertTest, ConvertF16F8e4m3Roundtrip) {
XlaBuilder builder(TestName());
float nan = std::numeric_limits<float>::quiet_NaN();
float inf = std::numeric_limits<float>::infinity();
struct TestCase {
float input;
float expected_roundtrip;
} test_cases[] = {
{0.0, 0.0},
{-0.0, -0.0},
{1.0, 1.0},
{-1.0, -1.0},
{nan, nan},
{-nan, -nan},
{inf, inf},
{-inf, -inf},
{0x1.1p0, 0x1p0},
{0x1.3p0, 0x1.4p0},
{0x1.Ep7, 0x1.Ep7},
{0x1.EFCp7, 0x1.Ep7},
{0x1.Fp7, inf},
{0x1p8, inf},
{0x1p-6, 0x1p-6},
{0x1.Ep-7, 0x1p-6},
{0x0.2p-6, 0x0.2p-6},
{0x0.Ep-6, 0x0.Ep-6},
{0x0.8p-6, 0x0.8p-6},
{0x0.9p-6, 0x0.8p-6},
{0x0.Fp-6, 0x0.8p-5},
{0x0.8Fp-6, 0x0.8p-6},
{0x0.91p-6, 0x0.Ap-6},
{0x1p-10, 0},
{0x1.004p-10, 0x0.2p-6},
{0x0.EFCp-6, 0x0.Ep-6},
};
std::vector<Eigen::half> inputs;
std::vector<Eigen::half> expected_roundtrip;
for (auto test_case : test_cases) {
inputs.push_back(Eigen::half{test_case.input});
expected_roundtrip.push_back(Eigen::half{test_case.expected_roundtrip});
}
auto f8 =
ConvertElementType(ConstantR1<Eigen::half>(&builder, inputs), F8E4M3);
ConvertElementType(f8, F16);
ComputeAndCompareR1<Eigen::half>(&builder, expected_roundtrip, {},
ErrorSpec(0.));
}
XLA_TEST_F(ConvertTest, DISABLED_ON_CPU(ConvertF32F8e4m3Roundtrip)) {
XlaBuilder builder(TestName());
float nan = std::numeric_limits<float>::quiet_NaN();
float inf = std::numeric_limits<float>::infinity();
struct TestCase {
float input;
float expected_roundtrip;
} test_cases[] = {
{0.0, 0.0},
{-0.0, -0.0},
{1.0, 1.0},
{-1.0, -1.0},
{nan, nan},
{-nan, -nan},
{inf, inf},
{-inf, -inf},
{0x1.1p0, 0x1p0},
{0x1.3p0, 0x1.4p0},
{0x1.Ep7, 0x1.Ep7},
{0x1.EFFFFEp7, 0x1.Ep7},
{0x1.Fp7, inf},
{0x1p8, inf},
{0x1p-6, 0x1p-6},
{0x1.Ep-7, 0x1p-6},
{0x0.2p-6, 0x0.2p-6},
{0x0.Ep-6, 0x0.Ep-6},
{0x0.8p-6, 0x0.8p-6},
{0x0.9p-6, 0x0.8p-6},
{0x0.Fp-6, 0x0.8p-5},
{0x0.8Fp-6, 0x0.8p-6},
{0x0.91p-6, 0x0.Ap-6},
{0x1p-10, 0},
{0x1.000002p-10, 0x0.2p-6},
{0x0.EFFFFEp-6, 0x0.Ep-6},
};
std::vector<float> inputs;
std::vector<float> expected_roundtrip;
for (auto test_case : test_cases) {
inputs.push_back(test_case.input);
expected_roundtrip.push_back(test_case.expected_roundtrip);
}
auto f8 = ConvertElementType(ConstantR1<float>(&builder, inputs), F8E4M3);
ConvertElementType(f8, F32);
ComputeAndCompareR1<float>(&builder, expected_roundtrip, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3RoundtripExhaustive) {
XlaBuilder builder(this->TestName());
using From = tsl::float8_e4m3;
std::vector<From> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(i)));
}
xla::XlaOp all_f8_as_fp =
ConvertElementType(ConstantR1<From>(&builder, all_f8),
primitive_util::NativeToPrimitiveType<TypeParam>());
ConvertElementType(all_f8_as_fp, F8E4M3);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3RoundtripExhaustive2) {
XlaBuilder builder(this->TestName());
std::vector<TypeParam> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(static_cast<TypeParam>(
Eigen::numext::bit_cast<tsl::float8_e4m3>(static_cast<uint8_t>(i))));
}
ConvertElementType(ConstantR1<TypeParam>(&builder, all_f8), F8E4M3);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
// Widens every f8e4m3 bit pattern to TypeParam and checks the converted
// values against the reference evaluator (no narrowing step).
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3RoundtripExhaustive3) {
  XlaBuilder builder(this->TestName());
  using From = tsl::float8_e4m3;
  std::vector<From> codes;
  codes.reserve(256);
  for (int bits = 0; bits < 256; ++bits) {
    codes.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(bits)));
  }
  ConvertElementType(ConstantR1<From>(&builder, codes),
                     primitive_util::NativeToPrimitiveType<TypeParam>());
  this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
// Narrows every 16-bit pattern of TypeParam (a half-precision type) down to
// F8E4M3, comparing against the reference evaluator.
XLA_TYPED_TEST(ConvertTestF16, ConvertF8e4m3F16RoundtripExhaustive4) {
  XlaBuilder builder(this->TestName());
  std::vector<TypeParam> values;
  values.reserve(65536);
  for (int bits = 0; bits < 65536; ++bits) {
    values.push_back(
        Eigen::numext::bit_cast<TypeParam>(static_cast<uint16_t>(bits)));
  }
  ConvertElementType(ConstantR1<TypeParam>(&builder, values), F8E4M3);
  this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
// Checks F16 -> F8E4M3FN -> F16 rounding on hand-picked values. The fn
// ("finite-only") format has no infinity, so overflowing magnitudes map to
// NaN here (see the {inf, nan} and {0x1p9, nan} rows below).
XLA_TEST_F(ConvertTest, ConvertF16F8e4m3fnRoundtrip) {
  XlaBuilder builder(TestName());
  float nan = std::numeric_limits<float>::quiet_NaN();
  float inf = std::numeric_limits<float>::infinity();
  struct TestCase {
    float input;                // value fed in (as F16)
    float expected_roundtrip;   // value expected back after the f8 round trip
  } test_cases[] = {
      // Signed zeros, units, and overflow-to-NaN.
      {0.0, 0.0},
      {-0.0, -0.0},
      {1.0, 1.0},
      {-1.0, -1.0},
      {inf, nan},
      // Round-to-nearest-even just above 1.0.
      {0x1.1p0, 0x1p0},
      {0x1.3p0, 0x1.4p0},
      // Around the largest finite value / the overflow threshold.
      {0x1.Cp8, 0x1.Cp8},
      {0x1.Dp8, 0x1.Cp8},
      {0x1.D04p8, nan},
      {0x1p9, nan},
      // Smallest normal, subnormal rounding, and underflow to zero.
      {0x1p-6, 0x1p-6},
      {0x1.Ep-7, 0x1p-6},
      {0x1.0p-8, 0x0.4p-6},
      {0x1.4p-8, 0x0.4p-6},
      {0x1.Cp-8, 0x0.8p-6},
      {0x1.3p-8, 0x0.4p-6},
      {0x1.5p-8, 0x0.6p-6},
      {0x1p-10, 0},
      {0x1.004p-10, 0x0.2p-6},
      {0x1.DFCp-7, 0x0.Ep-6},
  };
  std::vector<Eigen::half> inputs;
  std::vector<Eigen::half> expected_roundtrip;
  for (auto test_case : test_cases) {
    inputs.push_back(Eigen::half{test_case.input});
    expected_roundtrip.push_back(Eigen::half{test_case.expected_roundtrip});
  }
  auto f8 =
      ConvertElementType(ConstantR1<Eigen::half>(&builder, inputs), F8E4M3FN);
  ConvertElementType(f8, F16);
  ComputeAndCompareR1<Eigen::half>(&builder, expected_roundtrip, {},
                                   ErrorSpec(0.));
}
// F32 variant of the F8E4M3FN round-trip table; uses F32-precision boundary
// constants (e.g. 0x1.D00002p8) to probe the rounding thresholds exactly.
// Disabled on CPU.
XLA_TEST_F(ConvertTest, DISABLED_ON_CPU(ConvertF32F8e4m3fnRoundtrip)) {
  XlaBuilder builder(TestName());
  float nan = std::numeric_limits<float>::quiet_NaN();
  float inf = std::numeric_limits<float>::infinity();
  struct TestCase {
    float input;                // value fed in (as F32)
    float expected_roundtrip;   // value expected back after the f8 round trip
  } test_cases[] = {
      // Signed zeros, units, and overflow-to-NaN (fn format has no inf).
      {0.0, 0.0},
      {-0.0, -0.0},
      {1.0, 1.0},
      {-1.0, -1.0},
      {inf, nan},
      // Round-to-nearest-even just above 1.0.
      {0x1.1p0, 0x1p0},
      {0x1.3p0, 0x1.4p0},
      // Around the largest finite value / the overflow threshold.
      {0x1.Cp8, 0x1.Cp8},
      {0x1.Dp8, 0x1.Cp8},
      {0x1.D00002p8, nan},
      {0x1p9, nan},
      // Smallest normal, subnormal rounding, and underflow to zero.
      {0x1p-6, 0x1p-6},
      {0x1.Ep-7, 0x1p-6},
      {0x1.0p-8, 0x0.4p-6},
      {0x1.4p-8, 0x0.4p-6},
      {0x1.Cp-8, 0x0.8p-6},
      {0x1.3p-8, 0x0.4p-6},
      {0x1.5p-8, 0x0.6p-6},
      {0x1p-10, 0},
      {0x1.000002p-10, 0x0.2p-6},
      {0x1.DFFFFEp-7, 0x0.Ep-6},
  };
  std::vector<float> inputs;
  std::vector<float> expected_roundtrip;
  for (auto test_case : test_cases) {
    inputs.push_back(test_case.input);
    expected_roundtrip.push_back(test_case.expected_roundtrip);
  }
  auto f8 = ConvertElementType(ConstantR1<float>(&builder, inputs), F8E4M3FN);
  ConvertElementType(f8, F32);
  ComputeAndCompareR1<float>(&builder, expected_roundtrip, {}, ErrorSpec(0.));
}
// Round-trips every f8e4m3fn bit pattern through TypeParam and back to
// F8E4M3FN, comparing against the reference evaluator.
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3fnRoundtripExhaustive) {
  XlaBuilder builder(this->TestName());
  using From = tsl::float8_e4m3fn;
  std::vector<From> codes;
  codes.reserve(256);
  for (int bits = 0; bits < 256; ++bits) {
    codes.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(bits)));
  }
  // Widen f8 -> TypeParam on device, then narrow back to f8.
  xla::XlaOp widened =
      ConvertElementType(ConstantR1<From>(&builder, codes),
                         primitive_util::NativeToPrimitiveType<TypeParam>());
  ConvertElementType(widened, F8E4M3FN);
  this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
// Narrows every f8e4m3fn value (pre-widened to TypeParam on the host) back to
// F8E4M3FN on device.
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3fnRoundtripExhaustive2) {
  XlaBuilder builder(this->TestName());
  std::vector<TypeParam> values;
  values.reserve(256);
  for (int bits = 0; bits < 256; ++bits) {
    const auto f8 =
        Eigen::numext::bit_cast<tsl::float8_e4m3fn>(static_cast<uint8_t>(bits));
    values.push_back(static_cast<TypeParam>(f8));
  }
  ConvertElementType(ConstantR1<TypeParam>(&builder, values), F8E4M3FN);
  this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
// Widens every f8e4m3fn bit pattern to TypeParam and checks the converted
// values against the reference evaluator (no narrowing step).
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3fnRoundtripExhaustive3) {
  XlaBuilder builder(this->TestName());
  using From = tsl::float8_e4m3fn;
  std::vector<From> codes;
  codes.reserve(256);
  for (int bits = 0; bits < 256; ++bits) {
    codes.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(bits)));
  }
  ConvertElementType(ConstantR1<From>(&builder, codes),
                     primitive_util::NativeToPrimitiveType<TypeParam>());
  this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
// Narrows every 16-bit pattern of TypeParam (a half-precision type) down to
// F8E4M3FN, comparing against the reference evaluator.
XLA_TYPED_TEST(ConvertTestF16, ConvertF8e4m3fnF16RoundtripExhaustive4) {
  XlaBuilder builder(this->TestName());
  std::vector<TypeParam> values;
  values.reserve(65536);
  for (int bits = 0; bits < 65536; ++bits) {
    values.push_back(
        Eigen::numext::bit_cast<TypeParam>(static_cast<uint16_t>(bits)));
  }
  ConvertElementType(ConstantR1<TypeParam>(&builder, values), F8E4M3FN);
  this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
// Checks F16 -> F8E4M3B11FNUZ -> F16 rounding on hand-picked values. The
// fnuz ("finite, no unsigned zero") family has no -0 (it flushes to +0, see
// {-0.0, 0.0}) and no infinity (overflow maps to NaN).
XLA_TEST_F(ConvertTest, ConvertF16F8e4m3b11fnuzRoundtrip) {
  XlaBuilder builder(TestName());
  float nan = std::numeric_limits<float>::quiet_NaN();
  float inf = std::numeric_limits<float>::infinity();
  struct TestCase {
    float input;                // value fed in (as F16)
    float expected_roundtrip;   // value expected back after the f8 round trip
  } test_cases[] = {
      // Zeros (-0 flushes to +0), units, overflow-to-NaN.
      {0.0, 0.0},
      {-0.0, 0.0},
      {1.0, 1.0},
      {-1.0, -1.0},
      {inf, nan},
      // Round-to-nearest-even just above 1.0.
      {0x1.1p0, 0x1p0},
      {0x1.3p0, 0x1.4p0},
      // Around the largest finite value (bias 11 shifts the range down).
      {0x1.Ep4, 0x1.Ep4},
      {0x1.EFCp4, 0x1.Ep4},
      {0x1.Fp4, nan},
      {0x1p5, nan},
      // Smallest normal, subnormal rounding, and underflow to zero.
      {0x1p-10, 0x1p-10},
      {0x1.Ep-11, 0x1p-10},
      {0x1.0p-12, 0x0.4p-10},
      {0x1.4p-12, 0x0.4p-10},
      {0x1.Cp-12, 0x0.8p-10},
      {0x1.3p-12, 0x0.4p-10},
      {0x1.5p-12, 0x0.6p-10},
      {0x1p-14, 0},
      {0x1.004p-14, 0x0.2p-10},
      {0x1.DFCp-11, 0x0.Ep-10},
  };
  std::vector<Eigen::half> inputs;
  std::vector<Eigen::half> expected_roundtrip;
  for (auto test_case : test_cases) {
    inputs.push_back(Eigen::half{test_case.input});
    expected_roundtrip.push_back(Eigen::half{test_case.expected_roundtrip});
  }
  auto f8 = ConvertElementType(ConstantR1<Eigen::half>(&builder, inputs),
                               F8E4M3B11FNUZ);
  ConvertElementType(f8, F16);
  ComputeAndCompareR1<Eigen::half>(&builder, expected_roundtrip, {},
                                   ErrorSpec(0.));
}
// F32 variant of the F8E4M3B11FNUZ round-trip table; uses F32-precision
// boundary constants (e.g. 0x1.EFFFFEp4) to probe the rounding thresholds
// exactly. Disabled on CPU.
XLA_TEST_F(ConvertTest, DISABLED_ON_CPU(ConvertF32F8e4m3b11fnuzRoundtrip)) {
  XlaBuilder builder(TestName());
  float nan = std::numeric_limits<float>::quiet_NaN();
  float inf = std::numeric_limits<float>::infinity();
  struct TestCase {
    float input;                // value fed in (as F32)
    float expected_roundtrip;   // value expected back after the f8 round trip
  } test_cases[] = {
      // Zeros (-0 flushes to +0 in fnuz formats), units, overflow-to-NaN.
      {0.0, 0.0},
      {-0.0, 0.0},
      {1.0, 1.0},
      {-1.0, -1.0},
      {inf, nan},
      // Round-to-nearest-even just above 1.0.
      {0x1.1p0, 0x1p0},
      {0x1.3p0, 0x1.4p0},
      // Around the largest finite value / the overflow threshold.
      {0x1.Ep4, 0x1.Ep4},
      {0x1.EFFFFEp4, 0x1.Ep4},
      {0x1.Fp4, nan},
      {0x1p5, nan},
      // Smallest normal, subnormal rounding, and underflow to zero.
      {0x1p-10, 0x1p-10},
      {0x1.Ep-11, 0x1p-10},
      {0x1.0p-12, 0x0.4p-10},
      {0x1.4p-12, 0x0.4p-10},
      {0x1.Cp-12, 0x0.8p-10},
      {0x1.3p-12, 0x0.4p-10},
      {0x1.5p-12, 0x0.6p-10},
      {0x1p-14, 0},
      {0x1.000002p-14, 0x0.2p-10},
      {0x1.DFFFFEp-11, 0x0.Ep-10},
  };
  std::vector<float> inputs;
  std::vector<float> expected_roundtrip;
  for (auto test_case : test_cases) {
    inputs.push_back(test_case.input);
    expected_roundtrip.push_back(test_case.expected_roundtrip);
  }
  auto f8 =
      ConvertElementType(ConstantR1<float>(&builder, inputs), F8E4M3B11FNUZ);
  ConvertElementType(f8, F32);
  ComputeAndCompareR1<float>(&builder, expected_roundtrip, {}, ErrorSpec(0.));
}
// Round-trips every f8e4m3b11fnuz bit pattern through TypeParam and back,
// comparing against the reference evaluator.
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3b11fnuzRoundtripExhaustive) {
  XlaBuilder builder(this->TestName());
  using From = tsl::float8_e4m3b11fnuz;
  std::vector<From> codes;
  codes.reserve(256);
  for (int bits = 0; bits < 256; ++bits) {
    codes.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(bits)));
  }
  // Widen f8 -> TypeParam on device, then narrow back to f8.
  xla::XlaOp widened =
      ConvertElementType(ConstantR1<From>(&builder, codes),
                         primitive_util::NativeToPrimitiveType<TypeParam>());
  ConvertElementType(widened, F8E4M3B11FNUZ);
  this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
// Narrows every f8e4m3b11fnuz value (pre-widened to TypeParam on the host)
// back to F8E4M3B11FNUZ on device.
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3b11fnuzRoundtripExhaustive2) {
  XlaBuilder builder(this->TestName());
  std::vector<TypeParam> values;
  values.reserve(256);
  for (int bits = 0; bits < 256; ++bits) {
    const auto f8 = Eigen::numext::bit_cast<tsl::float8_e4m3b11fnuz>(
        static_cast<uint8_t>(bits));
    values.push_back(static_cast<TypeParam>(f8));
  }
  ConvertElementType(ConstantR1<TypeParam>(&builder, values), F8E4M3B11FNUZ);
  this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
// Widens every f8e4m3b11fnuz bit pattern to TypeParam and checks the result
// against the reference evaluator (no narrowing step).
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3b11fnuzRoundtripExhaustive3) {
  XlaBuilder builder(this->TestName());
  using From = tsl::float8_e4m3b11fnuz;
  std::vector<From> codes;
  codes.reserve(256);
  for (int bits = 0; bits < 256; ++bits) {
    codes.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(bits)));
  }
  ConvertElementType(ConstantR1<From>(&builder, codes),
                     primitive_util::NativeToPrimitiveType<TypeParam>());
  this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
// Narrows every 16-bit pattern of TypeParam (a half-precision type) down to
// F8E4M3B11FNUZ, comparing against the reference evaluator.
XLA_TYPED_TEST(ConvertTestF16, ConvertF8e4m3b11fnuzF16RoundtripExhaustive4) {
  XlaBuilder builder(this->TestName());
  std::vector<TypeParam> values;
  values.reserve(65536);
  for (int bits = 0; bits < 65536; ++bits) {
    values.push_back(
        Eigen::numext::bit_cast<TypeParam>(static_cast<uint16_t>(bits)));
  }
  xla::XlaOp operand = ConstantR1<TypeParam>(&builder, values);
  ConvertElementType(operand, F8E4M3B11FNUZ);
  this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
// Checks F16 -> F8E5M2FNUZ -> F16 rounding on hand-picked values. fnuz
// formats carry no -0 (it flushes to +0) and no infinity (overflow -> NaN);
// e5m2 trades mantissa bits for a wider exponent range (max near 2^16).
XLA_TEST_F(ConvertTest, ConvertF16F8e5m2fnuzRoundtrip) {
  XlaBuilder builder(TestName());
  float nan = std::numeric_limits<float>::quiet_NaN();
  float inf = std::numeric_limits<float>::infinity();
  struct TestCase {
    float input;                // value fed in (as F16)
    float expected_roundtrip;   // value expected back after the f8 round trip
  } test_cases[] = {
      // Zeros (-0 flushes to +0), units, NaN, overflow-to-NaN.
      {0.0, 0.0},
      {-0.0, 0.0},
      {1.0, 1.0},
      {-1.0, -1.0},
      {nan, nan},
      {inf, nan},
      // Round-to-nearest-even just above 1.0 (only 2 mantissa bits).
      {0x1.2p0, 0x1p0},
      {0x1.6p0, 0x1.8p0},
      // Around the largest finite value / the overflow threshold.
      {0x1.Cp15, 0x1.Cp15},
      {0x1.DFCp15, 0x1.Cp15},
      {0x1.Ep15, nan},
      {0x1p16, nan},
      // Smallest normal, subnormal rounding, and underflow to zero.
      {0x1p-15, 0x1p-15},
      {0x1.Cp-16, 0x1p-15},
      {0x0.4p-14, 0x0.8p-15},
      {0x0.5p-14, 0x0.8p-15},
      {0x0.7p-14, 0x1.0p-15},
      {0x0.4Cp-14, 0x0.8p-15},
      {0x0.54p-14, 0x0.Cp-15},
      {0x0.1p-14, 0},
      {0x0.104p-14, 0x0.4p-15},
      {0x0.6FCp-14, 0x0.Cp-15},
  };
  std::vector<Eigen::half> inputs;
  std::vector<Eigen::half> expected_roundtrip;
  for (auto test_case : test_cases) {
    inputs.push_back(Eigen::half{test_case.input});
    expected_roundtrip.push_back(Eigen::half{test_case.expected_roundtrip});
  }
  auto f8 =
      ConvertElementType(ConstantR1<Eigen::half>(&builder, inputs), F8E5M2FNUZ);
  ConvertElementType(f8, F16);
  ComputeAndCompareR1<Eigen::half>(&builder, expected_roundtrip, {},
                                   ErrorSpec(0.));
}
// F32 variant of the F8E5M2FNUZ round-trip table; uses F32-precision
// boundary constants (e.g. 0x1.DFFFFEp15) to probe rounding thresholds.
XLA_TEST_F(ConvertTest, ConvertF32F8e5m2fnuzRoundtrip) {
  XlaBuilder builder(TestName());
  float nan = std::numeric_limits<float>::quiet_NaN();
  float inf = std::numeric_limits<float>::infinity();
  struct TestCase {
    float input;                // value fed in (as F32)
    float expected_roundtrip;   // value expected back after the f8 round trip
  } test_cases[] = {
      // Zeros (-0 flushes to +0 in fnuz formats), units, NaN, overflow.
      {0.0, 0.0},
      {-0.0, 0.0},
      {1.0, 1.0},
      {-1.0, -1.0},
      {nan, nan},
      {inf, nan},
      // Round-to-nearest-even just above 1.0.
      {0x1.2p0, 0x1p0},
      {0x1.6p0, 0x1.8p0},
      // Around the largest finite value / the overflow threshold.
      {0x1.Cp15, 0x1.Cp15},
      {0x1.DFFFFEp15, 0x1.Cp15},
      {0x1.Ep15, nan},
      {0x1p16, nan},
      // Smallest normal, subnormal rounding, and underflow to zero.
      {0x1p-15, 0x1p-15},
      {0x1.Cp-16, 0x1p-15},
      {0x1.0p-16, 0x0.8p-15},
      {0x1.4p-16, 0x0.8p-15},
      // NOTE(review): same input as the {0x1.Cp-16, 0x1p-15} row above with
      // an identically-valued expectation (0x1.0p-15 == 0x1p-15); the entry
      // appears to be an accidental duplicate -- confirm against upstream.
      {0x1.Cp-16, 0x1.0p-15},
      {0x1.3p-16, 0x0.8p-15},
      {0x1.5p-16, 0x0.Cp-15},
      {0x1p-18, 0},
      {0x1.000002p-18, 0x0.4p-15},
      {0x1.BFFFFEp-16, 0x0.Cp-15},
  };
  std::vector<float> inputs;
  std::vector<float> expected_roundtrip;
  for (auto test_case : test_cases) {
    inputs.push_back(test_case.input);
    expected_roundtrip.push_back(test_case.expected_roundtrip);
  }
  auto f8 = ConvertElementType(ConstantR1<float>(&builder, inputs), F8E5M2FNUZ);
  ConvertElementType(f8, F32);
  ComputeAndCompareR1<float>(&builder, expected_roundtrip, {}, ErrorSpec(0.));
}
// Round-trips every f8e5m2fnuz bit pattern through TypeParam and back,
// comparing against the reference evaluator.
XLA_TYPED_TEST(ConvertTestT, ConvertF8e5m2fnuzRoundtripExhaustive) {
  XlaBuilder builder(this->TestName());
  using From = tsl::float8_e5m2fnuz;
  std::vector<From> codes;
  codes.reserve(256);
  for (int bits = 0; bits < 256; ++bits) {
    codes.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(bits)));
  }
  // Widen f8 -> TypeParam on device, then narrow back to f8.
  xla::XlaOp widened =
      ConvertElementType(ConstantR1<From>(&builder, codes),
                         primitive_util::NativeToPrimitiveType<TypeParam>());
  ConvertElementType(widened, F8E5M2FNUZ);
  this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
// Narrows every f8e5m2fnuz value (pre-widened to TypeParam on the host) back
// to F8E5M2FNUZ on device. E3M4 is skipped because the conversion needs a
// newer ml_dtypes release.
//
// Fix: the GTEST_SKIP message contained an unterminated string literal (a
// source-mangling artifact that truncated a URL after "https:"), which does
// not compile. The message is now a closed, self-contained string.
XLA_TYPED_TEST(ConvertTestT, ConvertF8e5m2fnuzRoundtripExhaustive2) {
  XlaBuilder builder(this->TestName());
  if constexpr (std::is_same_v<TypeParam, tsl::float8_e3m4>) {
    GTEST_SKIP() << "Skipping test for E3M4 as it requires an ml_dtypes "
                    "release with the corresponding upstream fix.";
  } else {
    std::vector<TypeParam> all_f8;
    all_f8.reserve(256);
    for (int i = 0; i < 256; i++) {
      all_f8.push_back(
          static_cast<TypeParam>(Eigen::numext::bit_cast<tsl::float8_e5m2fnuz>(
              static_cast<uint8_t>(i))));
    }
    ConvertElementType(ConstantR1<TypeParam>(&builder, all_f8), F8E5M2FNUZ);
    this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
  }
}
// Widens every f8e5m2fnuz bit pattern to TypeParam and checks the result
// against the reference evaluator (no narrowing step).
XLA_TYPED_TEST(ConvertTestT, ConvertF8e5m2fnuzRoundtripExhaustive3) {
  XlaBuilder builder(this->TestName());
  using From = tsl::float8_e5m2fnuz;
  std::vector<From> codes;
  codes.reserve(256);
  for (int bits = 0; bits < 256; ++bits) {
    codes.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(bits)));
  }
  ConvertElementType(ConstantR1<From>(&builder, codes),
                     primitive_util::NativeToPrimitiveType<TypeParam>());
  this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
// Narrows every 16-bit pattern of TypeParam (a half-precision type) down to
// F8E5M2FNUZ, comparing against the reference evaluator.
XLA_TYPED_TEST(ConvertTestF16, ConvertF8e5m2fnuzF16RoundtripExhaustive4) {
  XlaBuilder builder(this->TestName());
  std::vector<TypeParam> values;
  values.reserve(65536);
  for (int bits = 0; bits < 65536; ++bits) {
    values.push_back(
        Eigen::numext::bit_cast<TypeParam>(static_cast<uint16_t>(bits)));
  }
  xla::XlaOp operand = ConstantR1<TypeParam>(&builder, values);
  ConvertElementType(operand, F8E5M2FNUZ);
  this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
// Checks F16 -> F8E4M3FNUZ -> F16 rounding on hand-picked values. fnuz
// formats have no -0 (it flushes to +0) and no infinity (overflow -> NaN).
XLA_TEST_F(ConvertTest, ConvertF16F8e4m3fnuzRoundtrip) {
  XlaBuilder builder(TestName());
  float nan = std::numeric_limits<float>::quiet_NaN();
  float inf = std::numeric_limits<float>::infinity();
  struct TestCase {
    float input;                // value fed in (as F16)
    float expected_roundtrip;   // value expected back after the f8 round trip
  } test_cases[] = {
      // Zeros (-0 flushes to +0), units, overflow-to-NaN.
      {0.0, 0.0},
      {-0.0, 0.0},
      {1.0, 1.0},
      {-1.0, -1.0},
      {inf, nan},
      // Round-to-nearest-even just above 1.0.
      {0x1.1p0, 0x1p0},
      {0x1.3p0, 0x1.4p0},
      // Around the largest finite value / the overflow threshold.
      {0x1.Ep7, 0x1.Ep7},
      {0x1.EFCp7, 0x1.Ep7},
      {0x1.Fp7, nan},
      {0x1p8, nan},
      // Smallest normal, subnormal rounding, and underflow to zero.
      {0x1p-7, 0x1p-7},
      {0x1.Ep-8, 0x1p-7},
      {0x1.0p-9, 0x0.4p-7},
      {0x1.4p-9, 0x0.4p-7},
      {0x1.Cp-9, 0x0.8p-7},
      {0x1.3p-9, 0x0.4p-7},
      {0x1.5p-9, 0x0.6p-7},
      {0x1p-11, 0},
      {0x1.004p-11, 0x0.2p-7},
      {0x1.DFCp-8, 0x0.Ep-7},
  };
  std::vector<Eigen::half> inputs;
  std::vector<Eigen::half> expected_roundtrip;
  for (auto test_case : test_cases) {
    inputs.push_back(Eigen::half{test_case.input});
    expected_roundtrip.push_back(Eigen::half{test_case.expected_roundtrip});
  }
  auto f8 =
      ConvertElementType(ConstantR1<Eigen::half>(&builder, inputs), F8E4M3FNUZ);
  ConvertElementType(f8, F16);
  ComputeAndCompareR1<Eigen::half>(&builder, expected_roundtrip, {},
                                   ErrorSpec(0.));
}
// F32 variant of the F8E4M3FNUZ round-trip table; uses F32-precision
// boundary constants (e.g. 0x1.EFFFFEp7) to probe rounding thresholds.
XLA_TEST_F(ConvertTest, ConvertF32F8e4m3fnuzRoundtrip) {
  XlaBuilder builder(TestName());
  float nan = std::numeric_limits<float>::quiet_NaN();
  float inf = std::numeric_limits<float>::infinity();
  struct TestCase {
    float input;                // value fed in (as F32)
    float expected_roundtrip;   // value expected back after the f8 round trip
  } test_cases[] = {
      // Zeros (-0 flushes to +0 in fnuz formats), units, overflow-to-NaN.
      {0.0, 0.0},
      {-0.0, 0.0},
      {1.0, 1.0},
      {-1.0, -1.0},
      {inf, nan},
      // Round-to-nearest-even just above 1.0.
      {0x1.1p0, 0x1p0},
      {0x1.3p0, 0x1.4p0},
      // Around the largest finite value / the overflow threshold.
      {0x1.Ep7, 0x1.Ep7},
      {0x1.EFFFFEp7, 0x1.Ep7},
      {0x1.Fp7, nan},
      {0x1p8, nan},
      // Smallest normal, subnormal rounding, and underflow to zero.
      {0x1p-7, 0x1p-7},
      {0x1.Ep-8, 0x1p-7},
      {0x1.0p-9, 0x0.4p-7},
      {0x1.4p-9, 0x0.4p-7},
      {0x1.Cp-9, 0x0.8p-7},
      {0x1.3p-9, 0x0.4p-7},
      {0x1.5p-9, 0x0.6p-7},
      {0x1p-11, 0},
      {0x1.000002p-11, 0x0.2p-7},
      {0x1.DFFFFEp-8, 0x0.Ep-7},
  };
  std::vector<float> inputs;
  std::vector<float> expected_roundtrip;
  for (auto test_case : test_cases) {
    inputs.push_back(test_case.input);
    expected_roundtrip.push_back(test_case.expected_roundtrip);
  }
  auto f8 = ConvertElementType(ConstantR1<float>(&builder, inputs), F8E4M3FNUZ);
  ConvertElementType(f8, F32);
  ComputeAndCompareR1<float>(&builder, expected_roundtrip, {}, ErrorSpec(0.));
}
// Round-trips every f8e4m3fnuz bit pattern through TypeParam and back,
// comparing against the reference evaluator.
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3fnuzRoundtripExhaustive) {
  XlaBuilder builder(this->TestName());
  using From = tsl::float8_e4m3fnuz;
  std::vector<From> codes;
  codes.reserve(256);
  for (int bits = 0; bits < 256; ++bits) {
    codes.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(bits)));
  }
  // Widen f8 -> TypeParam on device, then narrow back to f8.
  xla::XlaOp widened =
      ConvertElementType(ConstantR1<From>(&builder, codes),
                         primitive_util::NativeToPrimitiveType<TypeParam>());
  ConvertElementType(widened, F8E4M3FNUZ);
  this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
// Narrows every f8e4m3fnuz value (pre-widened to TypeParam on the host) back
// to F8E4M3FNUZ on device.
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3fnuzRoundtripExhaustive2) {
  XlaBuilder builder(this->TestName());
  std::vector<TypeParam> values;
  values.reserve(256);
  for (int bits = 0; bits < 256; ++bits) {
    const auto f8 = Eigen::numext::bit_cast<tsl::float8_e4m3fnuz>(
        static_cast<uint8_t>(bits));
    values.push_back(static_cast<TypeParam>(f8));
  }
  ConvertElementType(ConstantR1<TypeParam>(&builder, values), F8E4M3FNUZ);
  this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
// Widens every f8e4m3fnuz bit pattern to TypeParam and checks the result
// against the reference evaluator (no narrowing step).
XLA_TYPED_TEST(ConvertTestT, ConvertF8e4m3fnuzRoundtripExhaustive3) {
  XlaBuilder builder(this->TestName());
  using From = tsl::float8_e4m3fnuz;
  std::vector<From> codes;
  codes.reserve(256);
  for (int bits = 0; bits < 256; ++bits) {
    codes.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(bits)));
  }
  ConvertElementType(ConstantR1<From>(&builder, codes),
                     primitive_util::NativeToPrimitiveType<TypeParam>());
  this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
// Narrows every 16-bit pattern of TypeParam (a half-precision type) down to
// F8E4M3FNUZ, comparing against the reference evaluator.
XLA_TYPED_TEST(ConvertTestF16, ConvertF8e4m3fnuzF16RoundtripExhaustive4) {
  XlaBuilder builder(this->TestName());
  std::vector<TypeParam> values;
  values.reserve(65536);
  for (int bits = 0; bits < 65536; ++bits) {
    values.push_back(
        Eigen::numext::bit_cast<TypeParam>(static_cast<uint16_t>(bits)));
  }
  xla::XlaOp operand = ConstantR1<TypeParam>(&builder, values);
  ConvertElementType(operand, F8E4M3FNUZ);
  this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
// Checks F16 -> F8E3M4 -> F16 rounding on hand-picked values. e3m4 keeps
// IEEE-style signed zero, infinities, and signed NaN (see the first rows),
// trading exponent range (max finite near 2^4) for a 4-bit mantissa.
XLA_TEST_F(ConvertTest, ConvertF16F8e3m4Roundtrip) {
  XlaBuilder builder(TestName());
  float nan = std::numeric_limits<float>::quiet_NaN();
  float inf = std::numeric_limits<float>::infinity();
  struct TestCase {
    float input;                // value fed in (as F16)
    float expected_roundtrip;   // value expected back after the f8 round trip
  } test_cases[] = {
      // Signed zeros, units, NaN/inf of both signs are preserved.
      {0.0, 0.0},
      {-0.0, -0.0},
      {1.0, 1.0},
      {-1.0, -1.0},
      {nan, nan},
      {-nan, -nan},
      {inf, inf},
      {-inf, -inf},
      // Round-to-nearest-even just above 1.0.
      {0x1.08p0, 0x1p0},
      {0x1.18p0, 0x1.2p0},
      // Around the largest finite value / overflow to infinity.
      {0x1.Fp3, 0x1.Fp3},
      {0x1.F7Cp3, 0x1.Fp3},
      {0x1.F8p3, inf},
      {0x1p4, inf},
      // Smallest normal, subnormal rounding, and underflow to zero.
      {0x1p-2, 0x1p-2},
      {0x1.Fp-3, 0x1p-2},
      {0x0.1p-2, 0x0.1p-2},
      {0x0.Fp-2, 0x0.Fp-2},
      {0x0.8p-2, 0x0.8p-2},
      {0x0.88p-2, 0x0.8p-2},
      {0x0.F8p-2, 0x0.8p-1},
      {0x0.87p-2, 0x0.8p-2},
      {0x0.89p-2, 0x0.9p-2},
      {0x1p-7, 0},
      {0x1.004p-7, 0x0.1p-2},
      {0x0.F7Cp-2, 0x0.Fp-2},
  };
  std::vector<Eigen::half> inputs;
  std::vector<Eigen::half> expected_roundtrip;
  for (auto test_case : test_cases) {
    inputs.push_back(Eigen::half{test_case.input});
    expected_roundtrip.push_back(Eigen::half{test_case.expected_roundtrip});
  }
  auto f8 =
      ConvertElementType(ConstantR1<Eigen::half>(&builder, inputs), F8E3M4);
  ConvertElementType(f8, F16);
  ComputeAndCompareR1<Eigen::half>(&builder, expected_roundtrip, {},
                                   ErrorSpec(0.));
}
// F32 variant of the F8E3M4 round-trip table; uses F32-precision boundary
// constants (e.g. 0x1.F7FFFEp3) to probe rounding thresholds. Disabled on
// CPU.
XLA_TEST_F(ConvertTest, DISABLED_ON_CPU(ConvertF32F8e3m4Roundtrip)) {
  XlaBuilder builder(TestName());
  float nan = std::numeric_limits<float>::quiet_NaN();
  float inf = std::numeric_limits<float>::infinity();
  struct TestCase {
    float input;                // value fed in (as F32)
    float expected_roundtrip;   // value expected back after the f8 round trip
  } test_cases[] = {
      // Signed zeros, units, NaN/inf of both signs are preserved.
      {0.0, 0.0},
      {-0.0, -0.0},
      {1.0, 1.0},
      {-1.0, -1.0},
      {nan, nan},
      {-nan, -nan},
      {inf, inf},
      {-inf, -inf},
      // Round-to-nearest-even just above 1.0.
      {0x1.08p0, 0x1p0},
      {0x1.18p0, 0x1.2p0},
      // Around the largest finite value / overflow to infinity.
      {0x1.Fp3, 0x1.Fp3},
      {0x1.F7FFFEp3, 0x1.Fp3},
      {0x1.F8p3, inf},
      {0x1p4, inf},
      // Smallest normal, subnormal rounding, and underflow to zero.
      {0x1p-2, 0x1p-2},
      {0x1.Fp-3, 0x1p-2},
      {0x0.1p-2, 0x0.1p-2},
      {0x0.Fp-2, 0x0.Fp-2},
      {0x0.8p-2, 0x0.8p-2},
      {0x0.88p-2, 0x0.8p-2},
      {0x0.F8p-2, 0x0.8p-1},
      {0x0.87p-2, 0x0.8p-2},
      {0x0.89p-2, 0x0.9p-2},
      {0x1p-7, 0},
      {0x1.000002p-7, 0x0.1p-2},
      {0x0.F7FFFEp-2, 0x0.Fp-2},
  };
  std::vector<float> inputs;
  std::vector<float> expected_roundtrip;
  for (auto test_case : test_cases) {
    inputs.push_back(test_case.input);
    expected_roundtrip.push_back(test_case.expected_roundtrip);
  }
  auto f8 = ConvertElementType(ConstantR1<float>(&builder, inputs), F8E3M4);
  ConvertElementType(f8, F32);
  ComputeAndCompareR1<float>(&builder, expected_roundtrip, {}, ErrorSpec(0.));
}
// Round-trips every f8e3m4 bit pattern through TypeParam and back to F8E3M4,
// comparing against the reference evaluator.
XLA_TYPED_TEST(ConvertTestT, ConvertF8e3m4RoundtripExhaustive) {
  XlaBuilder builder(this->TestName());
  using From = tsl::float8_e3m4;
  std::vector<From> codes;
  codes.reserve(256);
  for (int bits = 0; bits < 256; ++bits) {
    codes.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(bits)));
  }
  // Widen f8 -> TypeParam on device, then narrow back to f8.
  xla::XlaOp widened =
      ConvertElementType(ConstantR1<From>(&builder, codes),
                         primitive_util::NativeToPrimitiveType<TypeParam>());
  ConvertElementType(widened, F8E3M4);
  this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
// Narrows every f8e3m4 value (pre-widened to TypeParam on the host) back to
// F8E3M4 on device.
XLA_TYPED_TEST(ConvertTestT, ConvertF8e3m4RoundtripExhaustive2) {
  XlaBuilder builder(this->TestName());
  std::vector<TypeParam> values;
  values.reserve(256);
  for (int bits = 0; bits < 256; ++bits) {
    const auto f8 =
        Eigen::numext::bit_cast<tsl::float8_e3m4>(static_cast<uint8_t>(bits));
    values.push_back(static_cast<TypeParam>(f8));
  }
  ConvertElementType(ConstantR1<TypeParam>(&builder, values), F8E3M4);
  this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
// Widens every f8e3m4 bit pattern to TypeParam and checks the result against
// the reference evaluator (no narrowing step).
XLA_TYPED_TEST(ConvertTestT, ConvertF8e3m4RoundtripExhaustive3) {
  XlaBuilder builder(this->TestName());
  using From = tsl::float8_e3m4;
  std::vector<From> codes;
  codes.reserve(256);
  for (int bits = 0; bits < 256; ++bits) {
    codes.push_back(Eigen::numext::bit_cast<From>(static_cast<uint8_t>(bits)));
  }
  ConvertElementType(ConstantR1<From>(&builder, codes),
                     primitive_util::NativeToPrimitiveType<TypeParam>());
  this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
// Narrows every 16-bit pattern of TypeParam (a half-precision type) down to
// F8E3M4, comparing against the reference evaluator.
XLA_TYPED_TEST(ConvertTestF16, ConvertF8e3m4F16RoundtripExhaustive4) {
  XlaBuilder builder(this->TestName());
  std::vector<TypeParam> values;
  values.reserve(65536);
  for (int bits = 0; bits < 65536; ++bits) {
    values.push_back(
        Eigen::numext::bit_cast<TypeParam>(static_cast<uint16_t>(bits)));
  }
  ConvertElementType(ConstantR1<TypeParam>(&builder, values), F8E3M4);
  this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/convert.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/convert_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bb3eda25-8fdd-4ad9-89dd-5e546b6e1c89 | cpp | tensorflow/tensorflow | grappler_item | tensorflow/core/grappler/grappler_item.cc | tensorflow/core/grappler/grappler_item_test.cc | #include "tensorflow/core/grappler/grappler_item.h"
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/transitive_fanin.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace grappler {
// Builds the Grappler optimization options used in eager mode: pruning of
// stateful/dataset ops is permitted and the function library is not
// optimized.
GrapplerItem::OptimizationOptions CreateOptOptionsForEager() {
  GrapplerItem::OptimizationOptions opts;
  opts.allow_pruning_stateful_and_dataset_ops = true;
  opts.is_eager_mode = true;
  opts.optimize_function_library = false;
  return opts;
}
// Returns a copy of this item whose graph is replaced by `graph_def` (taken
// by move via Swap); every other field is copied verbatim.
GrapplerItem GrapplerItem::WithGraph(GraphDef&& graph_def) const {
  GrapplerItem result;
  result.id = id;
  result.feed = feed;
  result.fetch = fetch;
  result.init_ops = init_ops;
  result.keep_ops = keep_ops;
  result.expected_init_time = expected_init_time;
  result.save_op = save_op;
  result.restore_op = restore_op;
  result.save_restore_loc_tensor = save_restore_loc_tensor;
  result.queue_runners = queue_runners;
  result.devices_ = devices_;
  result.optimization_options_ = optimization_options_;
  // Swap avoids copying the (potentially large) GraphDef.
  result.graph.Swap(&graph_def);
  return result;
}
// Returns every node in the transitive fanin of the fetch nodes; CHECK-fails
// if the fanin cannot be computed.
std::vector<const NodeDef*> GrapplerItem::MainOpsFanin() const {
  std::vector<const NodeDef*> fanin;
  TF_CHECK_OK(ComputeTransitiveFanin(graph, fetch, &fanin));
  return fanin;
}
// Returns the transitive fanin of the queue runners' enqueue ops, i.e. every
// node needed to execute the enqueue operations registered in
// `queue_runners`. CHECK-fails if the fanin cannot be computed.
//
// Fix: the collected `enqueue_ops` list was previously built but never used;
// the fanin was computed from `fetch` instead, making this function an exact
// duplicate of MainOpsFanin(). The fanin is now computed from the enqueue
// ops themselves, matching the function's name and intent.
std::vector<const NodeDef*> GrapplerItem::EnqueueOpsFanin() const {
  std::vector<string> enqueue_ops;
  for (const auto& queue_runner : queue_runners) {
    for (const string& enqueue_op : queue_runner.enqueue_op_name()) {
      enqueue_ops.push_back(enqueue_op);
    }
  }
  std::vector<const NodeDef*> fanin_nodes;
  TF_CHECK_OK(ComputeTransitiveFanin(graph, enqueue_ops, &fanin_nodes));
  return fanin_nodes;
}
// Returns every node in the transitive fanin of the initialization ops;
// CHECK-fails if the fanin cannot be computed.
std::vector<const NodeDef*> GrapplerItem::InitOpsFanin() const {
  std::vector<const NodeDef*> nodes;
  TF_CHECK_OK(ComputeTransitiveFanin(graph, init_ops, &nodes));
  return nodes;
}
// Returns the Variable nodes reachable from the initialization ops (i.e. the
// variables the init ops are responsible for initializing).
std::vector<const NodeDef*> GrapplerItem::MainVariables() const {
  std::vector<const NodeDef*> init_fanin;
  TF_CHECK_OK(ComputeTransitiveFanin(graph, init_ops, &init_fanin));
  std::vector<const NodeDef*> variables;
  for (const NodeDef* node : init_fanin) {
    if (IsVariable(*node)) {
      variables.push_back(node);
    }
  }
  return variables;
}
// Returns the names of all nodes that optimizers must not remove: fetch and
// feed endpoints, init/keep ops, save/restore machinery, queue-runner ops,
// nodes tagged with the "_grappler_do_not_remove" attribute, and -- unless
// pruning of stateful/dataset ops is allowed -- stateful and dataset nodes.
std::unordered_set<string> GrapplerItem::NodesToPreserve() const {
  std::unordered_set<string> result;
  for (const string& f : fetch) {
    VLOG(1) << "Add fetch " << f;
    result.insert(NodeName(f));
  }
  for (const auto& f : feed) {
    VLOG(1) << "Add feed " << f.first;
    result.insert(NodeName(f.first));
  }
  for (const auto& node : init_ops) {
    result.insert(NodeName(node));
  }
  for (const auto& node : keep_ops) {
    result.insert(NodeName(node));
  }
  // Save/restore ops and their shared filename tensor are optional fields.
  if (!save_op.empty()) {
    result.insert(NodeName(save_op));
  }
  if (!restore_op.empty()) {
    result.insert(NodeName(restore_op));
  }
  if (!save_restore_loc_tensor.empty()) {
    result.insert(NodeName(save_restore_loc_tensor));
  }
  // Every queue runner contributes its enqueue ops plus optional
  // close/cancel ops.
  for (const auto& queue_runner : queue_runners) {
    for (const string& enqueue_op : queue_runner.enqueue_op_name()) {
      result.insert(NodeName(enqueue_op));
    }
    if (!queue_runner.close_op_name().empty()) {
      result.insert(NodeName(queue_runner.close_op_name()));
    }
    if (!queue_runner.cancel_op_name().empty()) {
      result.insert(NodeName(queue_runner.cancel_op_name()));
    }
  }
  // The function library is only materialized when stateful-op detection is
  // needed below; it must outlive the loop, hence the optional at this scope.
  absl::optional<FunctionLibraryDefinition> fn_library;
  if (!optimization_options_.allow_pruning_stateful_and_dataset_ops) {
    fn_library.emplace(OpRegistry::Global(), graph.library());
  }
  for (const NodeDef& node : graph.node()) {
    const auto attrs = AttrSlice(&node.attr());
    // Tensorflow functions have a separate instantiation of the stateful-op
    // check; fn_library is only dereferenced when pruning is disallowed.
    if (!optimization_options_.allow_pruning_stateful_and_dataset_ops &&
        (IsStateful(node, &*fn_library) || IsDataset(node))) {
      result.insert(node.name());
    }
    // Nodes can opt out of removal explicitly via this attribute.
    bool do_not_remove;
    if (TryGetNodeAttr(attrs, "_grappler_do_not_remove", &do_not_remove) &&
        do_not_remove) {
      result.insert(node.name());
    }
  }
  return result;
}
// Returns the set of devices (canonical full-name strings) known to this
// item.
const std::unordered_set<string>& GrapplerItem::devices() const {
  return devices_;
}
// Validates `device` as a fully specified device name (job, replica, task,
// type, and id all present) and records it in canonical string form.
// Returns InvalidArgument if the name is malformed or incomplete.
Status GrapplerItem::AddDevice(const string& device) {
  DeviceNameUtils::ParsedName name;
  if (!DeviceNameUtils::ParseFullName(device, &name)) {
    return errors::InvalidArgument("Invalid device name: device=", device);
  }
  const bool fully_defined = name.has_job && name.has_replica &&
                             name.has_task && name.has_type && name.has_id;
  if (!fully_defined) {
    return errors::InvalidArgument("Not a fully defined device name: device=",
                                   device);
  }
  devices_.insert(DeviceNameUtils::ParsedNameToString(name));
  return absl::OkStatus();
}
// Merges the device set of `other` into this item. Devices failing
// validation are skipped and listed in the returned InvalidArgument error.
Status GrapplerItem::AddDevices(const GrapplerItem& other) {
  std::vector<absl::string_view> invalid_devices;
  for (const string& device : other.devices()) {
    if (!AddDevice(device).ok()) {
      invalid_devices.emplace_back(device);
    }
  }
  if (invalid_devices.empty()) {
    return absl::OkStatus();
  }
  return errors::InvalidArgument("Skipped invalid devices: [",
                                 absl::StrJoin(invalid_devices, ", "), "]");
}
// Populates the device set from the devices assigned to graph nodes.
// Invalid or incomplete device strings are collected (deduplicated) and
// reported in the returned InvalidArgument error.
Status GrapplerItem::InferDevicesFromGraph() {
  absl::flat_hash_set<absl::string_view> invalid_devices;
  for (const NodeDef& node : graph.node()) {
    if (!AddDevice(node.device()).ok()) {
      invalid_devices.insert(node.device());
    }
  }
  VLOG(2) << "Inferred device set: [" << absl::StrJoin(devices_, ", ") << "]";
  if (invalid_devices.empty()) {
    return absl::OkStatus();
  }
  return errors::InvalidArgument("Skipped invalid devices: [",
                                 absl::StrJoin(invalid_devices, ", "), "]");
}
// Drops all recorded devices; AddDevice*/InferDevicesFromGraph can
// repopulate the set afterwards.
void GrapplerItem::ClearDevices() { devices_.clear(); }
// Read-only access to the per-item optimization options.
const GrapplerItem::OptimizationOptions& GrapplerItem::optimization_options()
    const {
  return optimization_options_;
}
// Mutable access to the per-item optimization options.
GrapplerItem::OptimizationOptions& GrapplerItem::optimization_options() {
  return optimization_options_;
}
}
} | #include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
// Test fixture for GrapplerItem; no shared setup is required.
class GrapplerItemTest : public ::testing::Test {};
// With no init ops, the fanin of the fetch nodes must cover the whole graph.
TEST_F(GrapplerItemTest, Basic) {
  TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {{"CPU:0"}});
  GrapplerItem item;
  CHECK(fake_input.NextItem(&item));
  EXPECT_TRUE(item.InitOpsFanin().empty());
  std::vector<string> graph_nodes;
  graph_nodes.reserve(item.graph.node_size());
  for (const auto& node : item.graph.node()) {
    graph_nodes.push_back(node.name());
  }
  std::vector<string> main_ops;
  for (const NodeDef* node : item.MainOpsFanin()) {
    main_ops.push_back(node->name());
  }
  // Compare as sorted name lists since node order is not significant.
  std::sort(graph_nodes.begin(), graph_nodes.end());
  std::sort(main_ops.begin(), main_ops.end());
  EXPECT_EQ(main_ops, graph_nodes);
}
// Device inference keeps the fully specified device names and reports the
// partially specified one as an error.
TEST_F(GrapplerItemTest, InferDevices) {
  using test::function::NDef;
  const string cpu0 = "/job:work/replica:1/task:1/device:CPU:0";
  const string cpu1 = "/job:work/replica:1/task:1/device:CPU:1";
  const string cpu2 = "/device:CPU:2";
  GrapplerItem item;
  item.graph = test::function::GDef(
      {
          NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu0),
          NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu1),
          NDef("c", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu2),
      },
      {} );
  // cpu2 lacks job/replica/task, so inference fails overall but still
  // records the two valid devices.
  ASSERT_FALSE(item.InferDevicesFromGraph().ok());
  EXPECT_EQ(item.devices().size(), 2);
  EXPECT_EQ(item.devices().count(cpu0), 1);
  EXPECT_EQ(item.devices().count(cpu1), 1);
  item.ClearDevices();
  EXPECT_TRUE(item.devices().empty());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/grappler_item.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/grappler_item_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1ea6cafa-d2ab-4fd4-aa3e-15210d5072f6 | cpp | tensorflow/tensorflow | pack | tensorflow/lite/kernels/pack.cc | tensorflow/lite/delegates/hexagon/builders/tests/pack_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace pack {
namespace {
constexpr int kOutputTensor = 0;
// Validates the PACK node's inputs and resizes its output tensor.
//
// Requires exactly `values_count` inputs and one output. All inputs must
// share the same shape and element type; the output shape equals the input
// shape with `values_count` inserted at the (normalized) pack axis. For
// quantized types the inputs' scale/zero_point must match the output's.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TfLitePackParams* data =
      reinterpret_cast<TfLitePackParams*>(node->builtin_data);
  TF_LITE_ENSURE_EQ(context, NumInputs(node), data->values_count);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input0;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input0));
  // Output rank is input rank + 1. A negative axis counts from the back of
  // the output dimensions; the normalized value is written back into the
  // builtin data and reused by Eval.
  const int dimension_size = NumDimensions(input0) + 1;
  if (data->axis < 0) {
    data->axis += dimension_size;
  }
  TF_LITE_ENSURE(context, NumDimensions(input0) >= data->axis);
  TF_LITE_ENSURE(context, data->axis >= 0);
  // Reject element types this kernel does not implement.
  if (input0->type != kTfLiteInt32 && input0->type != kTfLiteFloat32 &&
      input0->type != kTfLiteUInt8 && input0->type != kTfLiteUInt32 &&
      input0->type != kTfLiteInt8 && input0->type != kTfLiteInt16 &&
      input0->type != kTfLiteInt64) {
    TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by pack.",
                       TfLiteTypeGetName(input0->type));
    return kTfLiteError;
  }
  // Every remaining input must match input 0 in shape and type.
  for (int i = 1; i < data->values_count; ++i) {
    const TfLiteTensor* input;
    TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &input));
    TF_LITE_ENSURE(context, HaveSameShapes(input0, input));
    TF_LITE_ENSURE_TYPES_EQ(context, input0->type, input->type);
  }
  // Build the output shape: copy input dims, inserting values_count at axis.
  const TfLiteIntArray* input_shape = input0->dims;
  TfLiteIntArray* output_shape = TfLiteIntArrayCreate(dimension_size);
  int i = 0;
  for (int index = 0; index < dimension_size; ++index) {
    if (index == data->axis) {
      output_shape->data[index] = data->values_count;
    } else {
      output_shape->data[index] = input_shape->data[i++];
    }
  }
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, output->type, input0->type);
  // Quantization params must match exactly so packing can be a raw copy.
  for (int i = 0; i < data->values_count; i++) {
    const TfLiteTensor* input;
    TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &input));
    TF_LITE_ENSURE_EQ(context, input->params.zero_point,
                      output->params.zero_point);
    TF_LITE_ENSURE_EQ(context, input->params.scale, output->params.scale);
  }
  return context->ResizeTensor(context, output, output_shape);
}
template <typename T>
TfLiteStatus PackImpl(TfLiteContext* context, TfLiteNode* node,
TfLiteTensor* output, int values_count, int axis) {
TF_LITE_ENSURE(context, axis >= 0);
VectorOfTensors<T> all_inputs(*context, *node->inputs);
tflite::PackParams op_params;
op_params.axis = axis;
op_params.inputs_count = values_count;
reference_ops::Pack<T>(op_params, all_inputs.shapes(), all_inputs.data(),
GetTensorShape(output), GetTensorData<T>(output));
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLitePackParams* data =
reinterpret_cast<TfLitePackParams*>(node->builtin_data);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (output->type) {
case kTfLiteInt8:
case kTfLiteUInt8:
return PackImpl<int8_t>(context, node, output, data->values_count,
data->axis);
case kTfLiteInt16:
return PackImpl<int16_t>(context, node, output, data->values_count,
data->axis);
case kTfLiteFloat32:
case kTfLiteInt32:
case kTfLiteUInt32:
return PackImpl<int32_t>(context, node, output, data->values_count,
data->axis);
case kTfLiteInt64:
return PackImpl<int64_t>(context, node, output, data->values_count,
data->axis);
default: {
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by pack.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
}
}
}
}
TfLiteRegistration* Register_PACK() {
static TfLiteRegistration r = {nullptr, nullptr, pack::Prepare, pack::Eval};
return &r;
}
}
}
} | #include <initializer_list>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using testing::ElementsAreArray;
class PackOpModel : public SingleOpModelWithHexagon {
public:
PackOpModel(const TensorData& input_template, int axis, int values_count) {
std::vector<std::vector<int>> all_input_shapes;
for (int i = 0; i < values_count; ++i) {
all_input_shapes.push_back(input_template.shape);
AddInput(input_template);
}
output_ = AddOutput({input_template.type, {}, input_template.min,
input_template.max});
SetBuiltinOp(BuiltinOperator_PACK, BuiltinOptions_PackOptions,
CreatePackOptions(builder_, values_count, axis).Union());
BuildInterpreter(all_input_shapes);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
template <typename integer_type>
void SetInput(int index, std::initializer_list<float> data) {
QuantizeAndPopulate<integer_type>(index, data);
}
template <typename integer_type>
std::vector<float> GetDequantizedOutput() {
return Dequantize<integer_type>(ExtractVector<integer_type>(output_),
GetScale(output_), GetZeroPoint(output_));
}
private:
int output_;
};
template <typename InputType>
struct PackOpTest : public ::testing::Test {
using TypeToTest = InputType;
TensorType TENSOR_TYPE =
(std::is_same<InputType, int16_t>::value
? TensorType_INT16
: (std::is_same<InputType, uint8_t>::value ? TensorType_UINT8
: TensorType_INT8));
};
using TestTypes = testing::Types<int8_t, uint8_t>;
TYPED_TEST_CASE(PackOpTest, TestTypes);
TYPED_TEST(PackOpTest, ThreeInputs) {
PackOpModel model({TestFixture::TENSOR_TYPE, {2}, -10, 10}, 0, 3);
model.SetInput<typename TestFixture::TypeToTest>(0, {1, 4});
model.SetInput<typename TestFixture::TypeToTest>(1, {2, 5});
model.SetInput<typename TestFixture::TypeToTest>(2, {3, 6});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
auto ref_output_shape = model.GetOutputShape();
auto ref_output =
model.GetDequantizedOutput<typename TestFixture::TypeToTest>();
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(ref_output_shape));
EXPECT_THAT(model.GetDequantizedOutput<typename TestFixture::TypeToTest>(),
ElementsAreArray(ArrayFloatNear(ref_output)));
}
TYPED_TEST(PackOpTest, ThreeInputsDifferentAxis) {
PackOpModel model({TestFixture::TENSOR_TYPE, {2}, -10, 10}, 1, 3);
model.SetInput<typename TestFixture::TypeToTest>(0, {1, 4});
model.SetInput<typename TestFixture::TypeToTest>(1, {2, 5});
model.SetInput<typename TestFixture::TypeToTest>(2, {3, 6});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
auto ref_output_shape = model.GetOutputShape();
auto ref_output =
model.GetDequantizedOutput<typename TestFixture::TypeToTest>();
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(ref_output_shape));
EXPECT_THAT(model.GetDequantizedOutput<typename TestFixture::TypeToTest>(),
ElementsAreArray(ArrayFloatNear(ref_output)));
}
TYPED_TEST(PackOpTest, ThreeInputsNegativeAxis) {
PackOpModel model({TestFixture::TENSOR_TYPE, {2}, -10, 10}, -1, 3);
model.SetInput<typename TestFixture::TypeToTest>(0, {1, 4});
model.SetInput<typename TestFixture::TypeToTest>(1, {2, 5});
model.SetInput<typename TestFixture::TypeToTest>(2, {3, 6});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
auto ref_output_shape = model.GetOutputShape();
auto ref_output =
model.GetDequantizedOutput<typename TestFixture::TypeToTest>();
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(ref_output_shape));
EXPECT_THAT(model.GetDequantizedOutput<typename TestFixture::TypeToTest>(),
ElementsAreArray(ArrayFloatNear(ref_output)));
}
TYPED_TEST(PackOpTest, MultilDimensions) {
PackOpModel model({TestFixture::TENSOR_TYPE, {2, 3}, -10, 20}, 1, 2);
model.SetInput<typename TestFixture::TypeToTest>(0, {1, 2, 3, 4, 5, 6});
model.SetInput<typename TestFixture::TypeToTest>(1, {7, 8, 9, 10, 11, 12});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
auto ref_output_shape = model.GetOutputShape();
auto ref_output =
model.GetDequantizedOutput<typename TestFixture::TypeToTest>();
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(ref_output_shape));
EXPECT_THAT(model.GetDequantizedOutput<typename TestFixture::TypeToTest>(),
ElementsAreArray(ArrayFloatNear(ref_output)));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/pack.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/hexagon/builders/tests/pack_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1b0d1084-8d5e-4768-bdf2-88e35ad4abd6 | cpp | tensorflow/tensorflow | batch_dot_simplification | third_party/xla/xla/service/batch_dot_simplification.cc | third_party/xla/xla/service/batch_dot_simplification_test.cc | #include "xla/service/batch_dot_simplification.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
absl::StatusOr<bool>
BatchDotSimplification::ElideDegenerateBatchDimensionFromBatchDot(
HloInstruction* batch_dot) {
if (Cast<HloDotInstruction>(batch_dot)->sparse_operands()) {
return false;
}
const auto& is_iota = [](absl::Span<const int64_t> dims) {
for (int64_t i = 0; i < dims.size(); ++i) {
if (dims[i] != i) {
return false;
}
}
return true;
};
if (!absl::c_equal(
batch_dot->dot_dimension_numbers().lhs_batch_dimensions(),
batch_dot->dot_dimension_numbers().rhs_batch_dimensions()) ||
!is_iota(batch_dot->dot_dimension_numbers().lhs_batch_dimensions())) {
return false;
}
const DotDimensionNumbers& dim_numbers = batch_dot->dot_dimension_numbers();
HloInstruction *lhs = batch_dot->mutable_operand(0),
*rhs = batch_dot->mutable_operand(1);
const Shape& lhs_shape = lhs->shape();
if (dim_numbers.lhs_contracting_dimensions_size() != 1) {
return false;
}
std::vector<int64_t> degenerate_dims;
for (int64_t batch_dim : dim_numbers.lhs_batch_dimensions()) {
if (lhs_shape.dimensions(batch_dim) == 1) {
degenerate_dims.push_back(batch_dim);
}
}
if (degenerate_dims.empty()) {
return false;
}
TF_ASSIGN_OR_RETURN(HloInstruction * new_lhs,
ElideDegenerateDims(lhs, degenerate_dims));
TF_ASSIGN_OR_RETURN(HloInstruction * new_rhs,
ElideDegenerateDims(rhs, degenerate_dims));
DotDimensionNumbers new_dim_numbers = dim_numbers;
new_dim_numbers.clear_lhs_batch_dimensions();
new_dim_numbers.clear_rhs_batch_dimensions();
for (int64_t i = 0, e = dim_numbers.lhs_batch_dimensions_size() -
degenerate_dims.size();
i < e; i++) {
new_dim_numbers.add_lhs_batch_dimensions(i);
new_dim_numbers.add_rhs_batch_dimensions(i);
}
new_dim_numbers.set_lhs_contracting_dimensions(
0,
new_dim_numbers.lhs_contracting_dimensions(0) - degenerate_dims.size());
new_dim_numbers.set_rhs_contracting_dimensions(
0,
new_dim_numbers.rhs_contracting_dimensions(0) - degenerate_dims.size());
TF_ASSIGN_OR_RETURN(
HloInstruction * new_dot,
MakeDotHlo(new_lhs, new_rhs, new_dim_numbers,
batch_dot->precision_config(),
batch_dot->shape().element_type()));
TF_ASSIGN_OR_RETURN(HloInstruction * new_dot_reshaped,
MakeReshapeHlo(batch_dot->shape(), new_dot));
VLOG(2) << "Replaced " << batch_dot->ToString() << " with "
<< new_dot->ToString();
TF_RETURN_IF_ERROR(
batch_dot->parent()->ReplaceInstruction(batch_dot, new_dot_reshaped));
return true;
}
absl::StatusOr<bool> BatchDotSimplification::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
std::vector<HloInstruction*> dot_instrs;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
absl::c_copy_if(computation->instructions(), std::back_inserter(dot_instrs),
[](HloInstruction* instr) {
return instr->opcode() == HloOpcode::kDot;
});
}
for (HloInstruction* dot_instr : dot_instrs) {
TF_ASSIGN_OR_RETURN(bool elided_batch_dim_from_one,
ElideDegenerateBatchDimensionFromBatchDot(dot_instr));
changed |= elided_batch_dim_from_one;
}
return changed;
}
} | #include "xla/service/batch_dot_simplification.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class BatchDotSimplificationTest : public HloTestBase {};
TEST_F(BatchDotSimplificationTest,
ElideSingleDegenerateBatchDotDim_VectorVector) {
const std::string hlo_text = R"(
HloModule BatchDot
main {
a = f32[1,3] parameter(0)
b = f32[1,3] parameter(1)
ROOT dot = f32[1] dot(a, b), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_TRUE(pass.Run(m.get()).value());
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_THAT(root,
op::Reshape(op::Dot(
op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),
0, 0)));
}
TEST_F(BatchDotSimplificationTest,
ElideSingleDegenerateBatchDotDim_MatrixVector) {
const std::string hlo_text = R"(
HloModule BatchDot
main {
a = f32[1,9,3] parameter(0)
b = f32[1,3] parameter(1)
ROOT dot = f32[1,9] dot(a, b), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_TRUE(pass.Run(m.get()).value());
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_THAT(root,
op::Reshape(op::Dot(
op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),
1, 0)));
}
TEST_F(BatchDotSimplificationTest,
ElideSingleDegenerateBatchDotDim_MatrixMatrix) {
const std::string hlo_text = R"(
HloModule BatchDot
main {
a = f32[1,9,3] parameter(0)
b = f32[1,3,7] parameter(1)
ROOT dot = f32[1,9,7] dot(a, b), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_TRUE(pass.Run(m.get()).value());
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_THAT(root,
op::Reshape(op::Dot(
op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),
1, 0)));
}
TEST_F(BatchDotSimplificationTest,
ElideMultipleDegenerateBatchDotDims_VectorVector) {
const std::string hlo_text = R"(
HloModule BatchDot
main {
a = f32[9,1,7,1,3] parameter(0)
b = f32[9,1,7,1,3] parameter(1)
ROOT dot = f32[9,1,7,1] dot(a, b), lhs_batch_dims={0,1,2,3}, rhs_batch_dims={0,1,2,3}, lhs_contracting_dims={4}, rhs_contracting_dims={4}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_TRUE(pass.Run(m.get()).value());
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_THAT(root,
op::Reshape(op::Dot(
op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),
2, 2)));
}
TEST_F(BatchDotSimplificationTest,
ElideMultipleDegenerateBatchDotDims_VectorMatrix) {
const std::string hlo_text = R"(
HloModule BatchDot
main {
a = f32[9,1,7,1,3] parameter(0)
b = f32[9,1,7,1,20,3] parameter(1)
ROOT dot = f32[9,1,7,1,20] dot(a, b), lhs_batch_dims={0,1,2,3}, rhs_batch_dims={0,1,2,3}, lhs_contracting_dims={4}, rhs_contracting_dims={5}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_TRUE(pass.Run(m.get()).value());
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_THAT(root,
op::Reshape(op::Dot(
op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),
2, 3)));
}
TEST_F(BatchDotSimplificationTest,
ElideMultipleDegenerateBatchDotDims_MatrixMatrix) {
const std::string hlo_text = R"(
HloModule BatchDot
main {
a = f32[9,1,7,1,19,3] parameter(0)
b = f32[9,1,7,1,3,20] parameter(1)
ROOT dot = f32[9,1,7,1,19,20] dot(a, b), lhs_batch_dims={0,1,2,3}, rhs_batch_dims={0,1,2,3}, lhs_contracting_dims={5}, rhs_contracting_dims={4}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_TRUE(pass.Run(m.get()).value());
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_THAT(root,
op::Reshape(op::Dot(
op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),
3, 2)));
}
TEST_F(BatchDotSimplificationTest,
ElideMultipleDegenerateBatchDotDimsNonContracting) {
const char* hlo_text = R"(
HloModule BatchDot
main {
a = f32[1,101] parameter(0)
b = f32[1,101] parameter(1)
ROOT dot = f32[1,101,101] dot(a,b), lhs_batch_dims={0},
lhs_contracting_dims={},
rhs_batch_dims={0},
rhs_contracting_dims={}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_FALSE(pass.Run(m.get()).value());
}
TEST_F(BatchDotSimplificationTest,
ElideMultipleDegenerateBatchDotDimsMultipleContracting) {
const char* hlo_text = R"(
HloModule BatchDot
main {
lhs = f32[1,5,17,10,13] parameter(0)
rhs = f32[1,9,10,13,6,5] parameter(1)
ROOT dot = f32[10,1,17,9,6] dot(lhs,rhs), lhs_batch_dims={3,0},
rhs_batch_dims={2,0},
lhs_contracting_dims={1,4},
rhs_contracting_dims={5,3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_FALSE(pass.Run(m.get()).value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/batch_dot_simplification.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/batch_dot_simplification_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
09263936-949c-419b-9c6c-b6f3f0944461 | cpp | google/tensorstore | array_endian_codec | tensorstore/internal/riegeli/array_endian_codec.cc | tensorstore/internal/riegeli/array_endian_codec_test.cc | #include "tensorstore/internal/riegeli/array_endian_codec.h"
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <memory>
#include <string_view>
#include <utility>
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "riegeli/base/chain.h"
#include "riegeli/bytes/copy_all.h"
#include "riegeli/bytes/limiting_reader.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/metrics/metadata.h"
#include "tensorstore/internal/unaligned_data_type_functions.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
using ::tensorstore::internal_metrics::MetricMetadata;
namespace tensorstore {
namespace internal {
namespace {
auto& contiguous_bytes = internal_metrics::Counter<int64_t>::New(
"/tensorstore/internal/riegeli/contiguous_bytes",
MetricMetadata("Endian codec bytes from contiguous buffers",
internal_metrics::Units::kBytes));
auto& noncontiguous_bytes = internal_metrics::Counter<int64_t>::New(
"/tensorstore/internal/riegeli/noncontiguous_bytes",
MetricMetadata("Endian codec bytes from non-contiguous buffers",
internal_metrics::Units::kBytes));
}
[[nodiscard]] bool EncodeArrayEndian(SharedArrayView<const void> decoded,
endian encoded_endian,
ContiguousLayoutOrder order,
riegeli::Writer& writer) {
const auto& functions =
kUnalignedDataTypeFunctions[static_cast<size_t>(decoded.dtype().id())];
assert(functions.copy != nullptr);
if ((encoded_endian == endian::native ||
functions.swap_endian_inplace == nullptr) &&
IsContiguousLayout(decoded, order)) {
const size_t length = decoded.num_elements() * decoded.dtype().size();
if (writer.PrefersCopying()) {
return writer.Write(std::string_view(
reinterpret_cast<const char*>(decoded.data()), length));
}
return writer.Write(
internal::MakeCordFromSharedPtr(std::move(decoded.pointer()), length));
}
const internal::ElementwiseFunction<1, void*>* write_func =
encoded_endian == endian::native ? &functions.write_native_endian
: &functions.write_swapped_endian;
return internal::IterateOverArrays(
{write_func, &writer},
nullptr, {order, include_repeated_elements}, decoded);
}
namespace {
class ContiguousBufferSinkWriter : public riegeli::Writer {
public:
std::shared_ptr<const void> data;
size_t expected_length;
size_t expected_alignment;
void DoFail() { Fail(absl::UnimplementedError("")); }
bool PushSlow(size_t min_length, size_t recommended_length) override {
DoFail();
return false;
}
bool ValidateContiguousBuffer(std::string_view buf) {
if (buf.size() != expected_length ||
(reinterpret_cast<uintptr_t>(buf.data()) % expected_alignment) != 0) {
DoFail();
return false;
}
return true;
}
template <typename T>
bool WriteCordLike(T&& src) {
if (this->data) {
DoFail();
return false;
}
auto buf = src.TryFlat();
if (!buf) {
DoFail();
return false;
}
if (!ValidateContiguousBuffer(*buf)) return false;
auto data = std::make_shared<absl::remove_cvref_t<T>>(std::forward<T>(src));
buf = data->TryFlat();
if (!buf) {
DoFail();
return false;
}
if (!ValidateContiguousBuffer(*buf)) return false;
this->data = std::shared_ptr<const void>(std::move(data), buf->data());
return true;
}
bool WriteSlow(const riegeli::Chain& src) override {
return WriteCordLike(src);
}
bool WriteSlow(const absl::Cord& src) override { return WriteCordLike(src); }
};
}
Result<SharedArray<const void>> DecodeArrayEndian(
riegeli::Reader& reader, DataType dtype, span<const Index> decoded_shape,
endian encoded_endian, ContiguousLayoutOrder order) {
const auto& functions =
kUnalignedDataTypeFunctions[static_cast<size_t>(dtype.id())];
assert(functions.copy != nullptr);
size_t expected_length = dtype.size() * ProductOfExtents(decoded_shape);
const auto may_be_contiguous = [&] {
if (encoded_endian != endian::native &&
functions.swap_endian_inplace != nullptr) {
return false;
}
if (!reader.SupportsRewind()) {
return false;
}
if (!reader.SupportsSize()) {
return false;
}
auto size_opt = reader.Size();
if (!size_opt) return false;
if (*size_opt < expected_length ||
*size_opt - expected_length != reader.pos()) {
return false;
}
return true;
};
if (may_be_contiguous()) {
auto pos = reader.pos();
ContiguousBufferSinkWriter buffer_sink_writer;
buffer_sink_writer.expected_length = expected_length;
buffer_sink_writer.expected_alignment = dtype->alignment;
if (riegeli::CopyAll(reader, buffer_sink_writer, expected_length).ok()) {
absl::Status status;
if (functions.validate) {
if (!(*functions.validate)[IterationBufferKind::kContiguous](
nullptr, {1, static_cast<Index>(expected_length)},
IterationBufferPointer(
const_cast<void*>(buffer_sink_writer.data.get()), 0,
dtype.size()),
&status)) {
return status;
}
}
contiguous_bytes.IncrementBy(expected_length);
return tensorstore::SharedArray<const void>(
SharedElementPointer<const void>(std::move(buffer_sink_writer.data),
dtype),
decoded_shape, order);
}
if (!reader.Seek(pos)) {
return reader.status();
}
}
auto decoded =
tensorstore::AllocateArray(decoded_shape, order, default_init, dtype);
TENSORSTORE_RETURN_IF_ERROR(
DecodeArrayEndian(reader, encoded_endian, order, decoded));
reader.VerifyEnd();
if (!reader.ok()) {
return reader.status();
}
noncontiguous_bytes.IncrementBy(expected_length);
return decoded;
}
absl::Status DecodeArrayEndian(riegeli::Reader& reader, endian encoded_endian,
ContiguousLayoutOrder order,
ArrayView<void> decoded) {
const auto& functions =
kUnalignedDataTypeFunctions[static_cast<size_t>(decoded.dtype().id())];
assert(functions.copy != nullptr);
riegeli::LimitingReader limiting_reader(
&reader, riegeli::LimitingReaderBase::Options().set_exact_length(
decoded.dtype().size() * decoded.num_elements()));
[[maybe_unused]] const auto unused_result = internal::IterateOverArrays(
{encoded_endian == endian::native ? &functions.read_native_endian
: &functions.read_swapped_endian,
&limiting_reader},
nullptr, {order, include_repeated_elements}, decoded);
if (!limiting_reader.VerifyEndAndClose()) {
return limiting_reader.status();
}
return absl::OkStatus();
}
}
} | #include "tensorstore/internal/riegeli/array_endian_codec.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "absl/strings/cord_test_helpers.h"
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/cord_writer.h"
#include "riegeli/bytes/string_reader.h"
#include "riegeli/zlib/zlib_reader.h"
#include "riegeli/zlib/zlib_writer.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/internal/flat_cord_builder.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using tensorstore::AllocateArray;
using tensorstore::c_order;
using tensorstore::ContiguousLayoutOrder;
using tensorstore::DataType;
using tensorstore::dtype_v;
using tensorstore::endian;
using tensorstore::fortran_order;
using tensorstore::Index;
using tensorstore::IsContiguousLayout;
using tensorstore::MatchesStatus;
using tensorstore::Result;
using tensorstore::SharedArray;
using tensorstore::span;
using tensorstore::internal::DecodeArrayEndian;
using tensorstore::internal::EncodeArrayEndian;
using tensorstore::internal::FlatCordBuilder;
Result<absl::Cord> EncodeArrayAsCord(SharedArray<const void> array,
endian endianness,
ContiguousLayoutOrder order) {
absl::Cord encoded;
riegeli::CordWriter writer{&encoded};
if (EncodeArrayEndian(array, endianness, order, writer) && writer.Close()) {
return encoded;
}
return writer.status();
}
Result<SharedArray<const void>> DecodeArrayFromCord(
DataType dtype, span<const Index> decoded_shape, absl::Cord encoded,
endian endianness, ContiguousLayoutOrder order) {
riegeli::CordReader reader{&encoded};
return DecodeArrayEndian(reader, dtype, decoded_shape, endianness, order);
}
template <typename T = uint32_t>
SharedArray<const void> MakeTestArray(ContiguousLayoutOrder order = c_order,
Index a = 1000, Index b = 2000) {
auto c_array = AllocateArray<T>({a, b}, order, tensorstore::default_init);
for (Index a_i = 0; a_i < a; ++a_i) {
for (Index b_i = 0; b_i < b; ++b_i) {
c_array(a_i, b_i) = static_cast<T>(a_i * b + b_i);
}
}
return c_array;
}
TEST(EncodeArrayEndianTest, ContiguousLayout) {
auto c_array = MakeTestArray();
auto f_array = tensorstore::MakeCopy(c_array, fortran_order);
Index num_elements = c_array.num_elements();
ASSERT_TRUE(IsContiguousLayout(c_array, c_order));
ASSERT_TRUE(IsContiguousLayout(f_array, fortran_order));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
absl::Cord c_encoded,
EncodeArrayAsCord(c_array, endian::native, c_order));
{
auto flat = c_encoded.TryFlat();
ASSERT_TRUE(flat);
EXPECT_EQ(reinterpret_cast<const char*>(c_array.data()), flat->data());
EXPECT_EQ(num_elements * c_array.dtype().size(), flat->size());
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
absl::Cord f_encoded,
EncodeArrayAsCord(f_array, endian::native, fortran_order));
{
auto flat = f_encoded.TryFlat();
ASSERT_TRUE(flat);
EXPECT_EQ(reinterpret_cast<const char*>(f_array.data()), flat->data());
EXPECT_EQ(num_elements * c_array.dtype().size(), flat->size());
}
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
absl::Cord encoded,
EncodeArrayAsCord(c_array, endian::native, fortran_order));
EXPECT_EQ(f_encoded, encoded);
}
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
absl::Cord encoded,
EncodeArrayAsCord(f_array, endian::native, c_order));
EXPECT_EQ(c_encoded, encoded);
}
}
Result<SharedArray<const void>> RoundTripArrayViaCord(
SharedArray<const void> array, endian endianness,
ContiguousLayoutOrder order) {
TENSORSTORE_ASSIGN_OR_RETURN(auto encoded,
EncodeArrayAsCord(array, endianness, order));
return DecodeArrayFromCord(array.dtype(), array.shape(), encoded, endianness,
order);
}
template <typename T = uint16_t>
void TestRoundTripNoCopy(ContiguousLayoutOrder order) {
auto orig_array = MakeTestArray<T>(order);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto decoded, RoundTripArrayViaCord(orig_array, endian::native, order));
ASSERT_EQ(orig_array.data(), decoded.data());
}
template <typename T = uint16_t>
void TestRoundTripCopy(ContiguousLayoutOrder order, endian endianness) {
auto orig_array = MakeTestArray<T>(order, 2, 3);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto decoded, RoundTripArrayViaCord(orig_array, endianness, order));
ASSERT_TRUE(tensorstore::AreArraysIdenticallyEqual(orig_array, decoded))
<< "orig_array=" << orig_array << ", decoded=" << decoded;
}
TEST(EncodeArrayEndianTest, BigEndian) {
auto orig_array = MakeTestArray<uint16_t>(c_order, 2, 3);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto encoded, EncodeArrayAsCord(orig_array, endian::big, c_order));
EXPECT_THAT(encoded.Flatten(), ::testing::ElementsAreArray({
0,
0,
0,
1,
0,
2,
0,
3,
0,
4,
0,
5,
}));
}
TEST(DecodeArrayEndianTest, BigEndian) {
auto orig_array = MakeTestArray<uint16_t>(c_order, 2, 3);
std::string encoded{
0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5,
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto decoded,
DecodeArrayFromCord(orig_array.dtype(), orig_array.shape(),
absl::Cord(encoded), endian::big, c_order));
EXPECT_EQ(orig_array, decoded);
}
TEST(EncodeArrayEndianTest, RoundTripNoCopyCOrder) {
TestRoundTripNoCopy(c_order);
}
TEST(EncodeArrayEndianTest, RoundTripNoCopyCOrderBool) {
TestRoundTripNoCopy<bool>(c_order);
}
TEST(DecodeArrayEndianTest, InvalidBool) {
std::string encoded{0, 1, 2, 1};
EXPECT_THAT(DecodeArrayFromCord(dtype_v<bool>, {{2, 2}}, absl::Cord(encoded),
endian::native, c_order),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid bool value: 2; at byte 2"));
}
TEST(DecodeArrayEndianTest, InvalidBoolNoCopy) {
std::string encoded;
FlatCordBuilder builder(1000 * 2000);
std::fill_n(builder.data(), builder.size(), 0);
builder.data()[builder.size() - 1] = 2;
EXPECT_THAT(
DecodeArrayFromCord(dtype_v<bool>, {{1000, 2000}},
std::move(builder).Build(), endian::native, c_order),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid bool value: 2"));
}
TEST(EncodeArrayEndianTest, RoundTripNoCopyFOrder) {
TestRoundTripNoCopy(fortran_order);
}
TEST(EncodeArrayEndianTest, RoundTripCopyCOrderBig) {
TestRoundTripCopy(c_order, endian::big);
}
TEST(EncodeArrayEndianTest, RoundTripCopyCOrderLittle) {
TestRoundTripCopy(c_order, endian::little);
}
TEST(EncodeArrayEndianTest, RoundTripCopyFOrderBig) {
TestRoundTripCopy(fortran_order, endian::big);
}
TEST(EncodeArrayEndianTest, RoundTripCopyFOrderLittle) {
TestRoundTripCopy(fortran_order, endian::little);
}
TEST(DecodeArrayEndianTest, StringReader) {
auto orig_array = MakeTestArray<uint8_t>(c_order, 2, 3);
std::string encoded{
0, 1, 2, 3, 4, 5,
};
riegeli::StringReader reader{encoded};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto decoded,
DecodeArrayEndian(reader, orig_array.dtype(), orig_array.shape(),
endian::native, c_order));
EXPECT_EQ(orig_array, decoded);
}
TEST(DecodeArrayEndianTest, LengthTooShort) {
auto orig_array = MakeTestArray<uint8_t>(c_order, 2, 3);
std::string encoded{
0, 1, 2, 3, 4,
};
riegeli::StringReader reader{encoded};
EXPECT_THAT(
DecodeArrayEndian(reader, orig_array.dtype(), orig_array.shape(),
endian::native, c_order),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Not enough data.*"));
}
TEST(DecodeArrayEndianTest, LengthTooLong) {
auto orig_array = MakeTestArray<uint8_t>(c_order, 2, 3);
std::string encoded{
0, 1, 2, 3, 4, 5, 6,
};
riegeli::StringReader reader{encoded};
EXPECT_THAT(DecodeArrayEndian(reader, orig_array.dtype(), orig_array.shape(),
endian::native, c_order),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"End of data expected.*"));
}
TEST(EncodeArrayEndianTest, Zlib) {
auto orig_array = MakeTestArray<uint16_t>(c_order);
absl::Cord encoded;
{
riegeli::ZlibWriter writer{riegeli::CordWriter{&encoded}};
ASSERT_TRUE(EncodeArrayEndian(orig_array, endian::native, c_order, writer));
ASSERT_TRUE(writer.Close());
}
{
riegeli::ZlibReader reader{riegeli::CordReader{encoded}};
EXPECT_THAT(DecodeArrayEndian(reader, orig_array.dtype(),
orig_array.shape(), endian::native, c_order),
::testing::Optional(orig_array));
}
}
TEST(DecodeArrayEndianTest, Misaligned) {
int a = 1000, b = 2000;
int num_elements = a * b;
size_t buffer_size = 1000 * 2000 * 2 + 1;
std::unique_ptr<char[]> source(new char[1000 * 2000 * 2 + 1]);
for (int i = 0; i < num_elements; ++i) {
uint16_t x = static_cast<uint16_t>(i);
memcpy(&source[i * 2 + 1], &x, 2);
}
auto cord = absl::MakeCordFromExternal(
std::string_view(source.get() + 1, buffer_size - 1), [] {});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto decoded, DecodeArrayFromCord(dtype_v<uint16_t>, {{1000, 2000}}, cord,
endian::native, c_order));
ASSERT_NE(decoded.data(), &source[1]);
EXPECT_THAT(decoded, MakeTestArray<uint16_t>(c_order));
}
TEST(DecodeArrayEndianTest, Fragmented) {
auto c_array = MakeTestArray<uint16_t>();
size_t total_bytes = c_array.num_elements() * c_array.dtype().size();
std::vector<absl::Cord> parts{
absl::MakeCordFromExternal(
std::string_view(reinterpret_cast<const char*>(c_array.data()),
total_bytes / 2),
[] {}),
absl::MakeCordFromExternal(
std::string_view(
reinterpret_cast<const char*>(c_array.data()) + total_bytes / 2,
total_bytes / 2),
[] {})};
absl::Cord cord = absl::MakeFragmentedCord(parts);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto decoded, DecodeArrayFromCord(dtype_v<uint16_t>, {{1000, 2000}}, cord,
endian::native, c_order));
EXPECT_THAT(decoded, MakeTestArray<uint16_t>(c_order));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/riegeli/array_endian_codec.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/riegeli/array_endian_codec_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
05232ffb-1c4d-4174-b2a9-2e9bb1d48464 | cpp | google/cel-cpp | portable_cel_expr_builder_factory | eval/public/portable_cel_expr_builder_factory.cc | eval/public/portable_cel_expr_builder_factory_test.cc | #include "eval/public/portable_cel_expr_builder_factory.h"
#include <memory>
#include <utility>
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "base/ast_internal/ast_impl.h"
#include "base/kind.h"
#include "common/memory.h"
#include "common/values/legacy_type_reflector.h"
#include "eval/compiler/cel_expression_builder_flat_impl.h"
#include "eval/compiler/comprehension_vulnerability_check.h"
#include "eval/compiler/constant_folding.h"
#include "eval/compiler/flat_expr_builder.h"
#include "eval/compiler/flat_expr_builder_extensions.h"
#include "eval/compiler/qualified_reference_resolver.h"
#include "eval/compiler/regex_precompilation_optimization.h"
#include "eval/public/cel_expression.h"
#include "eval/public/cel_function.h"
#include "eval/public/cel_options.h"
#include "eval/public/structs/legacy_type_provider.h"
#include "extensions/protobuf/memory_manager.h"
#include "extensions/select_optimization.h"
#include "runtime/runtime_options.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::MemoryManagerRef;
using ::cel::ast_internal::AstImpl;
using ::cel::extensions::CreateSelectOptimizationProgramOptimizer;
using ::cel::extensions::kCelAttribute;
using ::cel::extensions::kCelHasField;
using ::cel::extensions::ProtoMemoryManagerRef;
using ::cel::extensions::SelectOptimizationAstUpdater;
using ::cel::runtime_internal::CreateConstantFoldingOptimizer;
// Program-optimizer factory that runs constant folding with folded values
// allocated from a caller-supplied (arena-backed) memory manager.
struct ArenaBackedConstfoldingFactory {
  MemoryManagerRef memory_manager;
  absl::StatusOr<std::unique_ptr<ProgramOptimizer>> operator()(
      PlannerContext& ctx, const AstImpl& ast) const {
    // Delegate to the shared constant-folding optimizer, binding our manager.
    return CreateConstantFoldingOptimizer(memory_manager)(ctx, ast);
  }
};
}
// Builds a CelExpressionBuilder configured for portable (lite-proto) use,
// driven entirely by |options|. Returns nullptr when |type_provider| is null.
// The registration order below is deliberate: AST transforms are added before
// program optimizers, and optimizers run in the order they are added.
std::unique_ptr<CelExpressionBuilder> CreatePortableExprBuilder(
    std::unique_ptr<LegacyTypeProvider> type_provider,
    const InterpreterOptions& options) {
  if (type_provider == nullptr) {
    ABSL_LOG(ERROR) << "Cannot pass nullptr as type_provider to "
                       "CreatePortableExprBuilder";
    return nullptr;
  }
  cel::RuntimeOptions runtime_options = ConvertToRuntimeOptions(options);
  auto builder =
      std::make_unique<CelExpressionBuilderFlatImpl>(runtime_options);
  builder->GetTypeRegistry()
      ->InternalGetModernRegistry()
      .set_use_legacy_container_builders(options.use_legacy_container_builders);
  builder->GetTypeRegistry()->RegisterTypeProvider(std::move(type_provider));
  FlatExprBuilder& flat_expr_builder = builder->flat_expr_builder();
  // Resolve qualified references always or only for checked expressions,
  // depending on the option.
  flat_expr_builder.AddAstTransform(NewReferenceResolverExtension(
      (options.enable_qualified_identifier_rewrites)
          ? ReferenceResolverOption::kAlways
          : ReferenceResolverOption::kCheckedOnly));
  if (options.enable_comprehension_vulnerability_check) {
    builder->flat_expr_builder().AddProgramOptimizer(
        CreateComprehensionVulnerabilityCheck());
  }
  if (options.constant_folding) {
    builder->flat_expr_builder().AddProgramOptimizer(
        ArenaBackedConstfoldingFactory{
            ProtoMemoryManagerRef(options.constant_arena)});
  }
  if (options.enable_regex_precompilation) {
    flat_expr_builder.AddProgramOptimizer(
        CreateRegexPrecompilationExtension(options.regex_max_program_size));
  }
  if (options.enable_select_optimization) {
    flat_expr_builder.AddAstTransform(
        std::make_unique<SelectOptimizationAstUpdater>());
    // Registration failures for the helper functions are logged but not
    // fatal: the builder remains usable.
    absl::Status status =
        builder->GetRegistry()->RegisterLazyFunction(CelFunctionDescriptor(
            kCelAttribute, false, {cel::Kind::kAny, cel::Kind::kList}));
    if (!status.ok()) {
      ABSL_LOG(ERROR) << "Failed to register " << kCelAttribute << ": "
                      << status;
    }
    status = builder->GetRegistry()->RegisterLazyFunction(CelFunctionDescriptor(
        kCelHasField, false, {cel::Kind::kAny, cel::Kind::kList}));
    if (!status.ok()) {
      ABSL_LOG(ERROR) << "Failed to register " << kCelHasField << ": "
                      << status;
    }
    flat_expr_builder.AddProgramOptimizer(
        CreateSelectOptimizationProgramOptimizer());
  }
  return builder;
}
} | #include "eval/public/portable_cel_expr_builder_factory.h"
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/timestamp.pb.h"
#include "google/protobuf/wrappers.pb.h"
#include "absl/container/node_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "eval/public/activation.h"
#include "eval/public/builtin_func_registrar.h"
#include "eval/public/cel_options.h"
#include "eval/public/cel_value.h"
#include "eval/public/structs/legacy_type_adapter.h"
#include "eval/public/structs/legacy_type_info_apis.h"
#include "eval/public/structs/legacy_type_provider.h"
#include "eval/testutil/test_message.pb.h"
#include "extensions/protobuf/memory_manager.h"
#include "internal/casts.h"
#include "internal/proto_time_encoding.h"
#include "internal/testing.h"
#include "parser/parser.h"
namespace google::api::expr::runtime {
namespace {
using ::google::api::expr::v1alpha1::ParsedExpr;
using ::google::protobuf::Int64Value;
// Converts the two supported well-known protos (Duration, Timestamp) into
// their first-class CelValue equivalents; any other type yields nullopt.
absl::optional<CelValue> Unwrap(const google::protobuf::MessageLite* wrapper) {
  const auto type_name = wrapper->GetTypeName();
  if (type_name == "google.protobuf.Duration") {
    return CelValue::CreateDuration(cel::internal::DecodeDuration(
        *cel::internal::down_cast<const google::protobuf::Duration*>(wrapper)));
  }
  if (type_name == "google.protobuf.Timestamp") {
    return CelValue::CreateTimestamp(cel::internal::DecodeTime(
        *cel::internal::down_cast<const google::protobuf::Timestamp*>(
            wrapper)));
  }
  return absl::nullopt;
}
// Maps native C++ values onto CelValue. The unconstrained template overload
// is the fallback for unsupported types and yields nullopt; the non-template
// overloads below are preferred by overload resolution for supported types.
struct NativeToCelValue {
  template <typename T>
  absl::optional<CelValue> Convert(T arg) const {
    return absl::nullopt;
  }
  absl::optional<CelValue> Convert(int64_t v) const {
    return CelValue::CreateInt64(v);
  }
  // Note: the returned CelValue aliases |str|; the caller must keep the
  // string alive for the lifetime of the value.
  absl::optional<CelValue> Convert(const std::string& str) const {
    return CelValue::CreateString(&str);
  }
  absl::optional<CelValue> Convert(double v) const {
    return CelValue::CreateDouble(v);
  }
  absl::optional<CelValue> Convert(bool v) const {
    return CelValue::CreateBool(v);
  }
  // Int64Value wrappers unwrap to a plain int64.
  absl::optional<CelValue> Convert(const Int64Value& v) const {
    return CelValue::CreateInt64(v.value());
  }
};
template <typename MessageT, typename FieldT>
class FieldImpl;

// Type-erased accessor for a single proto field of MessageT. Concrete
// FieldImpl instantiations adapt the generated getter/hasser/setter methods.
template <typename MessageT>
class ProtoField {
 public:
  template <typename FieldT>
  using FieldImpl = FieldImpl<MessageT, FieldT>;
  virtual ~ProtoField() = default;
  // Writes |v| into |m|; fails if the CelValue holds an incompatible type.
  virtual absl::Status Set(MessageT* m, CelValue v) const = 0;
  // Reads the field from |m| as a CelValue.
  virtual absl::StatusOr<CelValue> Get(const MessageT* m) const = 0;
  // Whether the field is considered present on |m|.
  virtual bool Has(const MessageT* m) const = 0;
};
// Bundles the generated get/has/set member-function pointers for a scalar
// proto field. A null has_fn means the field is always considered present
// (proto3 scalar semantics); a null set_fn makes InvokeSet a no-op.
template <typename MessageT, typename FieldT>
struct ScalarApiWrap {
  using GetFn = FieldT (MessageT::*)() const;
  using HasFn = bool (MessageT::*)() const;
  using SetFn = void (MessageT::*)(FieldT);
  ScalarApiWrap(GetFn get_fn, HasFn has_fn, SetFn set_fn)
      : get_fn(get_fn), has_fn(has_fn), set_fn(set_fn) {}
  FieldT InvokeGet(const MessageT* msg) const {
    return std::invoke(get_fn, msg);
  }
  bool InvokeHas(const MessageT* msg) const {
    if (has_fn == nullptr) return true;
    return std::invoke(has_fn, msg);
  }
  void InvokeSet(MessageT* msg, FieldT arg) const {
    if (set_fn != nullptr) {
      std::invoke(set_fn, msg, arg);
    }
  }
  GetFn get_fn;
  HasFn has_fn;
  SetFn set_fn;
};
// Same idea as ScalarApiWrap, but for message-typed fields that use the
// set_allocated_* ownership-transferring API.
// NOTE(review): InvokeSetAllocated silently drops |arg| (without deleting it)
// when set_allocated_fn is null; callers must not rely on ownership transfer
// in that configuration.
template <typename MessageT, typename FieldT>
struct ComplexTypeApiWrap {
 public:
  using GetFn = const FieldT& (MessageT::*)() const;
  using HasFn = bool (MessageT::*)() const;
  using SetAllocatedFn = void (MessageT::*)(FieldT*);
  ComplexTypeApiWrap(GetFn get_fn, HasFn has_fn,
                     SetAllocatedFn set_allocated_fn)
      : get_fn(get_fn), has_fn(has_fn), set_allocated_fn(set_allocated_fn) {}
  const FieldT& InvokeGet(const MessageT* msg) const {
    return std::invoke(get_fn, msg);
  }
  bool InvokeHas(const MessageT* msg) const {
    if (has_fn == nullptr) return true;
    return std::invoke(has_fn, msg);
  }
  // Takes ownership of |arg| when set_allocated_fn is non-null.
  void InvokeSetAllocated(MessageT* msg, FieldT* arg) const {
    if (set_allocated_fn != nullptr) {
      std::invoke(set_allocated_fn, msg, arg);
    }
  }
  GetFn get_fn;
  HasFn has_fn;
  SetAllocatedFn set_allocated_fn;
};
// Scalar-field accessor: adapts a generated getter/hasser/setter triple to
// the generic ProtoField<MessageT> interface.
template <typename MessageT, typename FieldT>
class FieldImpl : public ProtoField<MessageT> {
 private:
  using ApiWrap = ScalarApiWrap<MessageT, FieldT>;

 public:
  FieldImpl(typename ApiWrap::GetFn get_fn, typename ApiWrap::HasFn has_fn,
            typename ApiWrap::SetFn set_fn)
      : api_wrapper_(get_fn, has_fn, set_fn) {}
  // Writes |v| into the field; InvalidArgumentError if |v| holds a different
  // type. Generalized to MessageT* (the original hard-coded TestMessage*,
  // which only compiled for MessageT == TestMessage).
  absl::Status Set(MessageT* m, CelValue v) const override {
    FieldT arg;
    if (!v.GetValue(&arg)) {
      return absl::InvalidArgumentError("wrong type for set");
    }
    api_wrapper_.InvokeSet(m, arg);
    return absl::OkStatus();
  }
  // Reads the field and converts via NativeToCelValue; Unimplemented for
  // field types with no conversion.
  absl::StatusOr<CelValue> Get(const MessageT* m) const override {
    FieldT result = api_wrapper_.InvokeGet(m);
    auto converted = NativeToCelValue().Convert(result);
    if (converted.has_value()) {
      return *converted;
    }
    return absl::UnimplementedError("not implemented for type");
  }
  bool Has(const MessageT* m) const override {
    return api_wrapper_.InvokeHas(m);
  }

 private:
  ApiWrap api_wrapper_;
};
// Specialization for google.protobuf.Int64Value wrapper fields, which use
// the set_allocated_* API and unwrap to a plain int64 (or CEL null when the
// wrapper is unset).
template <typename MessageT>
class FieldImpl<MessageT, Int64Value> : public ProtoField<MessageT> {
  using ApiWrap = ComplexTypeApiWrap<MessageT, Int64Value>;

 public:
  FieldImpl(typename ApiWrap::GetFn get_fn, typename ApiWrap::HasFn has_fn,
            typename ApiWrap::SetAllocatedFn set_fn)
      : api_wrapper_(get_fn, has_fn, set_fn) {}
  // Boxes the int64 into a heap-allocated wrapper and hands ownership to the
  // message. Generalized to MessageT* (the original hard-coded TestMessage*,
  // which only compiled for MessageT == TestMessage).
  absl::Status Set(MessageT* m, CelValue v) const override {
    int64_t arg;
    if (!v.GetValue(&arg)) {
      return absl::InvalidArgumentError("wrong type for set");
    }
    // unique_ptr documents ownership up to the point the message takes it.
    auto proto_value = std::make_unique<Int64Value>();
    proto_value->set_value(arg);
    api_wrapper_.InvokeSetAllocated(m, proto_value.release());
    return absl::OkStatus();
  }
  // Unset wrapper fields surface as CEL null rather than 0.
  absl::StatusOr<CelValue> Get(const MessageT* m) const override {
    if (!api_wrapper_.InvokeHas(m)) {
      return CelValue::CreateNull();
    }
    Int64Value result = api_wrapper_.InvokeGet(m);
    auto converted = NativeToCelValue().Convert(std::move(result));
    if (converted.has_value()) {
      return *converted;
    }
    return absl::UnimplementedError("not implemented for type");
  }
  bool Has(const MessageT* m) const override {
    return api_wrapper_.InvokeHas(m);
  }

 private:
  ApiWrap api_wrapper_;
};
class DemoTypeProvider;

// Hand-rolled type info + mutation support for google.protobuf.Timestamp.
// Access APIs are intentionally absent (GetAccessApis returns nullptr):
// the tests only construct timestamps, which then unwrap to first-class
// CelValue timestamps via AdaptFromWellKnownType.
class DemoTimestamp : public LegacyTypeInfoApis, public LegacyTypeMutationApis {
 public:
  DemoTimestamp() {}
  std::string DebugString(
      const MessageWrapper& wrapped_message) const override {
    return std::string(GetTypename(wrapped_message));
  }
  absl::string_view GetTypename(
      const MessageWrapper& wrapped_message) const override {
    return "google.protobuf.Timestamp";
  }
  const LegacyTypeAccessApis* GetAccessApis(
      const MessageWrapper& wrapped_message) const override {
    return nullptr;
  }
  bool DefinesField(absl::string_view field_name) const override {
    return field_name == "seconds" || field_name == "nanos";
  }
  absl::StatusOr<CelValue::MessageWrapper::Builder> NewInstance(
      cel::MemoryManagerRef memory_manager) const override;
  absl::StatusOr<CelValue> AdaptFromWellKnownType(
      cel::MemoryManagerRef memory_manager,
      CelValue::MessageWrapper::Builder instance) const override;
  absl::Status SetField(
      absl::string_view field_name, const CelValue& value,
      cel::MemoryManagerRef memory_manager,
      CelValue::MessageWrapper::Builder& instance) const override;

 private:
  // Sanity check (used under ABSL_ASSERT) that the wrapped message really is
  // a Timestamp before downcasting.
  absl::Status Validate(const google::protobuf::MessageLite* wrapped_message) const {
    if (wrapped_message->GetTypeName() != "google.protobuf.Timestamp") {
      return absl::InvalidArgumentError("not a timestamp");
    }
    return absl::OkStatus();
  }
};
// Dispatching type info: resolves per-type APIs through the owning
// DemoTypeProvider based on the wrapped message's runtime type name.
class DemoTypeInfo : public LegacyTypeInfoApis {
 public:
  explicit DemoTypeInfo(const DemoTypeProvider* owning_provider)
      : owning_provider_(*owning_provider) {}
  std::string DebugString(const MessageWrapper& wrapped_message) const override;
  absl::string_view GetTypename(
      const MessageWrapper& wrapped_message) const override;
  const LegacyTypeAccessApis* GetAccessApis(
      const MessageWrapper& wrapped_message) const override;

 private:
  // Not owned; the provider outlives this info object (it owns it).
  const DemoTypeProvider& owning_provider_;
};
// Full legacy type support (info + mutation + access) for TestMessage,
// implemented on top of the per-field ProtoField accessors registered in
// the constructor.
class DemoTestMessage : public LegacyTypeInfoApis,
                        public LegacyTypeMutationApis,
                        public LegacyTypeAccessApis {
 public:
  explicit DemoTestMessage(const DemoTypeProvider* owning_provider);
  std::string DebugString(
      const MessageWrapper& wrapped_message) const override {
    return std::string(GetTypename(wrapped_message));
  }
  absl::string_view GetTypename(
      const MessageWrapper& wrapped_message) const override {
    return "google.api.expr.runtime.TestMessage";
  }
  const LegacyTypeAccessApis* GetAccessApis(
      const MessageWrapper& wrapped_message) const override {
    return this;
  }
  const LegacyTypeMutationApis* GetMutationApis(
      const MessageWrapper& wrapped_message) const override {
    return this;
  }
  // Field numbers are not modeled here, so a dummy 0 is reported.
  absl::optional<FieldDescription> FindFieldByName(
      absl::string_view name) const override {
    if (auto it = fields_.find(name); it != fields_.end()) {
      return FieldDescription{0, name};
    }
    return absl::nullopt;
  }
  bool DefinesField(absl::string_view field_name) const override {
    return fields_.contains(field_name);
  }
  absl::StatusOr<CelValue::MessageWrapper::Builder> NewInstance(
      cel::MemoryManagerRef memory_manager) const override;
  absl::StatusOr<CelValue> AdaptFromWellKnownType(
      cel::MemoryManagerRef memory_manager,
      CelValue::MessageWrapper::Builder instance) const override;
  absl::Status SetField(
      absl::string_view field_name, const CelValue& value,
      cel::MemoryManagerRef memory_manager,
      CelValue::MessageWrapper::Builder& instance) const override;
  absl::StatusOr<bool> HasField(
      absl::string_view field_name,
      const CelValue::MessageWrapper& value) const override;
  absl::StatusOr<CelValue> GetField(
      absl::string_view field_name, const CelValue::MessageWrapper& instance,
      ProtoWrapperTypeOptions unboxing_option,
      cel::MemoryManagerRef memory_manager) const override;
  std::vector<absl::string_view> ListFields(
      const CelValue::MessageWrapper& instance) const override {
    std::vector<absl::string_view> fields;
    fields.reserve(fields_.size());
    for (const auto& field : fields_) {
      fields.emplace_back(field.first);
    }
    return fields;
  }

 private:
  using Field = ProtoField<TestMessage>;
  const DemoTypeProvider& owning_provider_;
  // Keyed by field name; populated once in the constructor.
  absl::flat_hash_map<absl::string_view, std::unique_ptr<Field>> fields_;
};
// Test type provider that serves exactly two types: the mutation-only
// Timestamp demo and the fully-featured TestMessage demo.
class DemoTypeProvider : public LegacyTypeProvider {
 public:
  DemoTypeProvider() : timestamp_type_(), test_message_(this), info_(this) {}
  const LegacyTypeInfoApis* GetTypeInfoInstance() const { return &info_; }
  absl::optional<LegacyTypeAdapter> ProvideLegacyType(
      absl::string_view name) const override {
    if (name == "google.protobuf.Timestamp") {
      // Timestamp supports mutation only (no access APIs).
      return LegacyTypeAdapter(nullptr, &timestamp_type_);
    } else if (name == "google.api.expr.runtime.TestMessage") {
      return LegacyTypeAdapter(&test_message_, &test_message_);
    }
    return absl::nullopt;
  }
  absl::optional<const LegacyTypeInfoApis*> ProvideLegacyTypeInfo(
      absl::string_view name) const override {
    if (name == "google.protobuf.Timestamp") {
      return &timestamp_type_;
    } else if (name == "google.api.expr.runtime.TestMessage") {
      return &test_message_;
    }
    return absl::nullopt;
  }
  // Interns the type name so callers can hold a string_view that outlives
  // the temporary returned by GetTypeName(). node_hash_set guarantees
  // pointer stability of stored strings.
  const std::string& GetStableType(
      const google::protobuf::MessageLite* wrapped_message) const {
    std::string name = wrapped_message->GetTypeName();
    auto [iter, inserted] = stable_types_.insert(name);
    return *iter;
  }
  // Wraps a message (not owned) together with this provider's type info.
  CelValue WrapValue(const google::protobuf::MessageLite* message) const {
    return CelValue::CreateMessageWrapper(
        CelValue::MessageWrapper(message, GetTypeInfoInstance()));
  }

 private:
  DemoTimestamp timestamp_type_;
  DemoTestMessage test_message_;
  DemoTypeInfo info_;
  mutable absl::node_hash_set<std::string> stable_types_;
};
std::string DemoTypeInfo::DebugString(
    const MessageWrapper& wrapped_message) const {
  return wrapped_message.message_ptr()->GetTypeName();
}
// Returns a view that must outlive this call, so the name is interned in the
// provider's stable set rather than referencing a temporary string.
absl::string_view DemoTypeInfo::GetTypename(
    const MessageWrapper& wrapped_message) const {
  return owning_provider_.GetStableType(wrapped_message.message_ptr());
}
// Looks up access APIs through the provider; nullptr for types without them
// (e.g. the Timestamp demo).
const LegacyTypeAccessApis* DemoTypeInfo::GetAccessApis(
    const MessageWrapper& wrapped_message) const {
  auto adapter = owning_provider_.ProvideLegacyType(
      wrapped_message.message_ptr()->GetTypeName());
  if (adapter.has_value()) {
    return adapter->access_apis();
  }
  return nullptr;
}
// Allocates a fresh Timestamp on the evaluation arena.
absl::StatusOr<CelValue::MessageWrapper::Builder> DemoTimestamp::NewInstance(
    cel::MemoryManagerRef memory_manager) const {
  auto* ts = google::protobuf::Arena::Create<google::protobuf::Timestamp>(
      cel::extensions::ProtoMemoryManagerArena(memory_manager));
  return CelValue::MessageWrapper::Builder(ts);
}
// Timestamps always unwrap to a first-class CelValue timestamp.
absl::StatusOr<CelValue> DemoTimestamp::AdaptFromWellKnownType(
    cel::MemoryManagerRef memory_manager,
    CelValue::MessageWrapper::Builder instance) const {
  auto value = Unwrap(instance.message_ptr());
  ABSL_ASSERT(value.has_value());
  return *value;
}
// Only the "seconds" and "nanos" int64 fields are settable; anything else
// (or a non-int64 value) is an error.
absl::Status DemoTimestamp::SetField(
    absl::string_view field_name, const CelValue& value,
    cel::MemoryManagerRef memory_manager,
    CelValue::MessageWrapper::Builder& instance) const {
  ABSL_ASSERT(Validate(instance.message_ptr()).ok());
  auto* mutable_ts = cel::internal::down_cast<google::protobuf::Timestamp*>(
      instance.message_ptr());
  if (field_name == "seconds" && value.IsInt64()) {
    mutable_ts->set_seconds(value.Int64OrDie());
  } else if (field_name == "nanos" && value.IsInt64()) {
    // NOTE(review): implicit int64 -> int32 narrowing here; fine for the
    // small test inputs, but worth confirming if reused.
    mutable_ts->set_nanos(value.Int64OrDie());
  } else {
    return absl::UnknownError("no such field");
  }
  return absl::OkStatus();
}
// Registers accessors for the subset of TestMessage fields exercised by the
// tests. A null has-fn marks a scalar field as always present; the wrapper
// field uses the set_allocated_* API and real presence.
DemoTestMessage::DemoTestMessage(const DemoTypeProvider* owning_provider)
    : owning_provider_(*owning_provider) {
  fields_["int64_value"] = std::make_unique<Field::FieldImpl<int64_t>>(
      &TestMessage::int64_value,
      nullptr, &TestMessage::set_int64_value);
  fields_["double_value"] = std::make_unique<Field::FieldImpl<double>>(
      &TestMessage::double_value,
      nullptr, &TestMessage::set_double_value);
  fields_["bool_value"] = std::make_unique<Field::FieldImpl<bool>>(
      &TestMessage::bool_value,
      nullptr, &TestMessage::set_bool_value);
  fields_["int64_wrapper_value"] =
      std::make_unique<Field::FieldImpl<Int64Value>>(
          &TestMessage::int64_wrapper_value,
          &TestMessage::has_int64_wrapper_value,
          &TestMessage::set_allocated_int64_wrapper_value);
}
// Arena-allocates a fresh TestMessage for construction expressions.
absl::StatusOr<CelValue::MessageWrapper::Builder> DemoTestMessage::NewInstance(
    cel::MemoryManagerRef memory_manager) const {
  auto* ts = google::protobuf::Arena::Create<TestMessage>(
      cel::extensions::ProtoMemoryManagerArena(memory_manager));
  return CelValue::MessageWrapper::Builder(ts);
}
// Delegates to the per-field accessor registered in the constructor.
absl::Status DemoTestMessage::SetField(
    absl::string_view field_name, const CelValue& value,
    cel::MemoryManagerRef memory_manager,
    CelValue::MessageWrapper::Builder& instance) const {
  auto iter = fields_.find(field_name);
  if (iter == fields_.end()) {
    return absl::UnknownError("no such field");
  }
  auto* mutable_test_msg =
      cel::internal::down_cast<TestMessage*>(instance.message_ptr());
  return iter->second->Set(mutable_test_msg, value);
}
// TestMessage is not a well-known type, so it stays a message wrapper bound
// to the provider's type info.
absl::StatusOr<CelValue> DemoTestMessage::AdaptFromWellKnownType(
    cel::MemoryManagerRef memory_manager,
    CelValue::MessageWrapper::Builder instance) const {
  return CelValue::CreateMessageWrapper(
      instance.Build(owning_provider_.GetTypeInfoInstance()));
}
absl::StatusOr<bool> DemoTestMessage::HasField(
    absl::string_view field_name, const CelValue::MessageWrapper& value) const {
  auto iter = fields_.find(field_name);
  if (iter == fields_.end()) {
    return absl::UnknownError("no such field");
  }
  auto* test_msg =
      cel::internal::down_cast<const TestMessage*>(value.message_ptr());
  return iter->second->Has(test_msg);
}
// |unboxing_option| is ignored: wrapper unboxing is handled by the field
// accessor itself (unset Int64Value fields yield CEL null).
absl::StatusOr<CelValue> DemoTestMessage::GetField(
    absl::string_view field_name, const CelValue::MessageWrapper& instance,
    ProtoWrapperTypeOptions unboxing_option,
    cel::MemoryManagerRef memory_manager) const {
  auto iter = fields_.find(field_name);
  if (iter == fields_.end()) {
    return absl::UnknownError("no such field");
  }
  auto* test_msg =
      cel::internal::down_cast<const TestMessage*>(instance.message_ptr());
  return iter->second->Get(test_msg);
}
// Passing a null type provider must yield a null builder.
TEST(PortableCelExprBuilderFactoryTest, CreateNullOnMissingTypeProvider) {
  auto missing_provider_builder = CreatePortableExprBuilder(nullptr);
  ASSERT_EQ(missing_provider_builder, nullptr);
}
// End-to-end: construct a well-known type through the custom provider and
// verify it unwraps to an absl::Time.
TEST(PortableCelExprBuilderFactoryTest, CreateSuccess) {
  google::protobuf::Arena arena;
  InterpreterOptions opts;
  Activation activation;
  std::unique_ptr<CelExpressionBuilder> builder =
      CreatePortableExprBuilder(std::make_unique<DemoTypeProvider>(), opts);
  ASSERT_OK_AND_ASSIGN(
      ParsedExpr expr,
      parser::Parse("google.protobuf.Timestamp{seconds: 3000, nanos: 20}"));
  ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry()));
  ASSERT_OK_AND_ASSIGN(
      auto plan, builder->CreateExpression(&expr.expr(), &expr.source_info()));
  ASSERT_OK_AND_ASSIGN(CelValue result, plan->Evaluate(activation, &arena));
  absl::Time result_time;
  ASSERT_TRUE(result.GetValue(&result_time));
  // 3000s == 50min past the epoch.
  EXPECT_EQ(result_time,
            absl::UnixEpoch() + absl::Minutes(50) + absl::Nanoseconds(20));
}
// Construct a custom message via the provider's mutation APIs and read a
// field back through its access APIs.
TEST(PortableCelExprBuilderFactoryTest, CreateCustomMessage) {
  google::protobuf::Arena arena;
  InterpreterOptions opts;
  Activation activation;
  std::unique_ptr<CelExpressionBuilder> builder =
      CreatePortableExprBuilder(std::make_unique<DemoTypeProvider>(), opts);
  ASSERT_OK_AND_ASSIGN(
      ParsedExpr expr,
      parser::Parse("google.api.expr.runtime.TestMessage{int64_value: 20, "
                    "double_value: 3.5}.double_value"));
  ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry(), opts));
  ASSERT_OK_AND_ASSIGN(
      auto plan, builder->CreateExpression(&expr.expr(), &expr.source_info()));
  ASSERT_OK_AND_ASSIGN(CelValue result, plan->Evaluate(activation, &arena));
  double result_double;
  ASSERT_TRUE(result.GetValue(&result_double)) << result.DebugString();
  EXPECT_EQ(result_double, 3.5);
}
// Mixes container-relative type construction with a wrapped message supplied
// through the activation; keeps a raw pointer to the provider to wrap values
// after ownership moves into the builder.
TEST(PortableCelExprBuilderFactoryTest, ActivationAndCreate) {
  google::protobuf::Arena arena;
  InterpreterOptions opts;
  Activation activation;
  auto provider = std::make_unique<DemoTypeProvider>();
  auto* provider_view = provider.get();
  std::unique_ptr<CelExpressionBuilder> builder =
      CreatePortableExprBuilder(std::move(provider), opts);
  builder->set_container("google.api.expr.runtime");
  ASSERT_OK_AND_ASSIGN(
      ParsedExpr expr,
      parser::Parse("TestMessage{int64_value: 20, bool_value: "
                    "false}.bool_value || my_var.bool_value ? 1 : 2"));
  ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry(), opts));
  ASSERT_OK_AND_ASSIGN(
      auto plan, builder->CreateExpression(&expr.expr(), &expr.source_info()));
  TestMessage my_var;
  my_var.set_bool_value(true);
  activation.InsertValue("my_var", provider_view->WrapValue(&my_var));
  ASSERT_OK_AND_ASSIGN(CelValue result, plan->Evaluate(activation, &arena));
  int64_t result_int64;
  ASSERT_TRUE(result.GetValue(&result_int64)) << result.DebugString();
  EXPECT_EQ(result_int64, 1);
}
// Wrapper-type semantics: an unset Int64Value field compares equal to null;
// once set, it participates in numeric comparison.
TEST(PortableCelExprBuilderFactoryTest, WrapperTypes) {
  google::protobuf::Arena arena;
  InterpreterOptions opts;
  opts.enable_heterogeneous_equality = true;
  Activation activation;
  auto provider = std::make_unique<DemoTypeProvider>();
  const auto* provider_view = provider.get();
  std::unique_ptr<CelExpressionBuilder> builder =
      CreatePortableExprBuilder(std::move(provider), opts);
  builder->set_container("google.api.expr.runtime");
  ASSERT_OK_AND_ASSIGN(ParsedExpr null_expr,
                       parser::Parse("my_var.int64_wrapper_value != null ? "
                                     "my_var.int64_wrapper_value > 29 : null"));
  ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry(), opts));
  TestMessage my_var;
  my_var.set_bool_value(true);
  activation.InsertValue("my_var", provider_view->WrapValue(&my_var));
  ASSERT_OK_AND_ASSIGN(
      auto plan,
      builder->CreateExpression(&null_expr.expr(), &null_expr.source_info()));
  // Field unset -> the expression short-circuits to null.
  ASSERT_OK_AND_ASSIGN(CelValue result, plan->Evaluate(activation, &arena));
  EXPECT_TRUE(result.IsNull()) << result.DebugString();
  // Field set -> comparison result.
  my_var.mutable_int64_wrapper_value()->set_value(30);
  ASSERT_OK_AND_ASSIGN(result, plan->Evaluate(activation, &arena));
  bool result_bool;
  ASSERT_TRUE(result.GetValue(&result_bool)) << result.DebugString();
  EXPECT_TRUE(result_bool);
}
// Exercises a cross-section of builtins (arithmetic, comparisons, timestamp
// and duration math, ternaries) against the custom type provider.
TEST(PortableCelExprBuilderFactoryTest, SimpleBuiltinFunctions) {
  google::protobuf::Arena arena;
  InterpreterOptions opts;
  opts.enable_heterogeneous_equality = true;
  Activation activation;
  auto provider = std::make_unique<DemoTypeProvider>();
  std::unique_ptr<CelExpressionBuilder> builder =
      CreatePortableExprBuilder(std::move(provider), opts);
  builder->set_container("google.api.expr.runtime");
  ASSERT_OK_AND_ASSIGN(
      ParsedExpr ternary_expr,
      parser::Parse(
          "TestMessage{int64_value: 2}.int64_value + 1 < "
          "  TestMessage{double_value: 3.5}.double_value - 0.1 ? "
          "    (google.protobuf.Timestamp{seconds: 300} - timestamp(240) "
          "      >= duration('1m') ? 'yes' : 'no') :"
          "    null"));
  ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry(), opts));
  ASSERT_OK_AND_ASSIGN(auto plan,
                       builder->CreateExpression(&ternary_expr.expr(),
                                                 &ternary_expr.source_info()));
  ASSERT_OK_AND_ASSIGN(CelValue result, plan->Evaluate(activation, &arena));
  ASSERT_TRUE(result.IsString()) << result.DebugString();
  EXPECT_EQ(result.StringOrDie().value(), "yes");
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/portable_cel_expr_builder_factory.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/portable_cel_expr_builder_factory_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
b30cea4d-4e1c-4807-9798-2c2d3ac27537 | cpp | google/quiche | packet_number_indexed_queue | quiche/quic/core/packet_number_indexed_queue.h | quiche/quic/core/packet_number_indexed_queue_test.cc | #ifndef QUICHE_QUIC_CORE_PACKET_NUMBER_INDEXED_QUEUE_H_
#define QUICHE_QUIC_CORE_PACKET_NUMBER_INDEXED_QUEUE_H_
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_packet_number.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/common/quiche_circular_deque.h"
namespace quic {
// A deque-backed container indexed by packet number: O(1) random access for
// entries within [first_packet(), last_packet()] and amortized O(1) append.
// Slots may be absent, either because Emplace skipped packet numbers (gaps)
// or because entries were removed.
template <typename T>
class QUICHE_NO_EXPORT PacketNumberIndexedQueue {
 public:
  PacketNumberIndexedQueue() : number_of_present_entries_(0) {}
  // Returns the entry for |packet_number|, or nullptr if absent.
  T* GetEntry(QuicPacketNumber packet_number);
  const T* GetEntry(QuicPacketNumber packet_number) const;
  // Inserts an entry constructed from |args|; |packet_number| must be greater
  // than last_packet(). Returns false otherwise.
  template <typename... Args>
  bool Emplace(QuicPacketNumber packet_number, Args&&... args);
  // Removes the entry if present; the second overload invokes |f| on the
  // entry just before removal.
  bool Remove(QuicPacketNumber packet_number);
  template <typename Function>
  bool Remove(QuicPacketNumber packet_number, Function f);
  // Drops every slot strictly below |packet_number|, present or not.
  void RemoveUpTo(QuicPacketNumber packet_number);
  bool IsEmpty() const { return number_of_present_entries_ == 0; }
  // Count of entries actually present (excludes gap slots).
  size_t number_of_present_entries() const {
    return number_of_present_entries_;
  }
  // Total allocated slots, including absent ones.
  size_t entry_slots_used() const { return entries_.size(); }
  QuicPacketNumber first_packet() const { return first_packet_; }
  QuicPacketNumber last_packet() const {
    if (IsEmpty()) {
      return QuicPacketNumber();
    }
    return first_packet_ + entries_.size() - 1;
  }

 private:
  // Entry plus a presence flag; a default-constructed wrapper marks a gap.
  struct QUICHE_NO_EXPORT EntryWrapper : T {
    bool present;
    EntryWrapper() : present(false) {}
    template <typename... Args>
    explicit EntryWrapper(Args&&... args)
        : T(std::forward<Args>(args)...), present(true) {}
  };
  // Pops absent slots off the front; clears first_packet_ when empty.
  void Cleanup();
  const EntryWrapper* GetEntryWrapper(QuicPacketNumber offset) const;
  EntryWrapper* GetEntryWrapper(QuicPacketNumber offset) {
    const auto* const_this = this;
    return const_cast<EntryWrapper*>(const_this->GetEntryWrapper(offset));
  }
  quiche::QuicheCircularDeque<EntryWrapper> entries_;
  size_t number_of_present_entries_;
  QuicPacketNumber first_packet_;
};
// Returns the entry for |packet_number|, or nullptr if absent.
// EntryWrapper publicly derives from T, so the wrapper pointer converts
// implicitly; a missing entry yields nullptr either way.
template <typename T>
T* PacketNumberIndexedQueue<T>::GetEntry(QuicPacketNumber packet_number) {
  return GetEntryWrapper(packet_number);
}
template <typename T>
const T* PacketNumberIndexedQueue<T>::GetEntry(
    QuicPacketNumber packet_number) const {
  return GetEntryWrapper(packet_number);
}
// Inserts an entry at |packet_number|, which must be strictly greater than
// last_packet(); intermediate packet numbers materialize as absent slots.
// Returns false for uninitialized or non-increasing packet numbers.
template <typename T>
template <typename... Args>
bool PacketNumberIndexedQueue<T>::Emplace(QuicPacketNumber packet_number,
                                          Args&&... args) {
  if (!packet_number.IsInitialized()) {
    QUIC_BUG(quic_bug_10359_1)
        << "Try to insert an uninitialized packet number";
    return false;
  }
  if (IsEmpty()) {
    QUICHE_DCHECK(entries_.empty());
    QUICHE_DCHECK(!first_packet_.IsInitialized());
    entries_.emplace_back(std::forward<Args>(args)...);
    number_of_present_entries_ = 1;
    first_packet_ = packet_number;
    return true;
  }
  if (packet_number <= last_packet()) {
    return false;
  }
  // Fill any gap with absent placeholder slots, then append the new entry
  // at exactly |packet_number|'s offset.
  size_t offset = packet_number - first_packet_;
  if (offset > entries_.size()) {
    entries_.resize(offset);
  }
  number_of_present_entries_++;
  entries_.emplace_back(std::forward<Args>(args)...);
  QUICHE_DCHECK_EQ(packet_number, last_packet());
  return true;
}
// Removes the entry for |packet_number|, if present.
template <typename T>
bool PacketNumberIndexedQueue<T>::Remove(QuicPacketNumber packet_number) {
  return Remove(packet_number, [](const T&) {});
}
// Same, but invokes |f| on the entry just before it is marked absent.
template <typename T>
template <typename Function>
bool PacketNumberIndexedQueue<T>::Remove(QuicPacketNumber packet_number,
                                         Function f) {
  EntryWrapper* entry = GetEntryWrapper(packet_number);
  if (entry == nullptr) {
    return false;
  }
  f(*static_cast<const T*>(entry));
  entry->present = false;
  number_of_present_entries_--;
  // Removing the front entry may expose a run of absent slots to reclaim.
  if (packet_number == first_packet()) {
    Cleanup();
  }
  return true;
}
// Drops all slots with packet numbers strictly below |packet_number|,
// whether present or absent.
template <typename T>
void PacketNumberIndexedQueue<T>::RemoveUpTo(QuicPacketNumber packet_number) {
  while (!entries_.empty() && first_packet_.IsInitialized() &&
         first_packet_ < packet_number) {
    if (entries_.front().present) {
      number_of_present_entries_--;
    }
    entries_.pop_front();
    first_packet_++;
  }
  Cleanup();
}
// Restores the invariant that the front slot (if any) is present: pops
// leading absent slots and resets first_packet_ once the queue is empty.
template <typename T>
void PacketNumberIndexedQueue<T>::Cleanup() {
  while (!entries_.empty() && !entries_.front().present) {
    entries_.pop_front();
    first_packet_++;
  }
  if (entries_.empty()) {
    first_packet_.Clear();
  }
}
// Returns the wrapper slot for |packet_number|, or nullptr when the number
// is uninitialized, outside the current window, or the slot is absent.
template <typename T>
auto PacketNumberIndexedQueue<T>::GetEntryWrapper(
    QuicPacketNumber packet_number) const -> const EntryWrapper* {
  if (!packet_number.IsInitialized() || IsEmpty() ||
      packet_number < first_packet_) {
    return nullptr;
  }
  uint64_t offset = packet_number - first_packet_;
  if (offset >= entries_.size()) {
    return nullptr;
  }
  const EntryWrapper* entry = &entries_[offset];
  if (!entry->present) {
    return nullptr;
  }
  return entry;
}
}
#endif | #include "quiche/quic/core/packet_number_indexed_queue.h"
#include <limits>
#include <map>
#include <string>
#include "quiche/quic/core/quic_packet_number.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic::test {
namespace {
// Fixture providing a fresh string-valued queue for each test case.
class PacketNumberIndexedQueueTest : public QuicTest {
 public:
  PacketNumberIndexedQueueTest() {}
 protected:
  // Queue under test, mapping packet numbers to string payloads.
  PacketNumberIndexedQueue<std::string> queue_;
};
// A default-constructed queue is empty with uninitialized packet bounds.
TEST_F(PacketNumberIndexedQueueTest, InitialState) {
  EXPECT_TRUE(queue_.IsEmpty());
  EXPECT_FALSE(queue_.first_packet().IsInitialized());
  EXPECT_FALSE(queue_.last_packet().IsInitialized());
  EXPECT_EQ(0u, queue_.number_of_present_entries());
  EXPECT_EQ(0u, queue_.entry_slots_used());
}
// Consecutive emplaces use one slot each and update both packet bounds.
TEST_F(PacketNumberIndexedQueueTest, InsertingContinuousElements) {
  ASSERT_TRUE(queue_.Emplace(QuicPacketNumber(1001), "one"));
  EXPECT_EQ("one", *queue_.GetEntry(QuicPacketNumber(1001)));
  ASSERT_TRUE(queue_.Emplace(QuicPacketNumber(1002), "two"));
  EXPECT_EQ("two", *queue_.GetEntry(QuicPacketNumber(1002)));
  EXPECT_FALSE(queue_.IsEmpty());
  EXPECT_EQ(QuicPacketNumber(1001u), queue_.first_packet());
  EXPECT_EQ(QuicPacketNumber(1002u), queue_.last_packet());
  EXPECT_EQ(2u, queue_.number_of_present_entries());
  EXPECT_EQ(2u, queue_.entry_slots_used());
}
// Skipping a packet number leaves an absent slot that cannot be backfilled.
TEST_F(PacketNumberIndexedQueueTest, InsertingOutOfOrder) {
  queue_.Emplace(QuicPacketNumber(1001), "one");
  ASSERT_TRUE(queue_.Emplace(QuicPacketNumber(1003), "three"));
  EXPECT_EQ(nullptr, queue_.GetEntry(QuicPacketNumber(1002)));
  EXPECT_EQ("three", *queue_.GetEntry(QuicPacketNumber(1003)));
  EXPECT_EQ(QuicPacketNumber(1001u), queue_.first_packet());
  EXPECT_EQ(QuicPacketNumber(1003u), queue_.last_packet());
  EXPECT_EQ(2u, queue_.number_of_present_entries());
  EXPECT_EQ(3u, queue_.entry_slots_used());
  ASSERT_FALSE(queue_.Emplace(QuicPacketNumber(1002), "two"));
}
// Emplacing below the current first packet is rejected.
TEST_F(PacketNumberIndexedQueueTest, InsertingIntoPast) {
  queue_.Emplace(QuicPacketNumber(1001), "one");
  EXPECT_FALSE(queue_.Emplace(QuicPacketNumber(1000), "zero"));
}
// Emplacing the same packet number twice is rejected.
TEST_F(PacketNumberIndexedQueueTest, InsertingDuplicate) {
  queue_.Emplace(QuicPacketNumber(1001), "one");
  EXPECT_FALSE(queue_.Emplace(QuicPacketNumber(1001), "one"));
}
// Removing a middle element keeps its slot used and blocks re-insertion.
TEST_F(PacketNumberIndexedQueueTest, RemoveInTheMiddle) {
  queue_.Emplace(QuicPacketNumber(1001), "one");
  queue_.Emplace(QuicPacketNumber(1002), "two");
  queue_.Emplace(QuicPacketNumber(1003), "three");
  ASSERT_TRUE(queue_.Remove(QuicPacketNumber(1002)));
  EXPECT_EQ(nullptr, queue_.GetEntry(QuicPacketNumber(1002)));
  EXPECT_EQ(QuicPacketNumber(1001u), queue_.first_packet());
  EXPECT_EQ(QuicPacketNumber(1003u), queue_.last_packet());
  EXPECT_EQ(2u, queue_.number_of_present_entries());
  EXPECT_EQ(3u, queue_.entry_slots_used());
  EXPECT_FALSE(queue_.Emplace(QuicPacketNumber(1002), "two"));
  EXPECT_TRUE(queue_.Emplace(QuicPacketNumber(1004), "four"));
}
// Removing the head advances first_packet(); removing the tail keeps
// last_packet() unchanged (slots at the back are not reclaimed).
TEST_F(PacketNumberIndexedQueueTest, RemoveAtImmediateEdges) {
  queue_.Emplace(QuicPacketNumber(1001), "one");
  queue_.Emplace(QuicPacketNumber(1002), "two");
  queue_.Emplace(QuicPacketNumber(1003), "three");
  ASSERT_TRUE(queue_.Remove(QuicPacketNumber(1001)));
  EXPECT_EQ(nullptr, queue_.GetEntry(QuicPacketNumber(1001)));
  ASSERT_TRUE(queue_.Remove(QuicPacketNumber(1003)));
  EXPECT_EQ(nullptr, queue_.GetEntry(QuicPacketNumber(1003)));
  EXPECT_EQ(QuicPacketNumber(1002u), queue_.first_packet());
  EXPECT_EQ(QuicPacketNumber(1003u), queue_.last_packet());
  EXPECT_EQ(1u, queue_.number_of_present_entries());
  EXPECT_EQ(2u, queue_.entry_slots_used());
  EXPECT_TRUE(queue_.Emplace(QuicPacketNumber(1004), "four"));
}
// Removing the head compacts the long run of absent slots behind it.
TEST_F(PacketNumberIndexedQueueTest, RemoveAtDistantFront) {
  queue_.Emplace(QuicPacketNumber(1001), "one");
  queue_.Emplace(QuicPacketNumber(1002), "one (kinda)");
  queue_.Emplace(QuicPacketNumber(2001), "two");
  EXPECT_EQ(QuicPacketNumber(1001u), queue_.first_packet());
  EXPECT_EQ(QuicPacketNumber(2001u), queue_.last_packet());
  EXPECT_EQ(3u, queue_.number_of_present_entries());
  EXPECT_EQ(1001u, queue_.entry_slots_used());
  ASSERT_TRUE(queue_.Remove(QuicPacketNumber(1002)));
  EXPECT_EQ(QuicPacketNumber(1001u), queue_.first_packet());
  EXPECT_EQ(QuicPacketNumber(2001u), queue_.last_packet());
  EXPECT_EQ(2u, queue_.number_of_present_entries());
  EXPECT_EQ(1001u, queue_.entry_slots_used());
  ASSERT_TRUE(queue_.Remove(QuicPacketNumber(1001)));
  EXPECT_EQ(QuicPacketNumber(2001u), queue_.first_packet());
  EXPECT_EQ(QuicPacketNumber(2001u), queue_.last_packet());
  EXPECT_EQ(1u, queue_.number_of_present_entries());
  EXPECT_EQ(1u, queue_.entry_slots_used());
}
// Removing the last packet does not shrink the queue's back boundary.
TEST_F(PacketNumberIndexedQueueTest, RemoveAtDistantBack) {
  queue_.Emplace(QuicPacketNumber(1001), "one");
  queue_.Emplace(QuicPacketNumber(2001), "two");
  EXPECT_EQ(QuicPacketNumber(1001u), queue_.first_packet());
  EXPECT_EQ(QuicPacketNumber(2001u), queue_.last_packet());
  ASSERT_TRUE(queue_.Remove(QuicPacketNumber(2001)));
  EXPECT_EQ(QuicPacketNumber(1001u), queue_.first_packet());
  EXPECT_EQ(QuicPacketNumber(2001u), queue_.last_packet());
}
// Draining the queue resets its bounds; afterwards even smaller packet
// numbers may be inserted again.
TEST_F(PacketNumberIndexedQueueTest, ClearAndRepopulate) {
  queue_.Emplace(QuicPacketNumber(1001), "one");
  queue_.Emplace(QuicPacketNumber(2001), "two");
  ASSERT_TRUE(queue_.Remove(QuicPacketNumber(1001)));
  ASSERT_TRUE(queue_.Remove(QuicPacketNumber(2001)));
  EXPECT_TRUE(queue_.IsEmpty());
  EXPECT_FALSE(queue_.first_packet().IsInitialized());
  EXPECT_FALSE(queue_.last_packet().IsInitialized());
  EXPECT_TRUE(queue_.Emplace(QuicPacketNumber(101), "one"));
  EXPECT_TRUE(queue_.Emplace(QuicPacketNumber(201), "two"));
  EXPECT_EQ(QuicPacketNumber(101u), queue_.first_packet());
  EXPECT_EQ(QuicPacketNumber(201u), queue_.last_packet());
}
// Removing packet numbers outside the occupied range fails.
TEST_F(PacketNumberIndexedQueueTest, FailToRemoveElementsThatNeverExisted) {
  ASSERT_FALSE(queue_.Remove(QuicPacketNumber(1000)));
  queue_.Emplace(QuicPacketNumber(1001), "one");
  ASSERT_FALSE(queue_.Remove(QuicPacketNumber(1000)));
  ASSERT_FALSE(queue_.Remove(QuicPacketNumber(1002)));
}
// A removed entry cannot be removed a second time.
TEST_F(PacketNumberIndexedQueueTest, FailToRemoveElementsTwice) {
  queue_.Emplace(QuicPacketNumber(1001), "one");
  ASSERT_TRUE(queue_.Remove(QuicPacketNumber(1001)));
  ASSERT_FALSE(queue_.Remove(QuicPacketNumber(1001)));
  ASSERT_FALSE(queue_.Remove(QuicPacketNumber(1001)));
}
// RemoveUpTo(n) drops entries strictly below n; removing past the last
// entry empties the queue entirely.
TEST_F(PacketNumberIndexedQueueTest, RemoveUpTo) {
  queue_.Emplace(QuicPacketNumber(1001), "one");
  queue_.Emplace(QuicPacketNumber(2001), "two");
  EXPECT_EQ(QuicPacketNumber(1001u), queue_.first_packet());
  EXPECT_EQ(2u, queue_.number_of_present_entries());
  queue_.RemoveUpTo(QuicPacketNumber(1001));
  EXPECT_EQ(QuicPacketNumber(1001u), queue_.first_packet());
  EXPECT_EQ(2u, queue_.number_of_present_entries());
  queue_.RemoveUpTo(QuicPacketNumber(1100));
  EXPECT_EQ(QuicPacketNumber(2001u), queue_.first_packet());
  EXPECT_EQ(1u, queue_.number_of_present_entries());
  queue_.RemoveUpTo(QuicPacketNumber(2001));
  EXPECT_EQ(QuicPacketNumber(2001u), queue_.first_packet());
  EXPECT_EQ(1u, queue_.number_of_present_entries());
  queue_.RemoveUpTo(QuicPacketNumber(2002));
  EXPECT_FALSE(queue_.first_packet().IsInitialized());
  EXPECT_EQ(0u, queue_.number_of_present_entries());
}
// GetEntry works through a const reference to the queue.
TEST_F(PacketNumberIndexedQueueTest, ConstGetter) {
  queue_.Emplace(QuicPacketNumber(1001), "one");
  const auto& const_queue = queue_;
  EXPECT_EQ("one", *const_queue.GetEntry(QuicPacketNumber(1001)));
  EXPECT_EQ(nullptr, const_queue.GetEntry(QuicPacketNumber(1002)));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/packet_number_indexed_queue.h | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/packet_number_indexed_queue_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
f5feb927-0ca3-4b2d-b028-8915edaf8170 | cpp | tensorflow/tensorflow | all_gather_combiner | third_party/xla/xla/service/all_gather_combiner.cc | third_party/xla/xla/service/all_gather_combiner_test.cc | #include "xla/service/all_gather_combiner.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/layout.h"
#include "xla/service/collective_combiner_utils.h"
#include "xla/service/hlo_domain_map.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// Returns the all-gather dimension shared by the most instructions in
// `to_combine`, falling back to 0 when that dimension is not valid for
// every operand (i.e. it is >= the smallest rank in the set).
int64_t FindMostFrequentGatherDim(
    absl::Span<HloInstruction* const> to_combine) {
  assert(!to_combine.empty());
  std::vector<int64_t> counts;
  int64_t smallest_rank = std::numeric_limits<int64_t>::max();
  for (const HloInstruction* instr : to_combine) {
    const int64_t dim =
        Cast<HloAllGatherInstruction>(instr)->all_gather_dimension();
    if (dim + 1 > static_cast<int64_t>(counts.size())) {
      counts.resize(dim + 1, 0);
    }
    ++counts[dim];
    smallest_rank = std::min(smallest_rank, instr->shape().rank());
  }
  // std::max_element returns the first maximum, matching the original
  // tie-breaking behavior (lowest dimension wins on ties).
  const int64_t best_dim = std::distance(
      counts.begin(), std::max_element(counts.begin(), counts.end()));
  return best_dim < smallest_rank ? best_dim : 0;
}
// Replaces the all-gather ops in `to_combine` with a single variadic
// all-gather whose result is a tuple, followed by get-tuple-element ops.
// When gather dimensions differ (only possible with combine_by_dim=false),
// operands and results are bitcast so every op gathers along the most
// frequent dimension.
absl::Status CombineAllGathers(absl::Span<HloInstruction* const> to_combine,
                               bool combine_by_dim) {
  // Nothing to do for empty or singleton groups.
  if (to_combine.size() < 2) {
    return absl::OkStatus();
  }
  VLOG(1) << "Combined " << to_combine.size() << " AllGather ops";
  HloComputation& computation = *to_combine.back()->parent();
  std::vector<HloInstruction*> operands;
  // Per-op dimension permutation; nullopt when the op already gathers
  // along the chosen dimension.
  std::vector<std::optional<std::vector<int64_t>>> operand_permutations;
  std::vector<Shape> output_shapes;
  int64_t most_frequent_dim = FindMostFrequentGatherDim(to_combine);
  VLOG(1) << "Combining set";
  for (HloInstruction* hlo : to_combine) {
    VLOG(1) << "Set element: " << hlo->ToString();
    TF_RET_CHECK(hlo->opcode() == HloOpcode::kAllGather);
    const auto* ag = Cast<HloAllGatherInstruction>(hlo);
    TF_RET_CHECK(hlo->operand_count() == 1);
    TF_RET_CHECK(hlo->shape().IsArray());
    // combine_by_dim guarantees all group members share one dimension.
    TF_RET_CHECK(!combine_by_dim ||
                 ag->all_gather_dimension() == most_frequent_dim);
    HloInstruction* operand = hlo->operands().front();
    operands.push_back(operand);
    operand_permutations.emplace_back();
    output_shapes.push_back(hlo->shape());
    // If the op gathers along a different dimension, bitcast its operand
    // so the gather dimension lines up with most_frequent_dim; the result
    // is bitcast back below.
    if (ag->all_gather_dimension() != most_frequent_dim) {
      const Shape& operand_shape = operand->shape();
      auto& perm = operand_permutations.back();
      perm = std::vector<int64_t>(operand_shape.rank());
      std::iota(perm->begin(), perm->end(), 0);
      std::swap((*perm)[most_frequent_dim],
                (*perm)[ag->all_gather_dimension()]);
      operands.back() =
          computation.AddInstruction(HloInstruction::CreateBitcast(
              ShapeUtil::PermuteDimensions(*perm, operand_shape), operand));
      output_shapes.back() = ShapeUtil::PermuteDimensions(*perm, hlo->shape());
    }
  }
  // Build one variadic all-gather; collective attributes (device list,
  // channel id, use_global_device_ids) are taken from the first member,
  // which the grouping key guarantees are identical across the group.
  HloInstruction* combined;
  combined = computation.AddInstruction(HloInstruction::CreateAllGather(
      ShapeUtil::MakeTupleShape(output_shapes), operands, most_frequent_dim,
      to_combine.front()->device_list(),
      false, to_combine.front()->channel_id(),
      Cast<HloAllGatherInstruction>(to_combine.front())
          ->use_global_device_ids()));
  combined->set_sharding(
      hlo_sharding_util::CreateTupleSharding(combined->shape(), to_combine));
  VLOG(1) << "Replacing with : " << combined->ToString();
  // Replace each original op with a GTE (plus an inverse bitcast when the
  // operand was permuted above).
  for (int64_t i = 0; i < to_combine.size(); ++i) {
    HloInstruction* replacement = computation.AddInstruction(
        HloInstruction::CreateGetTupleElement(combined, i));
    if (operand_permutations[i]) {
      replacement = computation.AddInstruction(HloInstruction::CreateBitcast(
          ShapeUtil::PermuteDimensions(*operand_permutations[i],
                                       replacement->shape()),
          replacement));
    }
    TF_RETURN_IF_ERROR(
        computation.ReplaceInstruction(to_combine[i], replacement));
  }
  return absl::OkStatus();
}
}
// Accessor for the free-form "extra args" component (tuple index 6) of a
// GroupKey, which subclasses may use to further partition combinable ops.
std::string& AllGatherCombiner::GetGroupKeyExtraArgs(
    AllGatherCombiner::GroupKey& key) {
  auto& extra_args = std::get<6>(key);
  return extra_args;
}
std::optional<AllGatherCombiner::GroupKey>
AllGatherCombiner::CombineKey(const HloInstruction* instruction,
const HloDomainMap& domain_map,
bool combine_by_dim,
bool combine_different_dtypes) {
if (instruction->opcode() != HloOpcode::kAllGather) {
return std::nullopt;
}
std::vector<std::vector<int64_t>> replica_groups;
const auto* ag = Cast<HloAllGatherInstruction>(instruction);
replica_groups.reserve(ag->replica_groups().size());
for (const ReplicaGroup& replica_group : ag->replica_groups()) {
replica_groups.push_back(
std::vector<int64_t>(replica_group.replica_ids().begin(),
replica_group.replica_ids().end()));
}
int64_t ag_dim_key = combine_by_dim ? ag->all_gather_dimension() : -1;
PrimitiveType data_type = combine_different_dtypes
? PRIMITIVE_TYPE_INVALID
: ag->shape().element_type();
return GroupKey{ag_dim_key,
domain_map.GetDomainMetadataId(ag),
ag->channel_id().has_value(),
ag->use_global_device_ids(),
data_type,
replica_groups,
""};
}
// Constructs the pass. `combine_threshold_in_bytes` / `combine_threshold_count`
// bound the combined op's total size and member count; `combine_by_dim`
// restricts combining to ops sharing an all-gather dimension;
// `combine_different_dtypes` permits merging ops with differing element types.
AllGatherCombiner::AllGatherCombiner(int64_t combine_threshold_in_bytes,
                                     int64_t combine_threshold_count,
                                     bool combine_by_dim,
                                     bool combine_different_dtypes)
    : combine_threshold_in_bytes_(combine_threshold_in_bytes),
      combine_threshold_count_(combine_threshold_count),
      combine_by_dim_(combine_by_dim),
      combine_different_dtypes_(combine_different_dtypes) {}
// Runs the combiner over every non-fusion computation, grouping all-gathers
// via `combine_key` and merging each group with CombineAllGathers. Returns
// whether any computation was changed.
absl::StatusOr<bool> AllGatherCombiner::RunWithKeyCombiner(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads,
    absl::FunctionRef<std::optional<AllGatherCombiner::GroupKey>(
        const HloInstruction*, const HloDomainMap&, bool, bool)>
        combine_key) {
  VLOG(1) << "Running AllGatherCombiner with threshold of "
          << combine_threshold_in_bytes_ << " bytes";
  // Non-positive thresholds disable the pass entirely.
  if (combine_threshold_in_bytes_ <= 0 || combine_threshold_count_ <= 0) {
    VLOG(1) << "Skip AllGatherCombiner because the threshold is zero";
    return false;
  }
  // Layout-constrained all-gathers cannot be combined safely; bail out.
  if (hlo_query::ContainsLayoutConstrainedCollective(*module,
                                                     HloOpcode::kAllGather)) {
    VLOG(1) << "Skip AllGatherCombiner because the module contains "
               "all-gather with constrained layouts";
    return false;
  }
  bool changed = false;
  for (HloComputation* computation :
       module->MakeNonfusionComputations(execution_threads)) {
    TF_ASSIGN_OR_RETURN(auto domain_map, HloDomainMap::Create(computation, ""));
    // Key function deciding which all-gathers may be grouped together.
    auto key_fn = [&](const HloInstruction* instruction) {
      return combine_key(instruction, *domain_map, combine_by_dim_,
                         combine_different_dtypes_);
    };
    // Merge function applied to each combinable group.
    auto combine_fn =
        [&](absl::Span<HloInstruction* const> to_combine) -> absl::Status {
      return CombineAllGathers(to_combine, combine_by_dim_);
    };
    TF_ASSIGN_OR_RETURN(
        bool computation_changed,
        CombineInstructionsByKey<GroupKey>(computation, key_fn, combine_fn,
                                           combine_threshold_in_bytes_,
                                           combine_threshold_count_));
    changed |= computation_changed;
  }
  return changed;
}
// Entry point for the pass: runs the key-parameterized implementation with
// the default CombineKey grouping.
absl::StatusOr<bool> AllGatherCombiner::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  // Status and value both propagate unchanged, so return directly.
  return RunWithKeyCombiner(module, execution_threads, CombineKey);
}
} | #include "xla/service/all_gather_combiner.h"
#include <cstdint>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::testing::Matcher;
namespace op = xla::testing::opcode_matchers;
int64_t kMaxCombineCount = 256;
// Collects every all-gather instruction in the non-fusion computations of
// `module`, in computation/instruction iteration order.
std::vector<HloAllGatherInstruction*> FindAllGathers(const HloModule& module) {
  std::vector<HloAllGatherInstruction*> found;
  for (HloComputation* computation : module.computations()) {
    if (computation->IsFusionComputation()) {
      continue;
    }
    for (HloInstruction* instruction : computation->instructions()) {
      if (auto* all_gather = DynCast<HloAllGatherInstruction>(instruction)) {
        found.push_back(all_gather);
      }
    }
  }
  return found;
}
// Convenience wrapper: number of all-gather ops in `module`.
int64_t AllGatherCount(const HloModule& module) {
  const auto all_gathers = FindAllGathers(module);
  return all_gathers.size();
}
using AllGatherCombinerTest = HloTestBase;
// Two same-shape, same-dim all-gathers merge into one variadic op.
TEST_F(AllGatherCombinerTest, CombineAllGathers) {
  const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
  param0 = f32[32] parameter(0)
  param1 = f32[32] parameter(1)
  allgather0 = f32[128] all-gather(param0), replica_groups={}, dimensions={0}
  allgather1 = f32[128] all-gather(param1), replica_groups={}, dimensions={0}
  ROOT tuple = (f32[128], f32[128]) tuple(allgather0, allgather1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
                            /*combine_by_dim=*/true);
  ASSERT_EQ(AllGatherCount(*module), 2);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
  EXPECT_TRUE(changed);
  Matcher<const HloInstruction*> combined_all_gather =
      op::AllGather(op::Parameter(0), op::Parameter(1));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Tuple(op::GetTupleElement(combined_all_gather, 0),
                        op::GetTupleElement(combined_all_gather, 1)));
}
// With combine_different_dtypes=false, f32 and s32 ops stay separate.
TEST_F(AllGatherCombinerTest, CombineDifferentDtypes) {
  const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
  param0 = f32[32] parameter(0)
  param1 = s32[32] parameter(1)
  allgather0 = f32[128] all-gather(param0), replica_groups={}, dimensions={0}
  allgather1 = s32[128] all-gather(param1), replica_groups={}, dimensions={0}
  ROOT tuple = (f32[128], s32[128]) tuple(allgather0, allgather1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
                            /*combine_by_dim=*/true,
                            /*combine_different_dtypes=*/false);
  ASSERT_EQ(AllGatherCount(*module), 2);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Tuple(op::AllGather(op::Parameter(0)),
                        op::AllGather(op::Parameter(1))));
}
// combine_by_dim=true yields one combined op per all-gather dimension.
TEST_F(AllGatherCombinerTest, CombineAllGathersByAllGatherDimension) {
  const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
  param0 = f32[2,2] parameter(0)
  param1 = f32[2,2] parameter(1)
  param2 = f32[2,2] parameter(2)
  param3 = f32[2,2] parameter(3)
  param4 = f32[2,2] parameter(4)
  allgather0 = f32[8,2] all-gather(param0), replica_groups={}, dimensions={0}
  allgather1 = f32[8,2] all-gather(param1), replica_groups={}, dimensions={0}
  allgather2 = f32[2,8] all-gather(param2), replica_groups={}, dimensions={1}
  allgather3 = f32[2,8] all-gather(param3), replica_groups={}, dimensions={1}
  allgather4 = f32[8,2] all-gather(param4), replica_groups={}, dimensions={0}
  ROOT tuple = (f32[8,2], f32[8,2], f32[2,8], f32[2,8], f32[8,2])
    tuple(allgather0, allgather1, allgather2, allgather3, allgather4)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
                            /*combine_by_dim=*/true);
  ASSERT_EQ(AllGatherCount(*module), 5);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
  EXPECT_TRUE(changed);
  Matcher<const HloInstruction*> combined_all_gather0 =
      op::AllGather(op::Parameter(0), op::Parameter(1), op::Parameter(4));
  Matcher<const HloInstruction*> combined_all_gather1 =
      op::AllGather(op::Parameter(2), op::Parameter(3));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Tuple(op::GetTupleElement(combined_all_gather0, 0),
                        op::GetTupleElement(combined_all_gather0, 1),
                        op::GetTupleElement(combined_all_gather1, 0),
                        op::GetTupleElement(combined_all_gather1, 1),
                        op::GetTupleElement(combined_all_gather0, 2)));
}
// A byte threshold smaller than the combined size prevents merging.
TEST_F(AllGatherCombinerTest, DoNotCombineOverThreshold) {
  const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
  param0 = f32[8] parameter(0)
  param1 = f32[8] parameter(1)
  allgather0 = f32[32] all-gather(param0), replica_groups={}, dimensions={0}
  allgather1 = f32[32] all-gather(param1), replica_groups={}, dimensions={0}
  ROOT tuple = (f32[32], f32[32]) tuple(allgather0, allgather1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  AllGatherCombiner combine(255, kMaxCombineCount,
                            /*combine_by_dim=*/true);
  ASSERT_EQ(AllGatherCount(*module), 2);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
  EXPECT_EQ(AllGatherCount(*module), 2);
  EXPECT_FALSE(changed);
}
// A byte threshold exactly equal to the combined size still allows merging.
TEST_F(AllGatherCombinerTest, CombineUpToThreshold) {
  const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
  param0 = f32[8] parameter(0)
  param1 = f32[8] parameter(1)
  allgather0 = f32[32] all-gather(param0), replica_groups={}, dimensions={0}
  allgather1 = f32[32] all-gather(param1), replica_groups={}, dimensions={0}
  ROOT tuple = (f32[32], f32[32]) tuple(allgather0, allgather1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  AllGatherCombiner combine(256, kMaxCombineCount,
                            /*combine_by_dim=*/true);
  ASSERT_EQ(AllGatherCount(*module), 2);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
  EXPECT_EQ(AllGatherCount(*module), 1);
  EXPECT_TRUE(changed);
}
// An all-gather feeding another all-gather cannot be merged with it.
TEST_F(AllGatherCombinerTest, NoDependentCombination) {
  const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
  param = f32[1] parameter(0)
  allgather0 = f32[2] all-gather(param), replica_groups={}, dimensions={0}
  ROOT allgather1 = f32[4] all-gather(allgather0), replica_groups={},
      dimensions={0}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
                            /*combine_by_dim=*/true);
  ASSERT_EQ(AllGatherCount(*module), 2);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
  EXPECT_EQ(AllGatherCount(*module), 2);
  EXPECT_FALSE(changed);
}
// Ops with different replica groups fall into different key buckets.
TEST_F(AllGatherCombinerTest, NoDifferentReplicaGroupsCombination) {
  const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
  param0 = f32[32] parameter(0)
  param1 = f32[32] parameter(1)
  allgather0 = f32[64] all-gather(param0), replica_groups={{0, 1}, {2, 3}},
    dimensions={0}
  allgather1 = f32[64] all-gather(param1), replica_groups={{0, 2}, {1, 3}},
    dimensions={0}
  ROOT tuple = (f32[64], f32[64]) tuple(allgather0, allgather1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
                            /*combine_by_dim=*/true);
  ASSERT_EQ(AllGatherCount(*module), 2);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
  EXPECT_EQ(AllGatherCount(*module), 2);
  EXPECT_FALSE(changed);
}
// Ops inside distinct sharding domains must not be combined.
TEST_F(AllGatherCombinerTest, DomainPreventsCombining) {
  const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
  param0 = f32[32] parameter(0), sharding={maximal device=0}
  param1 = f32[32] parameter(1), sharding={maximal device=1}
  allgather0 = f32[128] all-gather(param0),
    replica_groups={}, dimensions={0}, sharding={maximal device=0}
  allgather1 = f32[128] all-gather(param1),
    replica_groups={}, dimensions={0}, sharding={maximal device=1}
  domain0 = f32[128] domain(allgather0),
    domain={kind="sharding", entry={{maximal device=0}, {maximal device=1}},
    exit={maximal device=0}}
  domain1 = f32[128] domain(allgather1),
    domain={kind="sharding", entry={{maximal device=0}, {maximal device=1}},
    exit={maximal device=1}}
  ROOT tuple = (f32[128], f32[128]) tuple(domain0, domain1),
    sharding={{maximal device=0}, {maximal device=1}}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
                            /*combine_by_dim=*/true);
  ASSERT_EQ(AllGatherCount(*module), 2);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
  EXPECT_EQ(AllGatherCount(*module), 2);
  EXPECT_FALSE(changed);
}
// Ops in different domain instances that share metadata may combine, and
// the combined op carries a tuple sharding built from the members.
TEST_F(AllGatherCombinerTest, CombineFromTwoDomainsWithSameMetadata) {
  const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
  param0 = f32[32] parameter(0), sharding={maximal device=0}
  param1 = f32[32] parameter(1), sharding={maximal device=1}
  param2 = f32[32] parameter(2), sharding={maximal device=1}
  allgather0 = f32[128] all-gather(param0),
    replica_groups={}, dimensions={0}, sharding={maximal device=0}
  allgather1 = f32[128] all-gather(param1),
    replica_groups={}, dimensions={0}, sharding={maximal device=1}
  allgather2 = f32[128] all-gather(param2),
    replica_groups={}, dimensions={0}, sharding={maximal device=0}
  domain0 = f32[128] domain(allgather0),
    domain={kind="sharding", entry={{maximal device=0}, {maximal device=1},
    {maximal device=0}}, exit={maximal device=0}}
  domain1 = f32[128] domain(allgather1),
    domain={kind="sharding", entry={{maximal device=0}, {maximal device=1},
    {maximal device=0}}, exit={maximal device=1}}
  domain2 = f32[128] domain(allgather2),
    domain={kind="sharding", entry={{maximal device=0}, {maximal device=1},
    {maximal device=0}}, exit={maximal device=0}}
  ROOT tuple = (f32[128], f32[128], f32[128]) tuple(domain0, domain1,
    domain2),
    sharding={{maximal device=0}, {maximal device=1}, {maximal device=0}}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
                            /*combine_by_dim=*/true);
  ASSERT_EQ(AllGatherCount(*module), 3);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
  EXPECT_EQ(AllGatherCount(*module), 2);
  EXPECT_TRUE(changed);
  const HloInstruction* param0 =
      module->entry_computation()->parameter_instruction(0);
  ASSERT_EQ(param0->user_count(), 1);
  const HloInstruction* combined_ag = param0->users().front();
  ASSERT_EQ(combined_ag->opcode(), HloOpcode::kAllGather);
  EXPECT_THAT(combined_ag,
              op::Sharding("{{maximal device=0}, {maximal device=0}}"));
}
// With combine_by_dim=false, a dim-1 gather is bitcast to the majority
// dimension so both ops merge; the result is bitcast back.
TEST_F(AllGatherCombinerTest, CombineAllGathersDifferentDims) {
  const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
  param0 = f32[2,3]{1,0} parameter(0)
  param1 = f32[2,3]{0,1} parameter(1)
  allgather0 = f32[8,3]{1,0} all-gather(param0), replica_groups={},
    dimensions={0}
  allgather1 = f32[2,12]{0,1} all-gather(param1), replica_groups={},
    dimensions={1}
  ROOT tuple = (f32[8,3]{1,0}, f32[2,12]{0,1}) tuple(allgather0, allgather1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
                            /*combine_by_dim=*/false);
  ASSERT_EQ(AllGatherCount(*module), 2);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
  EXPECT_TRUE(changed);
  Matcher<const HloInstruction*> combined_all_gather =
      op::AllGather(op::Parameter(0), op::Bitcast(op::Parameter(1)));
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Tuple(op::GetTupleElement(combined_all_gather, 0),
                op::Bitcast(op::GetTupleElement(combined_all_gather, 1))));
}
// Five rank-2 gathers over dims {0,0,1,1,0} all merge into one op on the
// most frequent dimension (0), with bitcasts around the dim-1 members.
TEST_F(AllGatherCombinerTest, CombineManyAllGathersDifferentDims) {
  const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
  param0 = f32[2,7]{1,0} parameter(0)
  param1 = f32[3,8]{1,0} parameter(1)
  param2 = f32[4,9]{0,1} parameter(2)
  param3 = f32[5,10]{0,1} parameter(3)
  param4 = f32[6,11]{1,0} parameter(4)
  allgather0 = f32[8,7]{1,0} all-gather(param0), replica_groups={},
    dimensions={0}
  allgather1 = f32[12,8]{1,0} all-gather(param1), replica_groups={},
    dimensions={0}
  allgather2 = f32[4,36]{0,1} all-gather(param2), replica_groups={},
    dimensions={1}
  allgather3 = f32[5,40]{0,1} all-gather(param3), replica_groups={},
    dimensions={1}
  allgather4 = f32[24,11]{1,0} all-gather(param4), replica_groups={},
    dimensions={0}
  ROOT tuple = (f32[8,7]{1,0}, f32[12,8]{1,0}, f32[4,36]{0,1}, f32[5,40]{0,1},
    f32[24,11]{1,0}) tuple(allgather0, allgather1, allgather2, allgather3,
    allgather4)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
                            /*combine_by_dim=*/false);
  ASSERT_EQ(AllGatherCount(*module), 5);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
  EXPECT_TRUE(changed);
  Matcher<const HloInstruction*> combined_all_gather = op::AllGather(
      op::Parameter(0), op::Parameter(1), op::Bitcast(op::Parameter(2)),
      op::Bitcast(op::Parameter(3)), op::Parameter(4));
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Tuple(op::GetTupleElement(combined_all_gather, 0),
                op::GetTupleElement(combined_all_gather, 1),
                op::Bitcast(op::GetTupleElement(combined_all_gather, 2)),
                op::Bitcast(op::GetTupleElement(combined_all_gather, 3)),
                op::GetTupleElement(combined_all_gather, 4)));
  std::vector<HloAllGatherInstruction*> all_gathers = FindAllGathers(*module);
  ASSERT_EQ(1, all_gathers.size());
  ASSERT_EQ(0, all_gathers.front()->all_gather_dimension());
}
// Same as above but for rank-4 operands gathering over dims {0,0,2,2,0}.
TEST_F(AllGatherCombinerTest, CombineManyAllGathersDifferentDimsRank4) {
  const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
  param0 = f32[2,7,2,7]{3,2,1,0} parameter(0)
  param1 = f32[3,8,3,8]{3,2,1,0} parameter(1)
  param2 = f32[4,9,4,9]{3,0,1,2} parameter(2)
  param3 = f32[5,10,5,10]{3,0,1,2} parameter(3)
  param4 = f32[6,11,6,11]{3,2,1,0} parameter(4)
  allgather0 = f32[8,7,2,7]{3,2,1,0} all-gather(param0), replica_groups={},
    dimensions={0}
  allgather1 = f32[12,8,3,8]{3,2,1,0} all-gather(param1), replica_groups={},
    dimensions={0}
  allgather2 = f32[4,9,16,9]{3,0,1,2} all-gather(param2), replica_groups={},
    dimensions={2}
  allgather3 = f32[5,10,20,10]{3,0,1,2} all-gather(param3), replica_groups={},
    dimensions={2}
  allgather4 = f32[24,11,6,11]{3,2,1,0} all-gather(param4), replica_groups={},
    dimensions={0}
  ROOT tuple = (f32[8,7,2,7]{3,2,1,0}, f32[12,8,3,8]{3,2,1,0},
    f32[4,9,16,9]{3,0,1,2}, f32[5,10,20,10]{3,0,1,2},
    f32[24,11,6,11]{3,2,1,0}) tuple(allgather0, allgather1, allgather2,
    allgather3, allgather4)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
                            /*combine_by_dim=*/false);
  ASSERT_EQ(AllGatherCount(*module), 5);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
  EXPECT_TRUE(changed);
  Matcher<const HloInstruction*> combined_all_gather = op::AllGather(
      op::Parameter(0), op::Parameter(1), op::Bitcast(op::Parameter(2)),
      op::Bitcast(op::Parameter(3)), op::Parameter(4));
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Tuple(op::GetTupleElement(combined_all_gather, 0),
                op::GetTupleElement(combined_all_gather, 1),
                op::Bitcast(op::GetTupleElement(combined_all_gather, 2)),
                op::Bitcast(op::GetTupleElement(combined_all_gather, 3)),
                op::GetTupleElement(combined_all_gather, 4)));
  std::vector<HloAllGatherInstruction*> all_gathers = FindAllGathers(*module);
  ASSERT_EQ(1, all_gathers.size());
  ASSERT_EQ(0, all_gathers.front()->all_gather_dimension());
}
// Mixed ranks: the majority dim (1) exceeds the rank-1 operand's rank, so
// the combiner falls back to dim 0 and bitcasts the rank-2 members.
TEST_F(AllGatherCombinerTest, CombineManyAllGathersDifferentDimsMixedRanks) {
  const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
  param0 = f32[2,7]{1,0} parameter(0)
  param1 = f32[3,8]{1,0} parameter(1)
  param2 = f32[4,9]{0,1} parameter(2)
  param3 = f32[5,10]{0,1} parameter(3)
  param4 = f32[6]{0} parameter(4)
  allgather0 = f32[2,28]{1,0} all-gather(param0), replica_groups={},
    dimensions={1}
  allgather1 = f32[3,32]{1,0} all-gather(param1), replica_groups={},
    dimensions={1}
  allgather2 = f32[4,36]{0,1} all-gather(param2), replica_groups={},
    dimensions={1}
  allgather3 = f32[5,40]{0,1} all-gather(param3), replica_groups={},
    dimensions={1}
  allgather4 = f32[24]{0} all-gather(param4), replica_groups={},
    dimensions={0}
  ROOT tuple = (f32[2,28]{1,0}, f32[3,32]{1,0}, f32[4,36]{0,1}, f32[5,40]{0,1},
    f32[24]{0}) tuple(allgather0, allgather1, allgather2, allgather3,
    allgather4)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
                            /*combine_by_dim=*/false);
  ASSERT_EQ(AllGatherCount(*module), 5);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
  EXPECT_TRUE(changed);
  Matcher<const HloInstruction*> combined_all_gather = op::AllGather(
      op::Bitcast(op::Parameter(0)), op::Bitcast(op::Parameter(1)),
      op::Bitcast(op::Parameter(2)), op::Bitcast(op::Parameter(3)),
      op::Parameter(4));
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Tuple(op::Bitcast(op::GetTupleElement(combined_all_gather, 0)),
                op::Bitcast(op::GetTupleElement(combined_all_gather, 1)),
                op::Bitcast(op::GetTupleElement(combined_all_gather, 2)),
                op::Bitcast(op::GetTupleElement(combined_all_gather, 3)),
                op::GetTupleElement(combined_all_gather, 4)));
  std::vector<HloAllGatherInstruction*> all_gathers = FindAllGathers(*module);
  ASSERT_EQ(1, all_gathers.size());
  ASSERT_EQ(0, all_gathers.front()->all_gather_dimension());
}
// Same mixed-dim input as the DifferentDims test, but combine_by_dim=true
// produces one combined op per dimension instead of a single bitcast merge.
TEST_F(AllGatherCombinerTest, CombineAllGathersByDim) {
  const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
  param0 = f32[2,7]{1,0} parameter(0)
  param1 = f32[3,8]{1,0} parameter(1)
  param2 = f32[4,9]{0,1} parameter(2)
  param3 = f32[5,10]{0,1} parameter(3)
  param4 = f32[6,11]{1,0} parameter(4)
  allgather0 = f32[8,7]{1,0} all-gather(param0), replica_groups={},
    dimensions={0}
  allgather1 = f32[12,8]{1,0} all-gather(param1), replica_groups={},
    dimensions={0}
  allgather2 = f32[4,36]{0,1} all-gather(param2), replica_groups={},
    dimensions={1}
  allgather3 = f32[5,40]{0,1} all-gather(param3), replica_groups={},
    dimensions={1}
  allgather4 = f32[24,11]{1,0} all-gather(param4), replica_groups={},
    dimensions={0}
  ROOT tuple = (f32[8,7]{1,0}, f32[12,8]{1,0}, f32[4,36]{0,1}, f32[5,40]{0,1},
    f32[24,11]{1,0}) tuple(allgather0, allgather1, allgather2, allgather3,
    allgather4)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
                            /*combine_by_dim=*/true);
  ASSERT_EQ(AllGatherCount(*module), 5);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
  EXPECT_TRUE(changed);
  Matcher<const HloInstruction*> combined_all_gather_0 =
      op::AllGather(op::Parameter(0), op::Parameter(1), op::Parameter(4));
  Matcher<const HloInstruction*> combined_all_gather_1 =
      op::AllGather(op::Parameter(2), op::Parameter(3));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Tuple(op::GetTupleElement(combined_all_gather_0, 0),
                        op::GetTupleElement(combined_all_gather_0, 1),
                        op::GetTupleElement(combined_all_gather_1, 0),
                        op::GetTupleElement(combined_all_gather_1, 1),
                        op::GetTupleElement(combined_all_gather_0, 2)));
  std::vector<HloAllGatherInstruction*> all_gathers = FindAllGathers(*module);
  ASSERT_EQ(2, all_gathers.size());
  ASSERT_EQ(0, all_gathers[0]->all_gather_dimension());
  ASSERT_EQ(1, all_gathers[1]->all_gather_dimension());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_gather_combiner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_gather_combiner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
777cc1b5-5357-4567-95b3-77796b769b4a | cpp | google/tsl | threadpool_async_executor | tsl/platform/threadpool_async_executor.h | tsl/platform/threadpool_async_executor_test.cc | #ifndef TENSORFLOW_TSL_PLATFORM_THREADPOOL_ASYNC_EXECUTOR_H_
#define TENSORFLOW_TSL_PLATFORM_THREADPOOL_ASYNC_EXECUTOR_H_
#include <utility>
#include "xla/tsl/concurrency/async_value.h"
#include "tsl/platform/threadpool.h"
namespace tsl::thread {
// Adapts a tsl::thread::ThreadPool to the AsyncValue::Executor interface so
// async-value continuations can be scheduled on the pool.
class ThreadPoolAsyncExecutor : public AsyncValue::Executor {
 public:
  explicit ThreadPoolAsyncExecutor(ThreadPool* thread_pool)
      : thread_pool_(thread_pool) {}
  // Schedules `task` to run on the underlying thread pool.
  void Execute(Task task) final {
    // The task is moved onto the heap and deleted by the wrapper lambda after
    // it runs — presumably because Schedule requires a copyable callable and
    // Task may be move-only; TODO confirm against ThreadPool::Schedule.
    auto* task_ptr = new Task(std::move(task));
    thread_pool_->Schedule([task_ptr] {
      (*task_ptr)();
      delete task_ptr;
    });
  }
 private:
  ThreadPool* thread_pool_;  // Not owned; must outlive this executor.
};
}
#endif | #include "tsl/platform/threadpool_async_executor.h"
#include "absl/synchronization/notification.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace tsl::thread {
namespace {
// Smoke test: a task handed to the executor actually runs on the pool; the
// notification blocks until the scheduled lambda fires.
TEST(ThreadPoolAsyncExecutorTest, ExecuteTasks) {
  ThreadPool thread_pool(Env::Default(), "test", 4);
  ThreadPoolAsyncExecutor executor(&thread_pool);
  absl::Notification notification;
  executor.Execute([&] { notification.Notify(); });
  notification.WaitForNotification();
}
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/threadpool_async_executor.h | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/threadpool_async_executor_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
b9a823c3-0c38-48fc-9bee-8aaf1bede3f6 | cpp | google/quiche | quic_sendmmsg_batch_writer | quiche/quic/core/batch_writer/quic_sendmmsg_batch_writer.cc | quiche/quic/core/batch_writer/quic_sendmmsg_batch_writer_test.cc | #include "quiche/quic/core/batch_writer/quic_sendmmsg_batch_writer.h"
#include <memory>
#include <utility>
namespace quic {
// Constructs a batch writer that flushes buffered packets to `fd` via
// sendmmsg(), delegating buffering to the base QuicUdpBatchWriter.
QuicSendmmsgBatchWriter::QuicSendmmsgBatchWriter(
    std::unique_ptr<QuicBatchWriterBuffer> batch_buffer, int fd)
    : QuicUdpBatchWriter(std::move(batch_buffer), fd) {}
// Every packet can be batched with sendmmsg and nothing forces an immediate
// flush, so this unconditionally returns {can_batch=true, must_flush=false}.
QuicSendmmsgBatchWriter::CanBatchResult QuicSendmmsgBatchWriter::CanBatch(
    const char* , size_t ,
    const QuicIpAddress& ,
    const QuicSocketAddress& ,
    const PerPacketOptions* ,
    const QuicPacketWriterParams& , uint64_t ) const {
  return CanBatchResult(true, false);
}
// Flushes the buffered writes, attaching the self IP address of each packet
// as a control message on its msghdr.
QuicSendmmsgBatchWriter::FlushImplResult QuicSendmmsgBatchWriter::FlushImpl() {
  return InternalFlushImpl(
      kCmsgSpaceForIp,
      [](QuicMMsgHdr* mhdr, int i, const BufferedWrite& buffered_write) {
        mhdr->SetIpInNextCmsg(i, buffered_write.self_address);
      });
}
// Sends all buffered writes with (possibly multiple) sendmmsg calls.
// `cmsg_space` is the per-message control buffer size and `cmsg_builder`
// populates each message's cmsgs. Successfully sent packets are popped from
// the batch buffer even when a later send fails partway through.
QuicSendmmsgBatchWriter::FlushImplResult
QuicSendmmsgBatchWriter::InternalFlushImpl(size_t cmsg_space,
                                           const CmsgBuilder& cmsg_builder) {
  QUICHE_DCHECK(!IsWriteBlocked());
  QUICHE_DCHECK(!buffered_writes().empty());
  FlushImplResult result = {WriteResult(WRITE_STATUS_OK, 0),
                            0, 0};
  WriteResult& write_result = result.write_result;
  auto first = buffered_writes().cbegin();
  const auto last = buffered_writes().cend();
  // Each iteration sends the remaining [first, last) range; the kernel may
  // send fewer messages than requested, so loop until done or error.
  while (first != last) {
    QuicMMsgHdr mhdr(first, last, cmsg_space, cmsg_builder);
    int num_packets_sent;
    write_result = QuicLinuxSocketUtils::WriteMultiplePackets(
        fd(), &mhdr, &num_packets_sent);
    QUIC_DVLOG(1) << "WriteMultiplePackets sent " << num_packets_sent
                  << " out of " << mhdr.num_msgs()
                  << " packets. WriteResult=" << write_result;
    if (write_result.status != WRITE_STATUS_OK) {
      QUICHE_DCHECK_EQ(0, num_packets_sent);
      break;
    } else if (num_packets_sent == 0) {
      // OK status with zero packets sent would loop forever; treat as an
      // I/O error instead.
      QUIC_BUG(quic_bug_10825_1)
          << "WriteMultiplePackets returned OK, but no packets were sent.";
      write_result = WriteResult(WRITE_STATUS_ERROR, EIO);
      break;
    }
    first += num_packets_sent;
    result.num_packets_sent += num_packets_sent;
    result.bytes_written += write_result.bytes_written;
  }
  // Drop only the packets that actually went out; the rest stay buffered.
  batch_buffer().PopBufferedWrite(result.num_packets_sent);
  if (write_result.status != WRITE_STATUS_OK) {
    return result;
  }
  QUIC_BUG_IF(quic_bug_12537_1, !buffered_writes().empty())
      << "All packets should have been written on a successful return";
  // On success report the cumulative byte count, not just the last call's.
  write_result.bytes_written = result.bytes_written;
  return result;
}
} | #include "quiche/quic/core/batch_writer/quic_sendmmsg_batch_writer.h"
namespace quic {
namespace test {
namespace {
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/batch_writer/quic_sendmmsg_batch_writer.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/batch_writer/quic_sendmmsg_batch_writer_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
12c45c0c-e7ad-4ffe-8d32-372737070ce5 | cpp | google/cel-cpp | reference_count | common/internal/reference_count.cc | common/internal/reference_count_test.cc | #include "common/internal/reference_count.h"
#include <cstddef>
#include <cstring>
#include <memory>
#include <new>
#include <string>
#include <utility>
#include "absl/base/nullability.h"
#include "absl/log/absl_check.h"
#include "absl/strings/string_view.h"
#include "common/data.h"
#include "internal/new.h"
#include "google/protobuf/message_lite.h"
namespace cel::common_internal {
template class DeletingReferenceCount<google::protobuf::MessageLite>;
template class DeletingReferenceCount<Data>;
namespace {
// Reference-counted wrapper that takes ownership of a std::string by
// constructing it with placement-new into aligned raw storage; the string is
// destroyed in Finalize() rather than in a destructor.
class ReferenceCountedStdString final : public ReferenceCounted {
 public:
  explicit ReferenceCountedStdString(std::string&& string) {
    // Move the string into the raw buffer and shrink it so the wrapper holds
    // no more capacity than needed.
    (::new (static_cast<void*>(&string_[0])) std::string(std::move(string)))
        ->shrink_to_fit();
  }
  const char* data() const noexcept {
    // std::launder is required to access an object created via placement-new
    // in a char buffer.
    return std::launder(reinterpret_cast<const std::string*>(&string_[0]))
        ->data();
  }
  size_t size() const noexcept {
    return std::launder(reinterpret_cast<const std::string*>(&string_[0]))
        ->size();
  }
 private:
  // Called when the last strong reference is dropped; destroys the wrapped
  // string in place.
  void Finalize() noexcept override {
    std::destroy_at(std::launder(reinterpret_cast<std::string*>(&string_[0])));
  }
  // Raw storage for the placement-new'd std::string.
  alignas(std::string) char string_[sizeof(std::string)];
};
// Reference-counted string whose character data is stored inline after the
// header via a flexible array member, allocated in a single sized block.
class ReferenceCountedString final : public ReferenceCounted {
 public:
  // Allocates header + `size` bytes and copies `data` into the trailing
  // buffer.
  static const ReferenceCountedString* New(const char* data, size_t size) {
    return ::new (internal::New(offsetof(ReferenceCountedString, data_) + size))
        ReferenceCountedString(size, data);
  }
  const char* data() const noexcept { return data_; }
  size_t size() const noexcept { return size_; }
 private:
  ReferenceCountedString(size_t size, const char* data) noexcept : size_(size) {
    std::memcpy(data_, data, size);
  }
  // Custom deletion matching the sized allocation in New(); the size must be
  // captured before destroying the object.
  void Delete() noexcept override {
    void* const that = this;
    const auto size = size_;
    std::destroy_at(this);
    internal::SizedDelete(that, offsetof(ReferenceCountedString, data_) + size);
  }
  const size_t size_;
  char data_[];  // Flexible array member; actual length is size_.
};
}
// Copies `value` into a freshly allocated reference-counted buffer and
// returns the refcount along with a view over the owned bytes. `value` must
// be non-empty.
std::pair<absl::Nonnull<const ReferenceCount*>, absl::string_view>
MakeReferenceCountedString(absl::string_view value) {
  ABSL_DCHECK(!value.empty());
  const ReferenceCountedString* refcounted =
      ReferenceCountedString::New(value.data(), value.size());
  absl::string_view view(refcounted->data(), refcounted->size());
  return {refcounted, view};
}
std::pair<absl::Nonnull<const ReferenceCount*>, absl::string_view>
MakeReferenceCountedString(std::string&& value) {
ABSL_DCHECK(!value.empty());
const auto* refcount = new ReferenceCountedStdString(std::move(value));
return std::pair{refcount,
absl::string_view(refcount->data(), refcount->size())};
}
} | #include "common/internal/reference_count.h"
#include <tuple>
#include "google/protobuf/struct.pb.h"
#include "absl/base/nullability.h"
#include "common/data.h"
#include "internal/testing.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/message_lite.h"
namespace cel::common_internal {
namespace {
using ::testing::NotNull;
using ::testing::WhenDynamicCastTo;
// Test fixture type: records its own destruction through the bool it is
// given, so tests can observe when the refcount releases it.
class Object : public virtual ReferenceCountFromThis {
 public:
  explicit Object(bool& destructed) : destructed_(destructed) {}
  ~Object() { destructed_ = true; }
 private:
  bool& destructed_;  // Set to true by the destructor.
};
// Derived test type with a second virtual ReferenceCountFromThis base, used
// to verify refcount lookup works through a diamond-shaped hierarchy.
class Subobject : public Object, public virtual ReferenceCountFromThis {
 public:
  using Object::Object;
};
// Strong ref/unref round trip: the object is destroyed exactly when the last
// strong reference is released.
TEST(ReferenceCount, Strong) {
  bool destructed = false;
  Object* object;
  ReferenceCount* refcount;
  std::tie(object, refcount) = MakeReferenceCount<Subobject>(destructed);
  // Refcount must be discoverable from either virtual base subobject.
  EXPECT_EQ(GetReferenceCountForThat(*object), refcount);
  EXPECT_EQ(GetReferenceCountForThat(*static_cast<Subobject*>(object)),
            refcount);
  StrongRef(refcount);
  StrongUnref(refcount);
  EXPECT_TRUE(IsUniqueRef(refcount));
  EXPECT_FALSE(IsExpiredRef(refcount));
  EXPECT_FALSE(destructed);
  StrongUnref(refcount);
  EXPECT_TRUE(destructed);
}
// Weak reference semantics: a weak ref can be strengthened while the object
// is alive, keeps the control block alive after the object expires, and can
// no longer be strengthened once expired.
TEST(ReferenceCount, Weak) {
  bool destructed = false;
  Object* object;
  ReferenceCount* refcount;
  std::tie(object, refcount) = MakeReferenceCount<Subobject>(destructed);
  EXPECT_EQ(GetReferenceCountForThat(*object), refcount);
  EXPECT_EQ(GetReferenceCountForThat(*static_cast<Subobject*>(object)),
            refcount);
  WeakRef(refcount);
  ASSERT_TRUE(StrengthenRef(refcount));
  StrongUnref(refcount);
  EXPECT_TRUE(IsUniqueRef(refcount));
  EXPECT_FALSE(IsExpiredRef(refcount));
  EXPECT_FALSE(destructed);
  StrongUnref(refcount);
  EXPECT_TRUE(destructed);
  EXPECT_TRUE(IsExpiredRef(refcount));
  // After expiry the weak ref cannot be upgraded anymore.
  ASSERT_FALSE(StrengthenRef(refcount));
  WeakUnref(refcount);
}
// Minimal concrete Data subclass used to exercise the Data-specific refcount
// paths; the char member just gives it a nontrivial size.
class DataObject final : public Data {
 public:
  DataObject() noexcept : Data() {}
  explicit DataObject(absl::Nullable<google::protobuf::Arena*> arena) noexcept
      : Data(arena) {}
  char member_[17];
};
// Plain struct (not Data, not MessageLite) used to test the generic
// refcount code path.
struct OtherObject final {
  char data[17];
};
// A heap-allocated Data gets the Data specialization of
// DeletingReferenceCount and is registered as the object's refcount.
TEST(DeletingReferenceCount, Data) {
  auto* data = new DataObject();
  const auto* refcount = MakeDeletingReferenceCount(data);
  EXPECT_THAT(refcount, WhenDynamicCastTo<const DeletingReferenceCount<Data>*>(
                            NotNull()));
  EXPECT_EQ(common_internal::GetDataReferenceCount(data), refcount);
  StrongUnref(refcount);
}
// A heap-allocated protobuf message gets the MessageLite specialization of
// DeletingReferenceCount.
TEST(DeletingReferenceCount, MessageLite) {
  auto* message_lite = new google::protobuf::Value();
  const auto* refcount = MakeDeletingReferenceCount(message_lite);
  EXPECT_THAT(
      refcount,
      WhenDynamicCastTo<const DeletingReferenceCount<google::protobuf::MessageLite>*>(
          NotNull()));
  StrongUnref(refcount);
}
// Any other heap-allocated type falls back to the generic
// DeletingReferenceCount<T> instantiation.
TEST(DeletingReferenceCount, Other) {
  auto* other = new OtherObject();
  const auto* refcount = MakeDeletingReferenceCount(other);
  EXPECT_THAT(
      refcount,
      WhenDynamicCastTo<const DeletingReferenceCount<OtherObject>*>(NotNull()));
  StrongUnref(refcount);
}
// Emplaced (object-inside-control-block) refcount for a Data subclass; the
// refcount is also registered on the Data object itself.
TEST(EmplacedReferenceCount, Data) {
  Data* data;
  const ReferenceCount* refcount;
  std::tie(data, refcount) = MakeEmplacedReferenceCount<DataObject>();
  EXPECT_THAT(
      refcount,
      WhenDynamicCastTo<const EmplacedReferenceCount<DataObject>*>(NotNull()));
  EXPECT_EQ(common_internal::GetDataReferenceCount(data), refcount);
  StrongUnref(refcount);
}
// Emplaced refcount for a protobuf message type.
TEST(EmplacedReferenceCount, MessageLite) {
  google::protobuf::Value* message_lite;
  const ReferenceCount* refcount;
  std::tie(message_lite, refcount) =
      MakeEmplacedReferenceCount<google::protobuf::Value>();
  EXPECT_THAT(
      refcount,
      WhenDynamicCastTo<const EmplacedReferenceCount<google::protobuf::Value>*>(
          NotNull()));
  StrongUnref(refcount);
}
// Emplaced refcount for an arbitrary (non-Data, non-proto) type.
TEST(EmplacedReferenceCount, Other) {
  OtherObject* other;
  const ReferenceCount* refcount;
  std::tie(other, refcount) = MakeEmplacedReferenceCount<OtherObject>();
  EXPECT_THAT(
      refcount,
      WhenDynamicCastTo<const EmplacedReferenceCount<OtherObject>*>(NotNull()));
  StrongUnref(refcount);
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/internal/reference_count.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/internal/reference_count_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
3d22aa2e-b468-4a33-978f-ded15f042652 | cpp | tensorflow/tensorflow | gpu_command_buffer | third_party/xla/xla/stream_executor/gpu/gpu_command_buffer.cc | third_party/xla/xla/stream_executor/gpu/gpu_command_buffer_test.cc | #include "xla/stream_executor/gpu/gpu_command_buffer.h"
#include <array>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#endif
#include "absl/container/inlined_vector.h"
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "xla/stream_executor/command_buffer.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/gpu/gpu_executor.h"
#include "xla/stream_executor/gpu/gpu_kernel.h"
#include "xla/stream_executor/gpu/gpu_stream.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/typed_kernel_factory.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/path.h"
#include "tsl/platform/statusor.h"
namespace stream_executor::gpu {
absl::StatusOr<MultiKernelLoaderSpec> GetSetIfConditionKernelLoaderSpec();
absl::StatusOr<MultiKernelLoaderSpec> GetSetIfElseConditionKernelLoaderSpec();
absl::StatusOr<MultiKernelLoaderSpec> GetSetCaseConditionKernelLoaderSpec();
absl::StatusOr<MultiKernelLoaderSpec> GetSetForConditionKernelLoaderSpec();
absl::StatusOr<MultiKernelLoaderSpec> GetSetWhileConditionKernelLoaderSpec();
absl::StatusOr<MultiKernelLoaderSpec> GetNoOpKernelLoaderSpec();
using Mode = CommandBuffer::Mode;
using State = CommandBuffer::State;
// Human-readable name for a command buffer state, used in error messages.
// All State enumerators are covered, so the switch is exhaustive for valid
// input.
std::string_view to_string(State state) {
  switch (state) {
    case State::kCreate:
      return "create";
    case State::kUpdate:
      return "update";
    case State::kFinalized:
      return "finalized";
  }
}
// Builds the InternalError returned when an operation is attempted in a
// command buffer state that does not support it.
absl::Status UnsupportedStateError(State state) {
  std::string message =
      absl::StrCat("Unsupported command buffer state: ", to_string(state));
  return absl::InternalError(message);
}
static std::atomic<int64_t> allocated_execs(0);
static std::atomic<int64_t> alive_execs(0);
// Bumps both the alive and total-allocated executable-graph counters;
// returns the previous total (a unique id for the new exec).
static int64_t NotifyExecCreated() {
  alive_execs.fetch_add(1, std::memory_order_relaxed);
  return allocated_execs.fetch_add(1, std::memory_order_relaxed);
}
// Decrements the alive executable-graph counter; returns the number of execs
// still alive after this destruction.
static int64_t NotifyExecDestroyed() {
  DCHECK_GE(alive_execs.load(std::memory_order_relaxed), 1);
  return alive_execs.fetch_sub(1, std::memory_order_relaxed) - 1;
}
// Process-wide count of currently alive executable graphs.
int64_t GpuCommandBuffer::AliveExecs() {
  return alive_execs.load(std::memory_order_relaxed);
}
// Human-readable name for a command buffer mode, used in log messages.
static std::string_view ModeToString(CommandBuffer::Mode mode) {
  switch (mode) {
    case CommandBuffer::Mode::kPrimary:
      return "primary";
    case CommandBuffer::Mode::kNested:
      return "nested";
  }
}
// Wraps an existing GPU graph handle. `is_owned_graph` controls whether the
// destructor destroys `graph`. The default execution scope is pre-created so
// scope lookups never miss.
GpuCommandBuffer::GpuCommandBuffer(Mode mode, GpuExecutor* parent,
                                   GpuGraphHandle graph, bool is_owned_graph)
    : mode_(mode),
      parent_(parent),
      graph_(graph),
      is_owned_graph_(is_owned_graph) {
  VLOG(5) << "Created command buffer for graph " << graph_
          << "; mode=" << ModeToString(mode)
          << "; is_owned_graph=" << is_owned_graph_;
  execution_scopes_.try_emplace(kDefaulExecutionScope);
}
// Destroys the executable graph (if owned) before the underlying graph (if
// owned). Driver failures are logged rather than thrown, since destructors
// cannot propagate errors.
GpuCommandBuffer::~GpuCommandBuffer() {
  if (exec_ != nullptr && is_owned_graph_exec_) {
    VLOG(5) << "Destroy GPU command buffer executable graph " << exec_ << " "
            << "(remaining alive executable graphs: " << NotifyExecDestroyed()
            << ")";
    if (auto status = GpuDriver::DestroyGraphExec(exec_); !status.ok()) {
      LOG(ERROR) << "Failed to destroy GPU graph exec: " << status.message();
    }
  }
  if (graph_ != nullptr && is_owned_graph_) {
    if (auto status = GpuDriver::DestroyGraph(graph_); !status.ok()) {
      LOG(ERROR) << "Failed to destroy GPU graph: " << status.message();
    }
  }
}
// RAII helper: temporarily points `cmd_buffer` at a different executable
// graph (without ownership), saving the previous exec/ownership for restore.
GpuCommandBuffer::ScopedGpuGraphExec::ScopedGpuGraphExec(
    GpuCommandBuffer* cmd_buffer, GpuGraphExecHandle exec)
    : cmd_buffer(cmd_buffer),
      restore(cmd_buffer->exec_),
      restore_is_owned(cmd_buffer->is_owned_graph_exec_) {
  cmd_buffer->exec_ = exec;
  cmd_buffer->is_owned_graph_exec_ = false;
}
// Restores the command buffer's original exec handle and ownership flag.
GpuCommandBuffer::ScopedGpuGraphExec::~ScopedGpuGraphExec() {
  cmd_buffer->exec_ = restore;
  cmd_buffer->is_owned_graph_exec_ = restore_is_owned;
}
// Converts a DeviceMemoryBase view into the driver's raw device pointer
// type, dropping constness (the driver API takes non-const pointers).
static GpuDevicePtr AsDevicePtr(const DeviceMemoryBase& mem) {
  void* opaque = const_cast<void*>(mem.opaque());
  return reinterpret_cast<GpuDevicePtr>(opaque);
}
// Captures the stream operations issued by `function` into this command
// buffer's graph. On CUDA the capture writes directly into graph_; on ROCm
// the capture produces a new graph that replaces graph_ (which therefore
// must start empty and be owned).
absl::Status GpuCommandBuffer::Trace(
    Stream* stream, absl::AnyInvocable<absl::Status()> function) {
  TF_RETURN_IF_ERROR(CheckNotFinalized());
#if defined(TENSORFLOW_USE_ROCM)
  // ROCm capture replaces the whole graph, so a non-empty or non-owned graph
  // cannot be traced into.
  TF_ASSIGN_OR_RETURN(size_t count, GpuDriver::GraphGetNodeCount(graph_));
  if (count != 0 || !is_owned_graph_)
    return absl::InternalError(
        "Stream can't be traced on non empty command buffer");
#endif
  VLOG(5) << "Trace into GPU command buffer graph " << graph_
          << " on a stream: " << stream;
  auto gpu_stream = AsGpuStreamValue(stream);
  uint64_t start_nanos = tsl::Env::Default()->NowNanos();
#if !defined(TENSORFLOW_USE_ROCM)
  TF_RETURN_IF_ERROR(GpuDriver::StreamBeginCaptureToGraph(
      gpu_stream, graph_, GpuDriver::StreamCaptureMode::kThreadLocal));
#else
  TF_RETURN_IF_ERROR(GpuDriver::StreamBeginCapture(
      gpu_stream, GpuDriver::StreamCaptureMode::kThreadLocal));
#endif
  auto traced = function();
  // Always end capture, even if `function` failed, so the stream is left in
  // a sane state; the traced status is checked afterwards.
  GpuGraphHandle captured_graph;
  TF_RETURN_IF_ERROR(GpuDriver::StreamEndCapture(gpu_stream, &captured_graph));
#if !defined(TENSORFLOW_USE_ROCM)
  DCHECK(captured_graph == graph_) << "Stream capture should update graph_";
#else
  TF_RETURN_IF_ERROR(
      GpuDriver::DestroyGraph(std::exchange(graph_, captured_graph)));
#endif
  uint64_t end_nanos = tsl::Env::Default()->NowNanos();
  if (!traced.ok())
    return absl::InternalError(
        absl::StrCat("Failed to capture gpu graph: ", traced.message()));
  VLOG(5) << "Traced into the GPU command buffer graph " << graph_ << " (took "
          << (end_nanos - start_nanos) / 1000 << " μs)";
  return absl::OkStatus();
}
// Returns the dependency list for new nodes in a scope: the scope's latest
// barrier node, or no dependencies if the scope has no barriers yet.
GpuCommandBuffer::Dependencies GpuCommandBuffer::GetBarrier(
    ExecutionScopeId execution_scope_id) {
  ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
  return execution_scope.barriers.empty()
             ? Dependencies{}
             : Dependencies{execution_scope.barriers.back().handle};
}
// Lazily loads and caches the kernel that sets an If-conditional handle.
absl::StatusOr<GpuCommandBuffer::SetIfConditionKernel*>
GpuCommandBuffer::GetSetIfConditionKernel() {
  if (!set_if_condition_kernel_) {
    TF_ASSIGN_OR_RETURN(auto spec, GetSetIfConditionKernelLoaderSpec());
    TF_ASSIGN_OR_RETURN(
        set_if_condition_kernel_,
        SetIfConditionKernel::FactoryType::Create(parent_, spec));
  }
  return &set_if_condition_kernel_;
}
// Lazily loads and caches the kernel that sets an If/Else-conditional
// handle.
absl::StatusOr<GpuCommandBuffer::SetIfElseConditionKernel*>
GpuCommandBuffer::GetSetIfElseConditionKernel() {
  if (!set_if_else_condition_kernel_) {
    TF_ASSIGN_OR_RETURN(auto spec, GetSetIfElseConditionKernelLoaderSpec());
    TF_ASSIGN_OR_RETURN(
        set_if_else_condition_kernel_,
        SetIfElseConditionKernel::FactoryType::Create(parent_, spec));
  }
  return &set_if_else_condition_kernel_;
}
// Lazily loads and caches the kernel that sets Case-conditional handles.
absl::StatusOr<GpuCommandBuffer::SetCaseConditionKernel*>
GpuCommandBuffer::GetSetCaseConditionKernel() {
  if (!set_case_condition_kernel_) {
    TF_ASSIGN_OR_RETURN(auto spec, GetSetCaseConditionKernelLoaderSpec());
    TF_ASSIGN_OR_RETURN(
        set_case_condition_kernel_,
        SetCaseConditionKernel::FactoryType::Create(parent_, spec));
  }
  return &set_case_condition_kernel_;
}
// Lazily loads and caches the kernel that sets a For-loop conditional
// handle.
absl::StatusOr<GpuCommandBuffer::SetForConditionKernel*>
GpuCommandBuffer::GetSetForConditionKernel() {
  if (!set_for_condition_kernel_) {
    TF_ASSIGN_OR_RETURN(auto spec, GetSetForConditionKernelLoaderSpec());
    TF_ASSIGN_OR_RETURN(
        set_for_condition_kernel_,
        SetForConditionKernel::FactoryType::Create(parent_, spec));
  }
  return &set_for_condition_kernel_;
}
// Lazily loads and caches the kernel that sets a While-loop conditional
// handle.
absl::StatusOr<GpuCommandBuffer::SetWhileConditionKernel*>
GpuCommandBuffer::GetSetWhileConditionKernel() {
  if (!set_while_condition_kernel_) {
    TF_ASSIGN_OR_RETURN(auto spec, GetSetWhileConditionKernelLoaderSpec());
    TF_ASSIGN_OR_RETURN(
        set_while_condition_kernel_,
        SetWhileConditionKernel::FactoryType::Create(parent_, spec));
  }
  return &set_while_condition_kernel_;
}
// Lazily loads and caches the no-op kernel used for barrier nodes.
absl::StatusOr<GpuCommandBuffer::NoOpKernel*>
GpuCommandBuffer::GetNoOpKernel() {
  if (!noop_kernel_) {
    TF_ASSIGN_OR_RETURN(auto spec, GetNoOpKernelLoaderSpec());
    TF_ASSIGN_OR_RETURN(noop_kernel_,
                        NoOpKernel::FactoryType::Create(parent_, spec));
  }
  return &noop_kernel_;
}
// Disables execution of the synthetic barrier nodes in `exec` (they exist
// only for ordering) and recurses into nested conditional command buffers.
// On ROCm this is a no-op.
absl::Status GpuCommandBuffer::DisableBarriersExecution(
    GpuGraphExecHandle exec) {
#if !defined(TENSORFLOW_USE_ROCM)
  ExecutionScope& execution_scope = execution_scopes_[kDefaulExecutionScope];
  for (GpuGraphBarrierInfo& barrier : execution_scope.barriers) {
    // Only dedicated barrier nodes are disabled; barriers that alias a real
    // work node must keep running.
    if (barrier.is_barrier_node) {
      TF_RETURN_IF_ERROR(
          GpuDriver::GraphNodeSetEnabled(exec, barrier.handle, false));
    }
  }
  for (ConditionalCommandBuffers& cmd_buffers :
       execution_scope.conditional_command_buffers) {
    for (auto& cmd_buffer : cmd_buffers.command_buffers) {
      TF_RETURN_IF_ERROR(cmd_buffer->DisableBarriersExecution(exec));
    }
  }
#endif
  return absl::OkStatus();
}
// Guard used by all mutating operations: finalized command buffers are
// immutable until re-opened via Update().
absl::Status GpuCommandBuffer::CheckNotFinalized() {
  if (state_ != State::kFinalized) {
    return absl::OkStatus();
  }
  return absl::InternalError(
      "Command can't be added to a command buffer after it was finalized");
}
// Verifies that a conditional node recorded exactly `num_cmd_buffers`
// branch command buffers; used when updating conditional commands.
absl::Status GpuCommandBuffer::CheckNumCommandBuffers(
    const ConditionalCommandBuffers& cmd_buffers, size_t num_cmd_buffers) {
  if (cmd_buffers.handles.size() == num_cmd_buffers) {
    return absl::OkStatus();
  }
  return absl::InternalError(absl::StrCat(
      "Expected to have ", num_cmd_buffers, " conditional command buffers, got ",
      cmd_buffers.handles.size()));
}
// Adds a barrier node depending on `dependencies`. On CUDA older than 12.4
// this is a 1x1x1 launch of a no-op kernel; otherwise (newer CUDA or ROCm)
// an empty graph node is used — presumably empty nodes were insufficient on
// older CUDA; TODO confirm the driver limitation.
absl::StatusOr<GpuGraphNodeHandle> GpuCommandBuffer::CreateBarrierNode(
    const Dependencies& dependencies) {
  GpuGraphNodeHandle barrier_handle = nullptr;
#if !defined(TENSORFLOW_USE_ROCM) && CUDA_VERSION < 12040
  TF_ASSIGN_OR_RETURN(NoOpKernel * noop, GetNoOpKernel());
  TF_RETURN_IF_ERROR(GpuDriver::GraphAddKernelNode(
      &barrier_handle, graph_, dependencies, "noop",
      AsGpuKernel(&**noop)->gpu_function(), 1, 1, 1, 1, 1, 1, 0,
      nullptr, nullptr));
#else
  TF_RETURN_IF_ERROR(
      GpuDriver::GraphAddEmptyNode(&barrier_handle, graph_, dependencies));
#endif
  return barrier_handle;
}
// Collects the handles of all nodes added to the scope since its last
// barrier (or since the beginning, if there is no barrier yet); these become
// the dependencies of the next barrier node.
GpuCommandBuffer::Dependencies GpuCommandBuffer::GetBarrierDependencies(
    ExecutionScopeId execution_scope_id) {
  ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
  auto& barriers = execution_scope.barriers;
  Dependencies dependencies;
  for (size_t i = barriers.empty() ? 0 : barriers.back().nodes_offset;
       i < execution_scope.nodes.size(); ++i) {
    dependencies.push_back(execution_scope.nodes[i].handle);
  }
  return dependencies;
}
// Inserts a barrier into a single execution scope. In create mode, three
// cases: no new nodes since the last barrier -> reuse it; exactly one new
// node -> that node itself acts as the barrier; otherwise a dedicated
// barrier node is created. In update mode the graph topology is fixed, so
// only the barrier cursor advances.
absl::Status GpuCommandBuffer::Barrier(ExecutionScopeId execution_scope_id) {
  ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
  if (state_ == State::kCreate) {
    size_t nodes_offset = execution_scope.nodes.size();
    Dependencies dependencies = GetBarrierDependencies(execution_scope_id);
    // Nothing was recorded since the previous barrier: reuse it.
    if (dependencies.empty() && !execution_scope.barriers.empty()) {
      execution_scope.barriers.push_back({execution_scope.barriers.back()});
      return absl::OkStatus();
    }
    // A single trailing node can serve as the barrier itself.
    if (dependencies.size() == 1) {
      execution_scope.barriers.push_back(
          {execution_scope.nodes.back().handle, false, nodes_offset});
      return absl::OkStatus();
    }
    // Multiple (or zero, first-ever) dependencies: add a real barrier node.
    TF_ASSIGN_OR_RETURN(auto barrier_handle, CreateBarrierNode(dependencies));
    execution_scope.barriers.push_back({barrier_handle, true, nodes_offset});
    return absl::OkStatus();
  }
  if (state_ == State::kUpdate) {
    // Updates must replay the exact same barrier sequence as creation.
    if (execution_scope.update_state.barrier_idx++ >=
        execution_scope.barriers.size()) {
      return absl::InternalError(
          absl::StrFormat("Execution scope %d barrier index out of range",
                          execution_scope_id.value()));
    }
    return absl::OkStatus();
  }
  return UnsupportedStateError(state_);
}
// Inserts a joint barrier across several execution scopes: each scope first
// gets its own barrier, then one shared barrier node depending on all of
// them is appended to every scope, synchronizing them with each other.
absl::Status GpuCommandBuffer::Barrier(
    absl::Span<const ExecutionScopeId> execution_scope_ids) {
  if (execution_scope_ids.empty()) return absl::OkStatus();
  if (execution_scope_ids.size() == 1) {
    return Barrier(execution_scope_ids[0]);
  }
  // Per-scope barriers first, so the shared barrier has one dependency per
  // scope.
  for (ExecutionScopeId execution_scope_id : execution_scope_ids) {
    TF_RETURN_IF_ERROR(Barrier(execution_scope_id));
  }
  if (state_ == State::kCreate) {
    Dependencies dependencies;
    for (ExecutionScopeId execution_scope_id : execution_scope_ids) {
      ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
      dependencies.push_back(execution_scope.barriers.back().handle);
    }
    TF_ASSIGN_OR_RETURN(auto barrier_handle, CreateBarrierNode(dependencies));
    // The same barrier node is recorded in every participating scope.
    for (ExecutionScopeId execution_scope_id : execution_scope_ids) {
      ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
      size_t nodes_offset = execution_scope.nodes.size();
      execution_scope.barriers.push_back({barrier_handle, true, nodes_offset});
    }
    return absl::OkStatus();
  }
  if (state_ == State::kUpdate) {
    // Topology is frozen: just advance each scope's barrier cursor.
    for (ExecutionScopeId execution_scope_id : execution_scope_ids) {
      ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
      if (execution_scope.update_state.barrier_idx++ >=
          execution_scope.barriers.size()) {
        return absl::InternalError(
            absl::StrFormat("Execution scope %d barrier index out of range",
                            execution_scope_id.value()));
      }
    }
    return absl::OkStatus();
  }
  return UnsupportedStateError(state_);
}
// One-directional barrier: commands in `to_execution_scope_id` recorded
// after this call will wait on both scopes' prior work, but
// `from_execution_scope_id` itself is not blocked on the `to` scope. The
// combined barrier node is recorded only in the `to` scope.
absl::Status GpuCommandBuffer::Barrier(ExecutionScopeId from_execution_scope_id,
                                       ExecutionScopeId to_execution_scope_id) {
  if (from_execution_scope_id == to_execution_scope_id) {
    return Barrier(from_execution_scope_id);
  }
  // Collapse pending nodes in both scopes into per-scope barriers first.
  TF_RETURN_IF_ERROR(Barrier(from_execution_scope_id));
  TF_RETURN_IF_ERROR(Barrier(to_execution_scope_id));
  if (state_ == State::kCreate) {
    Dependencies dependencies = {
        execution_scopes_[from_execution_scope_id].barriers.back().handle,
        execution_scopes_[to_execution_scope_id].barriers.back().handle};
    TF_ASSIGN_OR_RETURN(auto barrier_handle, CreateBarrierNode(dependencies));
    ExecutionScope& execution_scope = execution_scopes_[to_execution_scope_id];
    size_t nodes_offset = execution_scope.nodes.size();
    execution_scope.barriers.push_back({barrier_handle, true, nodes_offset});
    return absl::OkStatus();
  }
  if (state_ == State::kUpdate) {
    // Only the `to` scope gained an extra barrier at creation time.
    ExecutionScope& execution_scope = execution_scopes_[to_execution_scope_id];
    if (execution_scope.update_state.barrier_idx++ >=
        execution_scope.barriers.size()) {
      return absl::InternalError(
          absl::StrFormat("Execution scope %d barrier index out of range",
                          to_execution_scope_id.value()));
    }
    return absl::OkStatus();
  }
  return UnsupportedStateError(state_);
}
// Records (create mode) or patches (update mode) a kernel launch node with
// already-packed arguments. The arity check accounts for the extra packed
// argument carrying the shared memory size when it is non-zero.
absl::Status GpuCommandBuffer::LaunchWithPackedArgs(
    ExecutionScopeId execution_scope_id, const ThreadDim& threads,
    const BlockDim& blocks, const Kernel& kernel,
    const KernelArgsPackedArrayBase& packed_args) {
  ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
  CHECK_EQ(kernel.Arity() + (packed_args.number_of_shared_bytes() > 0),
           packed_args.number_of_arguments());
  const GpuKernel* gpu_kernel = AsGpuKernel(&kernel);
  GpuFunctionHandle gpu_func = gpu_kernel->gpu_function();
  void** kernel_params =
      const_cast<void**>(packed_args.argument_addresses().data());
  if (state_ == State::kCreate) {
    // New node, ordered after the scope's latest barrier.
    Dependencies barrier = GetBarrier(execution_scope_id);
    GpuGraphNodeInfo& node_info = execution_scope.nodes.emplace_back();
    return GpuDriver::GraphAddKernelNode(
        &node_info.handle, graph_, barrier, kernel.name(), gpu_func, blocks.x,
        blocks.y, blocks.z, threads.x, threads.y, threads.z,
        packed_args.number_of_shared_bytes(), kernel_params, nullptr);
  }
  if (state_ == State::kUpdate) {
    // Update the existing node in creation order.
    GpuGraphNodeHandle node =
        execution_scope.nodes[execution_scope.update_state.node_idx++].handle;
    return GpuDriver::GraphExecKernelNodeSetParams(
        exec_, node, kernel.name(), gpu_func, blocks.x, blocks.y, blocks.z,
        threads.x, threads.y, threads.z, packed_args.number_of_shared_bytes(),
        kernel_params, nullptr);
  }
  return UnsupportedStateError(state_);
}
// Records a kernel launch, packing device-memory argument arrays through the
// kernel's custom packing function when the arguments are not already
// packed.
absl::Status GpuCommandBuffer::Launch(ExecutionScopeId execution_scope_id,
                                      const ThreadDim& threads,
                                      const BlockDim& blocks,
                                      const Kernel& kernel,
                                      const KernelArgs& args) {
  TF_RETURN_IF_ERROR(CheckNotFinalized());
  if (auto* packed = DynCast<KernelArgsPackedArrayBase>(&args)) {
    return LaunchWithPackedArgs(execution_scope_id, threads, blocks, kernel,
                                *packed);
  }
  if (auto* device_mem = DynCast<KernelArgsDeviceMemoryArray>(&args)) {
    auto& pack = kernel.args_packing();
    if (!pack) {
      return absl::InternalError(
          "Kernel is missing a custom arguments packing function for device "
          "memory arguments array");
    }
    TF_ASSIGN_OR_RETURN(auto packed, pack(kernel, *device_mem));
    return LaunchWithPackedArgs(execution_scope_id, threads, blocks, kernel,
                                *packed);
  }
  return absl::InternalError("Unsupported kernel arguments type");
}
// Embeds another command buffer's graph as a child graph node, created after
// the scope's latest barrier (create mode) or patched in place (update
// mode).
absl::Status GpuCommandBuffer::AddNestedCommandBuffer(
    ExecutionScopeId execution_scope_id, const CommandBuffer& nested) {
  ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
  TF_RETURN_IF_ERROR(CheckNotFinalized());
  GpuGraphHandle child_graph = GpuCommandBuffer::Cast(&nested)->graph();
  if (state_ == State::kCreate) {
    Dependencies barrier = GetBarrier(execution_scope_id);
    GpuGraphNodeInfo& node_info = execution_scope.nodes.emplace_back();
    return GpuDriver::GraphAddChildNode(&node_info.handle, graph_, barrier,
                                        child_graph);
  }
  if (state_ == State::kUpdate) {
    GpuGraphNodeHandle node =
        execution_scope.nodes[execution_scope.update_state.node_idx++].handle;
    return GpuDriver::GraphExecChildNodeSetParams(exec_, node, child_graph);
  }
  return UnsupportedStateError(state_);
}
// Records a device-to-device memcpy of `size` bytes as a graph node (create
// mode) or updates the existing node's parameters (update mode).
absl::Status GpuCommandBuffer::MemcpyDeviceToDevice(
    ExecutionScopeId execution_scope_id, DeviceMemoryBase* dst,
    const DeviceMemoryBase& src, uint64_t size) {
  ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
  TF_RETURN_IF_ERROR(CheckNotFinalized());
  if (state_ == State::kCreate) {
    Dependencies barrier = GetBarrier(execution_scope_id);
    GpuGraphNodeInfo& node_info = execution_scope.nodes.emplace_back();
    return GpuDriver::GraphAddMemcpyD2DNode(
        parent_->gpu_context(), &node_info.handle, graph_, barrier,
        AsDevicePtr(*dst), AsDevicePtr(src), size);
  }
  if (state_ == State::kUpdate) {
    GpuGraphNodeHandle node =
        execution_scope.nodes[execution_scope.update_state.node_idx++].handle;
    return GpuDriver::GraphExecMemcpyD2DNodeSetParams(
        parent_->gpu_context(), exec_, node, AsDevicePtr(*dst),
        AsDevicePtr(src), size);
  }
  return UnsupportedStateError(state_);
}
// Records a memset of `num_elements` copies of `bit_pattern` into `dst` as a
// graph node (create mode) or updates the existing node (update mode).
absl::Status GpuCommandBuffer::Memset(ExecutionScopeId execution_scope_id,
                                      DeviceMemoryBase* dst,
                                      CommandBuffer::BitPattern bit_pattern,
                                      size_t num_elements) {
  ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
  TF_RETURN_IF_ERROR(CheckNotFinalized());
  if (state_ == State::kCreate) {
    Dependencies barrier = GetBarrier(execution_scope_id);
    GpuGraphNodeInfo& node_info = execution_scope.nodes.emplace_back();
    return GpuDriver::GraphAddMemsetNode(
        parent_->gpu_context(), &node_info.handle, graph_, barrier,
        AsDevicePtr(*dst), bit_pattern, num_elements);
  }
  if (state_ == State::kUpdate) {
    GpuGraphNodeHandle node =
        execution_scope.nodes[execution_scope.update_state.node_idx++].handle;
    return GpuDriver::GraphExecMemsetNodeSetParams(
        parent_->gpu_context(), exec_, node, AsDevicePtr(*dst), bit_pattern,
        num_elements);
  }
  return UnsupportedStateError(state_);
}
// Convenience alias for a span of conditional handles passed to the
// set-condition callbacks below.
using ConditionalHandles = absl::Span<const GpuGraphConditionalHandle>;
// Adapts a plain Builder into a ConditionBuilder by discarding the
// conditional handle argument (for bodies that don't need to refresh the
// condition themselves, e.g. If/IfElse/Case branches).
GpuCommandBuffer::ConditionBuilder
GpuCommandBuffer::ToConditionBuilder(Builder builder) {
  return [builder = std::move(builder)](CommandBuffer* cmd_buffer,
                                        GpuGraphConditionalHandle) {
    return builder(cmd_buffer);
  };
}
// Creates `num_handles` conditional handles attached to this command
// buffer's graph. Each handle is later written by a set-condition kernel and
// consumed by a conditional graph node.
absl::StatusOr<std::vector<GpuGraphConditionalHandle>>
GpuCommandBuffer::CreateConditionalHandles(size_t num_handles) {
  std::vector<GpuGraphConditionalHandle> handles;
  // Reserve up front so emplace_back never reallocates mid-loop.
  handles.reserve(num_handles);
  for (size_t i = 0; i < num_handles; ++i) {
    TF_RETURN_IF_ERROR(GpuDriver::GraphConditionalHandleCreate(
        &handles.emplace_back(), graph_, parent_->gpu_context(), 0, 0));
  }
  return handles;
}
// Creates one nested command buffer per conditional handle, records it by
// running the matching builder, and finalizes it. The nested buffers do not
// own their graphs (`is_owned_graph == false`): each graph belongs to the
// conditional node it was created from.
absl::StatusOr<std::vector<std::unique_ptr<GpuCommandBuffer>>>
GpuCommandBuffer::CreateConditionalCommandBuffers(
    absl::Span<const GpuGraphConditionalHandle> handles,
    absl::Span<const GpuGraphHandle> graphs,
    absl::Span<const ConditionBuilder> builders) {
  std::vector<std::unique_ptr<GpuCommandBuffer>> cmd_buffers;
  // Reserve up front so push_back never reallocates mid-loop.
  cmd_buffers.reserve(handles.size());
  CommandBuffer::Mode nested = CommandBuffer::Mode::kNested;
  bool is_owned_graph = false;
  for (size_t i = 0; i < handles.size(); ++i) {
    auto command_buffer = std::make_unique<GpuCommandBuffer>(
        nested, parent_, graphs[i], is_owned_graph);
    TF_RETURN_IF_ERROR(builders[i](command_buffer.get(), handles[i]));
    TF_RETURN_IF_ERROR(command_buffer->Finalize());
    cmd_buffers.push_back(std::move(command_buffer));
  }
  return cmd_buffers;
}
// Re-records each nested conditional command buffer against the primary
// executable graph: temporarily points it at `exec_`, switches it to update
// mode, replays its builder, and finalizes it again.
absl::Status GpuCommandBuffer::UpdateConditionalCommandBuffers(
    absl::Span<const GpuGraphConditionalHandle> handles,
    absl::Span<const std::unique_ptr<GpuCommandBuffer>> command_buffers,
    absl::Span<const ConditionBuilder> builders) {
  for (size_t i = 0; i < command_buffers.size(); ++i) {
    // Scoped swap: the nested buffer updates nodes in the primary exec_.
    ScopedGpuGraphExec scoped_exec(command_buffers[i].get(), exec_);
    TF_RETURN_IF_ERROR(command_buffers[i]->Update());
    TF_RETURN_IF_ERROR(builders[i](command_buffers[i].get(), handles[i]));
    TF_RETURN_IF_ERROR(command_buffers[i]->Finalize());
  }
  return absl::OkStatus();
}
// Adds one conditional node of the given `type` per handle to the graph,
// each depending on the scope's current barrier, and returns the child graph
// that the driver created for each conditional node's body.
absl::StatusOr<std::vector<GpuGraphHandle>>
GpuCommandBuffer::CreateConditionalNodes(
    ExecutionScopeId execution_scope_id, ConditionType type,
    absl::Span<const GpuGraphConditionalHandle> handles) {
  ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
  std::vector<GpuGraphHandle> conditional_graphs;
  // Reserve up front so push_back never reallocates mid-loop.
  conditional_graphs.reserve(handles.size());
  using ConditionalParams = GpuDriver::GpuGraphConditionalNodeParams;
  using ConditionalResult = GpuDriver::GpuGraphConditionalNodeParams::Result;
  for (GpuGraphConditionalHandle handle : handles) {
    Dependencies barrier = GetBarrier(execution_scope_id);
    GpuGraphNodeInfo& node_info = execution_scope.nodes.emplace_back();
    ConditionalParams params;
    params.type = type;
    params.handle = handle;
    params.context = parent_->gpu_context();
    TF_ASSIGN_OR_RETURN(
        GpuDriver::GpuGraphNodeResult result,
        GpuDriver::GraphAddNode(&node_info.handle, graph_, barrier, params));
    // The driver returns the conditional body graph for this node.
    conditional_graphs.push_back(std::get<ConditionalResult>(result).graph);
  }
  return conditional_graphs;
}
// Shared machinery behind If/IfElse/Case/For/While: creates (or updates) a
// group of conditional nodes plus the nested command buffers recorded into
// their body graphs. `set_condition` records the commands that write the
// conditional handles; one entry in `builders` records each body.
absl::Status GpuCommandBuffer::CreateConditionalCommand(
    ExecutionScopeId execution_scope_id, ConditionType type,
    SetConditionFn set_condition, absl::Span<const ConditionBuilder> builders) {
  ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
  TF_RETURN_IF_ERROR(CheckNotFinalized());
  size_t num_handles = builders.size();
  if (state_ == State::kCreate) {
    // Create handles, record the condition-setting commands, then add the
    // conditional nodes behind a barrier and record their body buffers.
    TF_ASSIGN_OR_RETURN(auto handles, CreateConditionalHandles(num_handles));
    TF_RETURN_IF_ERROR(set_condition(execution_scope_id, handles));
    TF_RETURN_IF_ERROR(Barrier(execution_scope_id));
    TF_ASSIGN_OR_RETURN(
        auto graphs, CreateConditionalNodes(execution_scope_id, type, handles));
    TF_ASSIGN_OR_RETURN(auto cmd_buffers, CreateConditionalCommandBuffers(
                                              handles, graphs, builders));
    // Keep handles and nested buffers alive for later update passes.
    execution_scope.conditional_command_buffers.push_back(
        {std::move(handles), std::move(cmd_buffers)});
    return absl::OkStatus();
  }
  if (state_ == State::kUpdate) {
    // Reuse the handles/buffers recorded at creation time, replay the
    // condition-setting commands, and re-record the nested bodies.
    ConditionalCommandBuffers& cond_cmd_buffers =
        execution_scope.conditional_command_buffers[execution_scope.update_state
                                                        .conditional_idx++];
    TF_RETURN_IF_ERROR(CheckNumCommandBuffers(cond_cmd_buffers, num_handles));
    TF_RETURN_IF_ERROR(
        set_condition(execution_scope_id, cond_cmd_buffers.handles));
    TF_RETURN_IF_ERROR(Barrier(execution_scope_id));
    // Skip over the conditional nodes themselves in the node update cursor.
    execution_scope.update_state.node_idx += num_handles;
    return UpdateConditionalCommandBuffers(
        cond_cmd_buffers.handles,
        absl::MakeSpan(cond_cmd_buffers.command_buffers), builders);
  }
  return UnsupportedStateError(state_);
}
// Records a conditional command that runs `then_builder`'s commands only
// when `predicate` is true. The SetIfCondition kernel is launched with the
// single conditional handle and the predicate to set the condition.
absl::Status GpuCommandBuffer::If(ExecutionScopeId execution_scope_id,
                                  DeviceMemory<bool> predicate,
                                  Builder then_builder) {
  TF_ASSIGN_OR_RETURN(SetIfConditionKernel * set_if_condition,
                      GetSetIfConditionKernel());
  auto set_cond_fn = [&](ExecutionScopeId id, ConditionalHandles handles) {
    return CommandBuffer::Launch(*set_if_condition, id, ThreadDim(), BlockDim(),
                                 handles[0], predicate);
  };
  // One branch => one condition builder wrapping the plain builder.
  std::array<ConditionBuilder, 1> builders = {
      ToConditionBuilder(std::move(then_builder))};
  return CreateConditionalCommand(execution_scope_id, ConditionType::kIf,
                                  set_cond_fn, builders);
}
// Records a two-way conditional: `then_builder` runs when `predicate` is
// true, `else_builder` otherwise. The SetIfElseCondition kernel is launched
// with both conditional handles and the predicate.
absl::Status GpuCommandBuffer::IfElse(ExecutionScopeId execution_scope_id,
                                      DeviceMemory<bool> predicate,
                                      Builder then_builder,
                                      Builder else_builder) {
  TF_ASSIGN_OR_RETURN(SetIfElseConditionKernel * set_if_else_condition,
                      GetSetIfElseConditionKernel());
  auto set_cond_fn = [&](ExecutionScopeId id, ConditionalHandles handles) {
    return CommandBuffer::Launch(*set_if_else_condition, id, ThreadDim(),
                                 BlockDim(), handles[0], handles[1], predicate);
  };
  // handles[0] guards the then-branch, handles[1] the else-branch.
  std::array<ConditionBuilder, 2> builders = {
      ToConditionBuilder(std::move(then_builder)),
      ToConditionBuilder(std::move(else_builder))};
  return CreateConditionalCommand(execution_scope_id, ConditionType::kIf,
                                  set_cond_fn, builders);
}
// Records a multi-way branch on `index`: branch i runs when index == i.
// Branches are processed in batches because the SetCaseCondition kernel
// takes a fixed number (8) of conditional handles per launch; only the last
// batch enables default handling for an out-of-range index.
//
// Fix vs. original: the loop compared int32_t against size_t
// (`batch_offset < branches.size()`) and implicitly narrowed
// `handles.size()` to int32_t; both conversions are now explicit, with no
// behavior change for realistic branch counts.
absl::Status GpuCommandBuffer::Case(ExecutionScopeId execution_scope_id,
                                    DeviceMemory<int32_t> index,
                                    std::vector<Builder> branches) {
  TF_ASSIGN_OR_RETURN(SetCaseConditionKernel * set_case_condition,
                      GetSetCaseConditionKernel());
  // Fixed by the SetCaseCondition kernel arity (8 handle parameters).
  constexpr int32_t kBranchBatchSize = 8;
  const int32_t num_branches = static_cast<int32_t>(branches.size());
  int32_t batch_offset = 0;
  while (batch_offset < num_branches) {
    int32_t remaining_branches = num_branches - batch_offset;
    int32_t batch_size;
    bool enable_conditional_default;
    if (remaining_branches <= kBranchBatchSize) {
      // Last batch: let the kernel handle an index outside all branches.
      batch_size = remaining_branches;
      enable_conditional_default = true;
    } else {
      batch_size = kBranchBatchSize;
      enable_conditional_default = false;
    }
    auto set_cond_fn = [&, batch_offset, enable_conditional_default](
                           ExecutionScopeId id, ConditionalHandles handles) {
      int32_t num_handles = static_cast<int32_t>(handles.size());
      // Pad up to the fixed kernel arity; only the first `num_handles`
      // entries correspond to real branches.
      std::vector<GpuGraphConditionalHandle> padded_handles(handles.begin(),
                                                            handles.end());
      padded_handles.resize(kBranchBatchSize);
      return CommandBuffer::Launch(
          *set_case_condition, id, ThreadDim(), BlockDim(), padded_handles[0],
          padded_handles[1], padded_handles[2], padded_handles[3],
          padded_handles[4], padded_handles[5], padded_handles[6],
          padded_handles[7], index, batch_offset, num_handles,
          enable_conditional_default);
    };
    absl::InlinedVector<ConditionBuilder, kBranchBatchSize> builders;
    builders.reserve(batch_size);
    for (int z = 0; z < batch_size; ++z) {
      int branch_offset = z + batch_offset;
      builders.push_back(
          ToConditionBuilder(std::move(branches[branch_offset])));
    }
    TF_RETURN_IF_ERROR(CreateConditionalCommand(
        execution_scope_id, ConditionType::kIf, set_cond_fn, builders));
    batch_offset += batch_size;
  }
  return absl::OkStatus();
}
// Records a counted loop: `loop_counter` is zeroed, then the SetForCondition
// kernel (launched with the handle, counter, and iteration count) controls
// whether the body graph runs; the body re-launches the same kernel after
// each iteration to refresh the condition.
absl::Status GpuCommandBuffer::For(ExecutionScopeId execution_scope_id,
                                   int32_t num_iteration,
                                   DeviceMemory<int32_t> loop_counter,
                                   Builder body_builder) {
  TF_ASSIGN_OR_RETURN(SetForConditionKernel * set_for_condition,
                      GetSetForConditionKernel());
  // Initialize the counter to zero before the first condition check.
  TF_RETURN_IF_ERROR(Memset(execution_scope_id, &loop_counter, uint32_t{0}, 1));
  TF_RETURN_IF_ERROR(Barrier(execution_scope_id));
  auto set_cond_fn = [&](ExecutionScopeId id, ConditionalHandles handles) {
    return CommandBuffer::Launch(*set_for_condition, id, ThreadDim(),
                                 BlockDim(), handles[0], loop_counter,
                                 num_iteration);
  };
  auto body = [&](CommandBuffer* body, GpuGraphConditionalHandle handle) {
    TF_RETURN_IF_ERROR(body_builder(body));
    TF_RETURN_IF_ERROR(body->Barrier());
    // Re-evaluate the loop condition at the end of every iteration.
    return body->Launch(*set_for_condition, ThreadDim(), BlockDim(), handle,
                        loop_counter, num_iteration);
  };
  std::array<ConditionBuilder, 1> builders = {std::move(body)};
  return CreateConditionalCommand(execution_scope_id, ConditionType::kWhile,
                                  set_cond_fn, builders);
}
// Records a while loop: `cond_builder` computes `pred`, the
// SetWhileCondition kernel copies it into the conditional handle, and the
// body re-runs `cond_builder` and refreshes the handle after each iteration.
absl::Status GpuCommandBuffer::While(ExecutionScopeId execution_scope_id,
                                     DeviceMemory<bool> pred,
                                     ExecutionScopeBuilder cond_builder,
                                     Builder body_builder) {
  TF_ASSIGN_OR_RETURN(SetWhileConditionKernel * set_while_condition,
                      GetSetWhileConditionKernel());
  // Record the condition computation once up front so the first check sees
  // an up-to-date predicate.
  TF_RETURN_IF_ERROR(cond_builder(execution_scope_id, this));
  TF_RETURN_IF_ERROR(Barrier(execution_scope_id));
  auto set_cond_fn = [&](ExecutionScopeId id, ConditionalHandles handles) {
    return CommandBuffer::Launch(*set_while_condition, id, ThreadDim(),
                                 BlockDim(), handles[0], pred);
  };
  auto body = [&](CommandBuffer* body, GpuGraphConditionalHandle handle) {
    TF_RETURN_IF_ERROR(body_builder(body));
    TF_RETURN_IF_ERROR(body->Barrier());
    // Re-compute the predicate inside the body, then refresh the handle.
    TF_RETURN_IF_ERROR(cond_builder(kDefaulExecutionScope, body));
    TF_RETURN_IF_ERROR(body->Barrier());
    return body->Launch(*set_while_condition, ThreadDim(), BlockDim(), handle,
                        pred);
  };
  std::array<ConditionBuilder, 1> builders = {std::move(body)};
  return CreateConditionalCommand(execution_scope_id, ConditionType::kWhile,
                                  set_cond_fn, builders);
}
// Finalizes the command buffer. For primary buffers recorded for the first
// time this instantiates the executable graph (with a trim-and-retry path on
// driver OOM); for updates and nested buffers it only flips the state.
absl::Status GpuCommandBuffer::Finalize() {
  TF_RETURN_IF_ERROR(CheckNotFinalized());
#if !defined(TENSORFLOW_USE_ROCM)
  // NOTE(review): an empty graph gets a no-op kernel node inserted —
  // presumably because instantiating a node-less CUDA graph is not
  // supported; confirm against the CUDA driver docs.
  TF_ASSIGN_OR_RETURN(auto node_count, GpuDriver::GraphGetNodeCount(graph_));
  if (node_count == 0) {
    GpuGraphNodeHandle empty_node_handle = nullptr;
    TF_ASSIGN_OR_RETURN(NoOpKernel * noop, GetNoOpKernel());
    TF_RETURN_IF_ERROR(GpuDriver::GraphAddKernelNode(
        &empty_node_handle, graph_, {}, "noop",
        AsGpuKernel(&**noop)->gpu_function(), 1, 1, 1, 1, 1, 1, 0,
        nullptr, nullptr));
  }
#endif
  // With high verbosity, dump the graph to a temp file in DOT format.
  if (state_ == State::kCreate && VLOG_IS_ON(10)) {
    std::string path = tsl::io::GetTempFilename("dot");
    auto printed = GpuDriver::GraphDebugDotPrint(
        graph_, path.c_str(), VLOG_IS_ON(100));
    if (VLOG_IS_ON(100) && printed.ok()) {
      VLOG(100) << "Printed Gpu graph " << graph_ << " to: " << path << "\n"
                << *printed;
    }
  }
  // Aggregate counts across all execution scopes for logging/errors.
  size_t num_nodes = 0, num_cond_cmd_buffers = 0;
  for (auto& [_, execution_scope] : execution_scopes_) {
    num_nodes += execution_scope.nodes.size();
    num_cond_cmd_buffers += execution_scope.conditional_command_buffers.size();
  }
  if (mode_ == Mode::kPrimary && state_ == State::kCreate) {
    // First finalization of a primary buffer: instantiate the executable.
    GpuDriver::GraphInstantiateFlags flags;
    uint64_t start_nanos = tsl::Env::Default()->NowNanos();
    auto instantiated = GpuDriver::GraphInstantiate(&exec_, graph_, flags);
    if (instantiated.code() == absl::StatusCode::kResourceExhausted) {
      // On OOM, ask the device to release cached graph memory and retry once.
      LOG(WARNING) << "Retry CUDA graph instantiation after OOM error"
                   << "; execution_scopes: " << execution_scopes_.size()
                   << "; nodes: " << num_nodes
                   << "; conditionals: " << num_cond_cmd_buffers
                   << "; alive executable graphs: " << AliveExecs();
      TF_RETURN_IF_ERROR(parent_->TrimGraphMemory());
      auto retry = GpuDriver::GraphInstantiate(&exec_, graph_, flags);
      if (retry.code() == absl::StatusCode::kResourceExhausted) {
        return absl::ResourceExhaustedError(absl::StrFormat(
            "CUDA driver ran out of memory trying to instantiate CUDA graph "
            "with %d nodes and %d conditionals (total of %d alive CUDA graphs "
            "in the process). You can try to (a) Give more memory to CUDA "
            "driver by reducing XLA_CLIENT_MEM_FRACTION (b) Disable "
            "CUDA graph with 'XLA_FLAGS=--xla_gpu_enable_command_buffer=' "
            "(empty set). Original error: %s",
            num_nodes, num_cond_cmd_buffers, AliveExecs(), retry.message()));
      } else {
        TF_RETURN_IF_ERROR(retry);
      }
    } else {
      TF_RETURN_IF_ERROR(instantiated);
    }
    uint64_t end_nanos = tsl::Env::Default()->NowNanos();
    VLOG(5) << "Instantiated executable graph #" << NotifyExecCreated() << " "
            << exec_ << " in " << (end_nanos - start_nanos) / 1000 << " μs"
            << "; execution_scopes: " << execution_scopes_.size()
            << "; nodes: " << num_nodes
            << "; conditionals: " << num_cond_cmd_buffers
            << "; alive executable graphs: " << AliveExecs();
#if !defined(TENSORFLOW_USE_ROCM) && CUDA_VERSION < 12040
    TF_RETURN_IF_ERROR(DisableBarriersExecution(exec_));
#endif
  } else if (mode_ == Mode::kPrimary && state_ == State::kUpdate) {
    // Re-finalization after an update pass: exec_ already exists.
    VLOG(5) << "Finalize executable graph " << exec_ << " update #"
            << num_updates_++ << " "
            << "(alive executable graphs: " << AliveExecs() << ")";
  } else if (mode_ == Mode::kNested) {
    // Nested buffers never own an executable graph of their own.
    VLOG(5) << "Finalize nested command buffer without instantiating "
               "executable graph";
  }
  state_ = State::kFinalized;
  return absl::OkStatus();
}
// Switches a finalized command buffer back into update mode so commands can
// be re-recorded against the existing executable graph. Requires that an
// executable graph exists and that the buffer is currently finalized.
absl::Status GpuCommandBuffer::Update() {
  if (exec_ == nullptr) {
    return absl::InternalError(
        "Command buffer has to have a graph executable to be updated");
  }
  if (state_ != State::kFinalized) {
    return absl::InternalError(
        "Command buffer has to be finalized first before it can be updated");
  }
  VLOG(5) << "Begin " << (mode_ == Mode::kPrimary ? "primary" : "nested")
          << " command buffer update for executable graph " << exec_;
  state_ = State::kUpdate;
  // Rewind every scope's update cursors so the next recording pass walks the
  // existing nodes/conditionals from the beginning.
  for (auto& scope_entry : execution_scopes_) {
    scope_entry.second.update_state = ExecutionScope::UpdateState();
  }
  return absl::OkStatus();
}
// Returns the recorded graph nodes of the given execution scope, or an empty
// span if the scope id is unknown.
absl::Span<const GpuCommandBuffer::GpuGraphNodeInfo> GpuCommandBuffer::nodes(
    ExecutionScopeId id) const {
  auto scope_it = execution_scopes_.find(id);
  if (scope_it == execution_scopes_.end()) {
    return {};
  }
  return scope_it->second.nodes;
}
// Returns the recorded barriers of the given execution scope, or an empty
// span if the scope id is unknown.
absl::Span<const GpuCommandBuffer::GpuGraphBarrierInfo>
GpuCommandBuffer::barriers(ExecutionScopeId id) const {
  auto scope_it = execution_scopes_.find(id);
  if (scope_it == execution_scopes_.end()) {
    return {};
  }
  return scope_it->second.barriers;
}
// Launches the instantiated executable graph on `stream`. Only primary
// command buffers own an executable graph, so nested buffers are rejected.
absl::Status GpuCommandBuffer::Submit(Stream* stream) {
  if (mode_ == CommandBuffer::Mode::kPrimary) {
    VLOG(3) << "Launch command buffer executable graph " << exec_
            << " on a stream: " << stream;
    return GpuDriver::GraphLaunch(exec_, AsGpuStreamValue(stream));
  }
  return absl::InvalidArgumentError(
      "Can't submit non-primary command buffer for execution");
}
} | #include "xla/stream_executor/gpu/gpu_command_buffer.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/ascii.h"
#include "xla/service/platform_util.h"
#include "xla/stream_executor/command_buffer.h"
#include "xla/stream_executor/cuda/cuda_platform_id.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/gpu/gpu_test_kernels.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/rocm/rocm_platform_id.h"
#include "xla/stream_executor/semantic_version.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/trace_command_buffer_factory.h"
#include "xla/stream_executor/typed_kernel_factory.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace stream_executor::gpu {
using ExecutionScopeId = CommandBuffer::ExecutionScopeId;
// Resolves the canonical GPU platform (e.g. CUDA or ROCM) for this build and
// returns it from the platform manager. Crashes (via .value()) if no GPU
// platform is available.
static Platform* GpuPlatform() {
  std::string platform_name = absl::AsciiStrToUpper(
      xla::PlatformUtil::CanonicalPlatformName("gpu").value());
  return PlatformManager::PlatformWithName(platform_name).value();
}
// Builds a loader spec for the in-process "AddI32" test kernel, which takes
// three arguments (two input buffers and one output buffer).
static MultiKernelLoaderSpec GetAddI32KernelSpec() {
  MultiKernelLoaderSpec spec(3);
  spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
  return spec;
}
// Typed kernel factory aliases for the in-process test kernels used below.
using AddI32Kernel =
    TypedKernelFactory<DeviceMemory<int32_t>, DeviceMemory<int32_t>,
                       DeviceMemory<int32_t>>;
using MulI32Kernel =
    TypedKernelFactory<DeviceMemory<int32_t>, DeviceMemory<int32_t>,
                       DeviceMemory<int32_t>>;
using IncAndCmpKernel =
    TypedKernelFactory<DeviceMemory<int32_t>, DeviceMemory<bool>, int32_t>;
using AddI32Ptrs3 = TypedKernelFactory<internal::Ptrs3<int32_t>>;
// Shorthands for the two command buffer modes exercised by the tests.
static constexpr auto nested = CommandBuffer::Mode::kNested;
static constexpr auto primary = CommandBuffer::Mode::kPrimary;
// Returns the dependency handles of a recorded graph node. If the driver
// query fails, returns a single sentinel handle that will never match any
// real node, so equality assertions in the tests fail loudly.
template <typename Info>
static std::vector<GpuGraphNodeHandle> Deps(Info info) {
  auto deps = GpuDriver::GraphNodeGetDependencies(info.handle);
  if (!deps.ok()) {
    return {GpuGraphNodeHandle(0xDEADBEEF)};
  }
  return *deps;
}
// Packs the handles of the given node infos into a vector, in argument
// order, for comparison against Deps(...).
template <typename... Infos>
static std::vector<GpuGraphNodeHandle> ExpectedDeps(Infos... info) {
  return {info.handle...};
}
// Returns true iff `executor` is a CUDA device whose runtime AND driver
// versions are both at least 12.3 (the first release with conditional
// command buffer support exercised by these tests).
static bool IsAtLeastCuda12300(
    const stream_executor::StreamExecutor* executor) {
  if (executor->GetPlatform()->id() != cuda::kCudaPlatformId) {
    return false;
  }
  const auto& description = executor->GetDeviceDescription();
  SemanticVersion effective_version =
      std::min(description.runtime_version(), description.driver_version());
  return !(effective_version < SemanticVersion{12, 3, 0});
}
// Records a single AddI32 launch into a primary command buffer, submits it,
// then updates the buffer in place to write into a different output buffer
// and verifies both results.
TEST(GpuCommandBufferTest, LaunchSingleKernel) {
  Platform* platform = GpuPlatform();
  StreamExecutor* executor = platform->ExecutorForDevice(0).value();
  TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
  MultiKernelLoaderSpec spec(3);
  spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
  TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, spec));
  int64_t length = 4;
  int64_t byte_length = sizeof(int32_t) * length;
  // Inputs: a = {1,1,1,1}, b = {2,2,2,2}; output c zeroed.
  DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
  DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
  DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
  TF_ASSERT_OK(stream->Memset32(&a, 1, byte_length));
  TF_ASSERT_OK(stream->Memset32(&b, 2, byte_length));
  TF_ASSERT_OK(stream->MemZero(&c, byte_length));
  auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
  TF_ASSERT_OK(cmd_buffer->Launch(add, ThreadDim(), BlockDim(4), a, b, c));
  TF_ASSERT_OK(cmd_buffer->Finalize());
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  // c = a + b = {3,3,3,3}.
  std::vector<int32_t> dst(4, 42);
  TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
  std::vector<int32_t> expected = {3, 3, 3, 3};
  ASSERT_EQ(dst, expected);
  // Update the command buffer to write into `d` instead of `c`.
  DeviceMemory<int32_t> d = executor->AllocateArray<int32_t>(length, 0);
  TF_ASSERT_OK(stream->MemZero(&d, byte_length));
  TF_ASSERT_OK(cmd_buffer->Update());
  TF_ASSERT_OK(cmd_buffer->Launch(add, ThreadDim(), BlockDim(4), a, b, d));
  TF_ASSERT_OK(cmd_buffer->Finalize());
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  std::fill(dst.begin(), dst.end(), 42);
  TF_ASSERT_OK(stream->Memcpy(dst.data(), d, byte_length));
  ASSERT_EQ(dst, expected);
}
// Builds a command buffer by tracing stream operations (a single AddI32Ptrs3
// launch with custom argument packing) and verifies the traced buffer
// produces the expected result. Skipped on ROCm and on CUDA < 12.3.
TEST(CudaCommandBufferTest, TraceSingleKernel) {
  Platform* platform = GpuPlatform();
  StreamExecutor* executor = platform->ExecutorForDevice(0).value();
  if (platform->id() == rocm::kROCmPlatformId) {
    GTEST_SKIP() << "Not supported on ROCM";
  }
  if (platform->id() == cuda::kCudaPlatformId &&
      executor->GetDeviceDescription().runtime_version() <
          SemanticVersion{12, 3, 0}) {
    GTEST_SKIP() << "Command buffer tracing is not supported";
  }
  TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
  // Custom args packing: repack three device buffers into a Ptrs3 struct.
  MultiKernelLoaderSpec spec(1, [&](const Kernel& kernel,
                                    const KernelArgs& args) {
    auto bufs = Cast<KernelArgsDeviceMemoryArray>(&args)->device_memory_args();
    auto cast = [](auto m) { return reinterpret_cast<int32_t*>(m.opaque()); };
    return PackKernelArgs(0, internal::Ptrs3<int32_t>{
                                 cast(bufs[0]),
                                 cast(bufs[1]),
                                 cast(bufs[2]),
                             });
  });
  spec.AddInProcessSymbol(internal::GetAddI32Ptrs3Kernel(), "AddI32Ptrs3");
  TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Ptrs3::Create(executor, spec));
  int64_t length = 4;
  int64_t byte_length = sizeof(int32_t) * length;
  // Inputs: a = {1,1,1,1}, b = {2,2,2,2}; output c zeroed.
  DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
  DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
  DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
  TF_ASSERT_OK(stream->Memset32(&a, 1, byte_length));
  TF_ASSERT_OK(stream->Memset32(&b, 2, byte_length));
  TF_ASSERT_OK(stream->MemZero(&c, byte_length));
  KernelArgsDeviceMemoryArray args({a, b, c}, 0);
  // Trace the launch into a primary command buffer and submit it.
  TF_ASSERT_OK_AND_ASSIGN(auto cmd_buffer, TraceCommandBufferFactory::Create(
                                               executor,
                                               [&](Stream* stream) {
                                                 return stream->Launch(
                                                     ThreadDim(), BlockDim(4),
                                                     *add, args);
                                               },
                                               primary));
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  // c = a + b = {3,3,3,3}.
  std::vector<int32_t> dst(4, 42);
  TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
  std::vector<int32_t> expected = {3, 3, 3, 3};
  ASSERT_EQ(dst, expected);
}
// Records an AddI32 launch into a nested command buffer, embeds it into a
// primary buffer, submits, then updates the primary buffer with a fresh
// nested buffer writing to a different output and verifies both results.
TEST(GpuCommandBufferTest, LaunchNestedCommandBuffer) {
  Platform* platform = GpuPlatform();
  StreamExecutor* executor = platform->ExecutorForDevice(0).value();
  TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
  MultiKernelLoaderSpec spec = GetAddI32KernelSpec();
  TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, spec));
  int64_t length = 4;
  int64_t byte_length = sizeof(int32_t) * length;
  // Inputs: a = {1,1,1,1}, b = {2,2,2,2}; output c zeroed.
  DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
  DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
  DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
  TF_ASSERT_OK(stream->Memset32(&a, 1, byte_length));
  TF_ASSERT_OK(stream->Memset32(&b, 2, byte_length));
  TF_ASSERT_OK(stream->MemZero(&c, byte_length));
  auto primary_cmd = executor->CreateCommandBuffer(primary).value();
  auto nested_cmd = executor->CreateCommandBuffer(nested).value();
  TF_ASSERT_OK(nested_cmd->Launch(add, ThreadDim(), BlockDim(4), a, b, c));
  TF_ASSERT_OK(primary_cmd->AddNestedCommandBuffer(*nested_cmd));
  TF_ASSERT_OK(primary_cmd->Finalize());
  TF_ASSERT_OK(primary_cmd->Submit(stream.get()));
  // c = a + b = {3,3,3,3}.
  std::vector<int32_t> dst(4, 42);
  TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
  std::vector<int32_t> expected = {3, 3, 3, 3};
  ASSERT_EQ(dst, expected);
  // Update: swap in a new nested buffer that writes into `d`.
  DeviceMemory<int32_t> d = executor->AllocateArray<int32_t>(length, 0);
  TF_ASSERT_OK(stream->MemZero(&d, byte_length));
  nested_cmd = executor->CreateCommandBuffer(nested).value();
  TF_ASSERT_OK(nested_cmd->Launch(add, ThreadDim(), BlockDim(4), a, b, d));
  TF_ASSERT_OK(primary_cmd->Update());
  TF_ASSERT_OK(primary_cmd->AddNestedCommandBuffer(*nested_cmd));
  TF_ASSERT_OK(primary_cmd->Finalize());
  TF_ASSERT_OK(primary_cmd->Submit(stream.get()));
  std::fill(dst.begin(), dst.end(), 42);
  TF_ASSERT_OK(stream->Memcpy(dst.data(), d, byte_length));
  ASSERT_EQ(dst, expected);
}
// Records a device-to-device memcpy a->b, submits, then updates the buffer
// to copy b->a and verifies the round trip restores `a` after it was zeroed.
TEST(GpuCommandBufferTest, MemcpyDeviceToDevice) {
  Platform* platform = GpuPlatform();
  StreamExecutor* executor = platform->ExecutorForDevice(0).value();
  TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
  int64_t length = 4;
  int64_t byte_length = sizeof(int32_t) * length;
  DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
  DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
  TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length));
  auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
  TF_ASSERT_OK(cmd_buffer->MemcpyDeviceToDevice(&b, a, byte_length));
  TF_ASSERT_OK(cmd_buffer->Finalize());
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  std::vector<int32_t> dst(4, 0);
  TF_ASSERT_OK(stream->Memcpy(dst.data(), a, byte_length));
  std::vector<int32_t> expected = {42, 42, 42, 42};
  ASSERT_EQ(dst, expected);
  // Update: reverse the copy direction (b -> a), zero `a`, and check that
  // the updated command buffer restores it from `b`.
  TF_ASSERT_OK(cmd_buffer->Update());
  TF_ASSERT_OK(cmd_buffer->MemcpyDeviceToDevice(&a, b, byte_length));
  TF_ASSERT_OK(cmd_buffer->Finalize());
  TF_ASSERT_OK(stream->Memset32(&a, 0, byte_length));
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  std::fill(dst.begin(), dst.end(), 0);
  TF_ASSERT_OK(stream->Memcpy(dst.data(), a, byte_length));
  ASSERT_EQ(dst, expected);
}
// Records a memset of 42, submits, then updates the same command buffer to
// memset 43 and verifies both patterns land in device memory.
TEST(GpuCommandBufferTest, Memset) {
  Platform* platform = GpuPlatform();
  StreamExecutor* executor = platform->ExecutorForDevice(0).value();
  TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
  int64_t length = 4;
  int64_t byte_length = sizeof(int32_t) * length;
  DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
  auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
  TF_ASSERT_OK(cmd_buffer->Memset(&a, uint32_t{42}, length));
  TF_ASSERT_OK(cmd_buffer->Finalize());
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  std::vector<int32_t> dst(4, 0);
  TF_ASSERT_OK(stream->Memcpy(dst.data(), a, byte_length));
  std::vector<int32_t> expected = {42, 42, 42, 42};
  ASSERT_EQ(dst, expected);
  // Update the memset node's bit pattern in place.
  TF_ASSERT_OK(cmd_buffer->Update());
  TF_ASSERT_OK(cmd_buffer->Memset(&a, uint32_t{43}, length));
  TF_ASSERT_OK(cmd_buffer->Finalize());
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  std::fill(dst.begin(), dst.end(), 0);
  TF_ASSERT_OK(stream->Memcpy(dst.data(), a, byte_length));
  expected = {43, 43, 43, 43};
  ASSERT_EQ(dst, expected);
}
// Records a chain of memsets separated by barriers in the default execution
// scope, verifies the values, then inspects the recorded graph structure:
// which barriers became real barrier nodes vs. aliases of existing nodes,
// and their dependency edges. Finally re-records via Update().
TEST(GpuCommandBufferTest, Barriers) {
  Platform* platform = GpuPlatform();
  StreamExecutor* executor = platform->ExecutorForDevice(0).value();
  TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
  std::vector<DeviceMemory<int32_t>> buffers;
  for (size_t i = 0; i < 6; ++i) {
    buffers.push_back(executor->AllocateArray<int32_t>(1, 0));
  }
  // Copies every single-element device buffer back to the host.
  auto transfer_buffers = [&]() -> std::vector<int32_t> {
    std::vector<int32_t> dst(buffers.size(), 0);
    for (size_t i = 0; i < buffers.size(); ++i) {
      TF_CHECK_OK(stream->Memcpy(dst.data() + i, buffers[i], sizeof(int32_t)));
    }
    return dst;
  };
  // Barrier placement exercises: empty barrier, barrier after a single node,
  // back-to-back barriers, and barriers after pairs of concurrent memsets.
  auto record = [&](CommandBuffer* cmd_buffer, uint32_t bit_pattern) {
    TF_RETURN_IF_ERROR(cmd_buffer->Barrier());
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(&buffers[0], bit_pattern + 0, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->Barrier());
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(&buffers[1], bit_pattern + 1, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->Barrier());
    TF_RETURN_IF_ERROR(cmd_buffer->Barrier());
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(&buffers[2], bit_pattern + 2, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(&buffers[3], bit_pattern + 3, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->Barrier());
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(&buffers[4], bit_pattern + 4, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(&buffers[5], bit_pattern + 5, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->Barrier());
    return cmd_buffer->Finalize();
  };
  auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
  TF_ASSERT_OK(record(cmd_buffer.get(), 42));
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  std::vector<int32_t> expected = {42, 43, 44, 45, 46, 47};
  ASSERT_EQ(transfer_buffers(), expected);
  // Inspect the recorded graph structure of the default execution scope.
  GpuCommandBuffer* gpu_cmd_buffer = GpuCommandBuffer::Cast(cmd_buffer.get());
  ASSERT_EQ(gpu_cmd_buffer->nodes().size(), 6);
  ASSERT_EQ(gpu_cmd_buffer->barriers().size(), 6);
  auto nodes = gpu_cmd_buffer->nodes();
  auto barriers = gpu_cmd_buffer->barriers();
  // First barrier on an empty buffer is a real node with no dependencies.
  EXPECT_TRUE(barriers[0].is_barrier_node);
  EXPECT_TRUE(Deps(barriers[0]).empty());
  // Barriers after a single node alias that node instead of adding one.
  EXPECT_FALSE(barriers[1].is_barrier_node);
  EXPECT_EQ(barriers[1].handle, nodes[0].handle);
  EXPECT_FALSE(barriers[2].is_barrier_node);
  EXPECT_FALSE(barriers[3].is_barrier_node);
  EXPECT_EQ(barriers[2].handle, nodes[1].handle);
  EXPECT_EQ(barriers[3].handle, nodes[1].handle);
  // Barriers after two concurrent memsets are real nodes depending on both.
  EXPECT_TRUE(barriers[4].is_barrier_node);
  EXPECT_TRUE(barriers[5].is_barrier_node);
  EXPECT_EQ(Deps(barriers[4]), ExpectedDeps(nodes[2], nodes[3]));
  EXPECT_EQ(Deps(barriers[5]), ExpectedDeps(nodes[4], nodes[5]));
  // Re-record with a different bit pattern via the update path.
  TF_ASSERT_OK(cmd_buffer->Update());
  TF_ASSERT_OK(record(cmd_buffer.get(), 43));
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  expected = {43, 44, 45, 46, 47, 48};
  ASSERT_EQ(transfer_buffers(), expected);
}
// Records memsets and barriers into two separate execution scopes and
// verifies each scope gets its own nodes and its own barrier node depending
// only on that scope's memsets. Then re-records via Update().
TEST(GpuCommandBufferTest, IndependentExecutionScopes) {
  Platform* platform = GpuPlatform();
  StreamExecutor* executor = platform->ExecutorForDevice(0).value();
  TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
  CommandBuffer::ExecutionScopeId s0 = CommandBuffer::ExecutionScopeId(0);
  CommandBuffer::ExecutionScopeId s1 = CommandBuffer::ExecutionScopeId(1);
  std::vector<DeviceMemory<int32_t>> buffers;
  for (size_t i = 0; i < 4; ++i) {
    buffers.push_back(executor->AllocateArray<int32_t>(1, 0));
  }
  // Copies every single-element device buffer back to the host.
  auto transfer_buffers = [&]() -> std::vector<int32_t> {
    std::vector<int32_t> dst(buffers.size(), 0);
    for (size_t i = 0; i < buffers.size(); ++i) {
      TF_CHECK_OK(stream->Memcpy(dst.data() + i, buffers[i], sizeof(int32_t)));
    }
    return dst;
  };
  // Two memsets + one barrier per scope; the scopes never synchronize.
  auto record = [&](CommandBuffer* cmd_buffer, uint32_t bit_pattern) {
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[0], bit_pattern + 0, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[1], bit_pattern + 1, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[2], bit_pattern + 2, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[3], bit_pattern + 3, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->Barrier(s0));
    TF_RETURN_IF_ERROR(cmd_buffer->Barrier(s1));
    return cmd_buffer->Finalize();
  };
  auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
  TF_ASSERT_OK(record(cmd_buffer.get(), 42));
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  std::vector<int32_t> expected = {42, 43, 44, 45};
  ASSERT_EQ(transfer_buffers(), expected);
  // Each scope should own two memset nodes and one barrier node depending
  // only on that scope's memsets.
  GpuCommandBuffer* gpu_cmd_buffer = GpuCommandBuffer::Cast(cmd_buffer.get());
  auto nodes0 = gpu_cmd_buffer->nodes(s0);
  auto nodes1 = gpu_cmd_buffer->nodes(s1);
  auto barriers0 = gpu_cmd_buffer->barriers(s0);
  auto barriers1 = gpu_cmd_buffer->barriers(s1);
  ASSERT_EQ(nodes0.size(), 2);
  ASSERT_EQ(nodes1.size(), 2);
  ASSERT_EQ(barriers0.size(), 1);
  ASSERT_EQ(barriers1.size(), 1);
  EXPECT_TRUE(barriers0[0].is_barrier_node);
  EXPECT_TRUE(barriers1[0].is_barrier_node);
  EXPECT_EQ(Deps(barriers0[0]), ExpectedDeps(nodes0[0], nodes0[1]));
  EXPECT_EQ(Deps(barriers1[0]), ExpectedDeps(nodes1[0], nodes1[1]));
  // Re-record with a different bit pattern via the update path.
  TF_ASSERT_OK(cmd_buffer->Update());
  TF_ASSERT_OK(record(cmd_buffer.get(), 43));
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  expected = {43, 44, 45, 46};
  ASSERT_EQ(transfer_buffers(), expected);
}
// Records into three execution scopes joined by a single cross-scope
// barrier, verifies the memset results, then checks the graph structure:
// per-scope barrier nodes, a shared join barrier across all scopes, and the
// dependency edges of the nodes recorded after the join.
TEST(GpuCommandBufferTest, ExecutionScopeBarriers) {
  Platform* platform = GpuPlatform();
  StreamExecutor* executor = platform->ExecutorForDevice(0).value();
  TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
  CommandBuffer::ExecutionScopeId s0 = CommandBuffer::ExecutionScopeId(0);
  CommandBuffer::ExecutionScopeId s1 = CommandBuffer::ExecutionScopeId(1);
  CommandBuffer::ExecutionScopeId s2 = CommandBuffer::ExecutionScopeId(2);
  std::vector<DeviceMemory<int32_t>> buffers;
  for (size_t i = 0; i < 7; ++i) {
    buffers.push_back(executor->AllocateArray<int32_t>(1, 0));
  }
  // Copies every single-element device buffer back to the host.
  auto transfer_buffers = [&]() -> std::vector<int32_t> {
    std::vector<int32_t> dst(buffers.size(), 0);
    for (size_t i = 0; i < buffers.size(); ++i) {
      TF_CHECK_OK(stream->Memcpy(dst.data() + i, buffers[i], sizeof(int32_t)));
    }
    return dst;
  };
  // s2 has no nodes before the joint barrier; it only joins and then gets
  // one memset afterwards.
  auto record = [&](CommandBuffer* cmd_buffer, uint32_t bit_pattern) {
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[0], bit_pattern + 0, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[1], bit_pattern + 1, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[2], bit_pattern + 2, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[3], bit_pattern + 3, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->Barrier({s0, s1, s2}));
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[4], bit_pattern + 4, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[5], bit_pattern + 5, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(s2, &buffers[6], bit_pattern + 6, 1));
    return cmd_buffer->Finalize();
  };
  auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
  TF_ASSERT_OK(record(cmd_buffer.get(), 42));
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  std::vector<int32_t> expected = {42, 43, 44, 45, 46, 47, 48};
  ASSERT_EQ(transfer_buffers(), expected);
  GpuCommandBuffer* gpu_cmd_buffer = GpuCommandBuffer::Cast(cmd_buffer.get());
  auto nodes0 = gpu_cmd_buffer->nodes(s0);
  auto nodes1 = gpu_cmd_buffer->nodes(s1);
  auto nodes2 = gpu_cmd_buffer->nodes(s2);
  auto barriers0 = gpu_cmd_buffer->barriers(s0);
  auto barriers1 = gpu_cmd_buffer->barriers(s1);
  auto barriers2 = gpu_cmd_buffer->barriers(s2);
  // Two memsets before + one after the joint barrier in s0/s1; s2 only one.
  ASSERT_EQ(nodes0.size(), 3);
  ASSERT_EQ(nodes1.size(), 3);
  ASSERT_EQ(nodes2.size(), 1);
  // Each scope records its local barrier plus the shared join barrier.
  ASSERT_EQ(barriers0.size(), 2);
  ASSERT_EQ(barriers1.size(), 2);
  ASSERT_EQ(barriers2.size(), 2);
  EXPECT_TRUE(barriers0[0].is_barrier_node && barriers0[1].is_barrier_node);
  EXPECT_TRUE(barriers1[0].is_barrier_node && barriers1[1].is_barrier_node);
  EXPECT_TRUE(barriers2[0].is_barrier_node && barriers2[1].is_barrier_node);
  // The second barrier is the same join node in all three scopes.
  EXPECT_TRUE(barriers0[1].handle == barriers1[1].handle);
  EXPECT_TRUE(barriers1[1].handle == barriers2[1].handle);
  EXPECT_EQ(Deps(barriers0[0]), ExpectedDeps(nodes0[0], nodes0[1]));
  EXPECT_EQ(Deps(barriers1[0]), ExpectedDeps(nodes1[0], nodes1[1]));
  // s2 had nothing recorded before the join, so its local barrier is empty.
  EXPECT_TRUE(Deps(barriers2[0]).empty());
  EXPECT_EQ(Deps(barriers2[1]),
            ExpectedDeps(barriers0[0], barriers1[0], barriers2[0]));
  // Nodes recorded after the join depend on the shared join barrier.
  EXPECT_EQ(Deps(nodes0[2]), ExpectedDeps(barriers0[1]));
  EXPECT_EQ(Deps(nodes1[2]), ExpectedDeps(barriers1[1]));
  EXPECT_EQ(Deps(nodes2[0]), ExpectedDeps(barriers2[1]));
  // Re-record with a different bit pattern via the update path.
  TF_ASSERT_OK(cmd_buffer->Update());
  TF_ASSERT_OK(record(cmd_buffer.get(), 43));
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  expected = {43, 44, 45, 46, 47, 48, 49};
  ASSERT_EQ(transfer_buffers(), expected);
}
// Verifies the one-directional Barrier(s0, s1) overload: scope s1 must wait
// for s0's prior commands, while s0 keeps recording without a reverse
// dependency on s1.
TEST(GpuCommandBufferTest, ExecutionScopeOneDirectionalBarriers) {
  Platform* platform = GpuPlatform();
  StreamExecutor* executor = platform->ExecutorForDevice(0).value();
  TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
  CommandBuffer::ExecutionScopeId s0 = CommandBuffer::ExecutionScopeId(0);
  CommandBuffer::ExecutionScopeId s1 = CommandBuffer::ExecutionScopeId(1);
  // One single-element int32 device buffer per recorded memset.
  std::vector<DeviceMemory<int32_t>> buffers;
  for (size_t i = 0; i < 6; ++i) {
    buffers.push_back(executor->AllocateArray<int32_t>(1, 0));
  }
  // Copies every device buffer back to the host for verification.
  auto transfer_buffers = [&]() -> std::vector<int32_t> {
    std::vector<int32_t> dst(buffers.size(), 0);
    for (size_t i = 0; i < buffers.size(); ++i) {
      TF_CHECK_OK(stream->Memcpy(dst.data() + i, buffers[i], sizeof(int32_t)));
    }
    return dst;
  };
  // Records two memsets into each scope, a one-directional s0->s1 barrier,
  // then one more memset per scope.
  auto record = [&](CommandBuffer* cmd_buffer, uint32_t bit_pattern) {
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[0], bit_pattern + 0, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[1], bit_pattern + 1, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[2], bit_pattern + 2, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[3], bit_pattern + 3, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->Barrier(s0, s1));
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[4], bit_pattern + 4, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[5], bit_pattern + 5, 1));
    return cmd_buffer->Finalize();
  };
  auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
  TF_ASSERT_OK(record(cmd_buffer.get(), 42));
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  std::vector<int32_t> expected = {42, 43, 44, 45, 46, 47};
  ASSERT_EQ(transfer_buffers(), expected);
  // Inspect the underlying GPU graph topology produced by the barrier.
  GpuCommandBuffer* gpu_cmd_buffer = GpuCommandBuffer::Cast(cmd_buffer.get());
  auto nodes0 = gpu_cmd_buffer->nodes(s0);
  auto nodes1 = gpu_cmd_buffer->nodes(s1);
  auto barriers0 = gpu_cmd_buffer->barriers(s0);
  auto barriers1 = gpu_cmd_buffer->barriers(s1);
  ASSERT_EQ(nodes0.size(), 3);
  ASSERT_EQ(nodes1.size(), 3);
  // s0 gets only its own joining barrier; s1 additionally gets the
  // cross-scope barrier that depends on s0's barrier.
  ASSERT_EQ(barriers0.size(), 1);
  ASSERT_EQ(barriers1.size(), 2);
  EXPECT_TRUE(barriers0[0].is_barrier_node);
  EXPECT_TRUE(barriers1[0].is_barrier_node && barriers1[1].is_barrier_node);
  EXPECT_EQ(Deps(barriers0[0]), ExpectedDeps(nodes0[0], nodes0[1]));
  EXPECT_EQ(Deps(barriers1[0]), ExpectedDeps(nodes1[0], nodes1[1]));
  EXPECT_EQ(Deps(barriers1[1]), ExpectedDeps(barriers0[0], barriers1[0]));
  EXPECT_EQ(Deps(nodes0[2]), ExpectedDeps(barriers0[0]));
  EXPECT_EQ(Deps(nodes1[2]), ExpectedDeps(barriers1[1]))
;
  // Update in place with a new bit pattern and re-submit.
  TF_ASSERT_OK(cmd_buffer->Update());
  TF_ASSERT_OK(record(cmd_buffer.get(), 43));
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  expected = {43, 44, 45, 46, 47, 48};
  ASSERT_EQ(transfer_buffers(), expected);
}
// Verifies CommandBuffer::If: the `then` branch runs only when the device
// predicate is true, and the branch body can be rebuilt via Update().
TEST(GpuCommandBufferTest, ConditionalIf) {
  Platform* platform = GpuPlatform();
  StreamExecutor* executor = platform->ExecutorForDevice(0).value();
  if (!IsAtLeastCuda12300(executor)) {
    GTEST_SKIP() << "CUDA graph conditionals are not supported";
  }
  TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
  MultiKernelLoaderSpec spec(3);
  spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
  TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, spec));
  int64_t length = 4;
  int64_t byte_length = sizeof(int32_t) * length;
  // Device predicate plus inputs a=1, b=2 and output c=0.
  DeviceMemory<bool> pred = executor->AllocateArray<bool>(1, 0);
  DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
  DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
  DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
  constexpr bool kTrue = true;
  TF_ASSERT_OK(stream->Memcpy(&pred, &kTrue, 1));
  TF_ASSERT_OK(stream->Memset32(&a, 1, byte_length));
  TF_ASSERT_OK(stream->Memset32(&b, 2, byte_length));
  TF_ASSERT_OK(stream->MemZero(&c, byte_length));
  // The `then` branch computes c = a + b.
  CommandBuffer::Builder then_builder = [&](CommandBuffer* then_cmd) {
    return then_cmd->Launch(add, ThreadDim(), BlockDim(4), a, b, c);
  };
  auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
  TF_ASSERT_OK(cmd_buffer->If(pred, then_builder));
  TF_ASSERT_OK(cmd_buffer->Finalize());
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  // pred == true: the branch ran, c == a + b.
  std::vector<int32_t> dst(4, 42);
  TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
  std::vector<int32_t> expected = {3, 3, 3, 3};
  ASSERT_EQ(dst, expected);
  // pred == false: the branch is skipped and c stays zero.
  constexpr bool kFalse = false;
  TF_ASSERT_OK(stream->Memcpy(&pred, &kFalse, 1));
  TF_ASSERT_OK(stream->MemZero(&c, byte_length));
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
  std::vector<int32_t> zeroes = {0, 0, 0, 0};
  ASSERT_EQ(dst, zeroes);
  // Update the branch to write into a new buffer `d` and re-run with
  // pred == true.
  DeviceMemory<int32_t> d = executor->AllocateArray<int32_t>(length, 0);
  TF_ASSERT_OK(stream->MemZero(&d, byte_length));
  TF_ASSERT_OK(stream->Memcpy(&pred, &kTrue, 1));
  then_builder = [&](CommandBuffer* then_cmd) {
    return then_cmd->Launch(add, ThreadDim(), BlockDim(4), a, b, d);
  };
  TF_ASSERT_OK(cmd_buffer->Update());
  TF_ASSERT_OK(cmd_buffer->If(pred, then_builder));
  TF_ASSERT_OK(cmd_buffer->Finalize());
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  std::fill(dst.begin(), dst.end(), 42);
  TF_ASSERT_OK(stream->Memcpy(dst.data(), d, byte_length));
  ASSERT_EQ(dst, expected);
}
// Verifies that a Memset command is allowed inside a conditional `then`
// branch (requires CUDA driver >= 12.4; not supported on ROCm).
TEST(GpuCommandBufferTest, ConditionalIfWithMemset) {
  Platform* platform = GpuPlatform();
  StreamExecutor* executor = platform->ExecutorForDevice(0).value();
  if (platform->id() == rocm::kROCmPlatformId) {
    GTEST_SKIP() << "Not supported on ROCM";
  }
  if (platform->id() == cuda::kCudaPlatformId &&
      executor->GetDeviceDescription().driver_version() <
          SemanticVersion{12, 4, 0}) {
    GTEST_SKIP() << "ConditionalsWithMemset are not supported before 12.4.";
  }
  TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
  int64_t length = 4;
  int64_t byte_length = sizeof(int32_t) * length;
  DeviceMemory<bool> pred = executor->AllocateArray<bool>(1, 0);
  DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
  constexpr bool kTrue = true;
  TF_ASSERT_OK(stream->Memcpy(&pred, &kTrue, 1));
  TF_ASSERT_OK(stream->Memset32(&a, 0, byte_length));
  // The branch fills `a` with byte 0x01.
  CommandBuffer::Builder then_builder = [&](CommandBuffer* then_cmd) {
    return then_cmd->Memset(&a, uint8_t{1}, byte_length);
  };
  TF_ASSERT_OK_AND_ASSIGN(auto cmd_buffer,
                          executor->CreateCommandBuffer(primary));
  TF_ASSERT_OK(cmd_buffer->If(pred, then_builder));
  TF_ASSERT_OK(cmd_buffer->Finalize());
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  std::vector<int32_t> dst(length, 42);
  TF_ASSERT_OK(stream->Memcpy(dst.data(), a, byte_length));
  // Byte-wise memset of 0x01 over int32 yields 0x01010101 per element.
  std::vector<int32_t> expected(length, 1 << 24 | 1 << 16 | 1 << 8 | 1);
  ASSERT_EQ(dst, expected);
  // Update the branch to target a freshly allocated buffer `b` and re-run.
  DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
  TF_ASSERT_OK(stream->MemZero(&a, byte_length));
  then_builder = [&](CommandBuffer* then_cmd) {
    return then_cmd->Memset(&b, uint8_t{1}, byte_length);
  };
  TF_ASSERT_OK(cmd_buffer->Update());
  TF_ASSERT_OK(cmd_buffer->If(pred, then_builder));
  TF_ASSERT_OK(cmd_buffer->Finalize());
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  std::fill(dst.begin(), dst.end(), 42);
  TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length));
  ASSERT_EQ(dst, expected);
}
// Verifies CommandBuffer::IfElse: `then` (add) runs when the predicate is
// true, `else` (mul) runs when it is false, and the `else` branch can be
// swapped via Update().
TEST(GpuCommandBufferTest, ConditionalIfElse) {
  Platform* platform = GpuPlatform();
  StreamExecutor* executor = platform->ExecutorForDevice(0).value();
  if (!IsAtLeastCuda12300(executor)) {
    GTEST_SKIP() << "CUDA graph conditionals are not supported";
  }
  TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
  MultiKernelLoaderSpec add_spec(3);
  add_spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
  TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, add_spec));
  MultiKernelLoaderSpec mul_spec(3);
  mul_spec.AddInProcessSymbol(internal::GetMulI32Kernel(), "MulI32");
  TF_ASSERT_OK_AND_ASSIGN(auto mul, MulI32Kernel::Create(executor, mul_spec));
  int64_t length = 4;
  int64_t byte_length = sizeof(int32_t) * length;
  // Inputs a=2, b=3; output c starts at zero.
  DeviceMemory<bool> pred = executor->AllocateArray<bool>(1, 0);
  DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
  DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
  DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
  constexpr bool kTrue = true;
  TF_ASSERT_OK(stream->Memcpy(&pred, &kTrue, 1));
  TF_ASSERT_OK(stream->Memset32(&a, 2, byte_length));
  TF_ASSERT_OK(stream->Memset32(&b, 3, byte_length));
  TF_ASSERT_OK(stream->MemZero(&c, byte_length));
  CommandBuffer::Builder then_builder = [&](CommandBuffer* then_cmd) {
    return then_cmd->Launch(add, ThreadDim(), BlockDim(4), a, b, c);
  };
  CommandBuffer::Builder else_builder = [&](CommandBuffer* else_cmd) {
    return else_cmd->Launch(mul, ThreadDim(), BlockDim(4), a, b, c);
  };
  auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
  TF_ASSERT_OK(cmd_buffer->IfElse(pred, then_builder, else_builder));
  TF_ASSERT_OK(cmd_buffer->Finalize());
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  TF_ASSERT_OK(stream->BlockHostUntilDone());
  // pred == true: c = a + b.
  std::vector<int32_t> dst(4, 42);
  TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
  std::vector<int32_t> expected_add = {5, 5, 5, 5};
  ASSERT_EQ(dst, expected_add);
  // pred == false: c = a * b.
  constexpr bool kFalse = false;
  TF_ASSERT_OK(stream->Memcpy(&pred, &kFalse, 1));
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  TF_ASSERT_OK(stream->BlockHostUntilDone());
  TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
  std::vector<int32_t> expected_mul = {6, 6, 6, 6};
  ASSERT_EQ(dst, expected_mul);
  // Update the `else` branch to write into `d` and re-run (pred still false).
  DeviceMemory<int32_t> d = executor->AllocateArray<int32_t>(length, 0);
  TF_ASSERT_OK(stream->MemZero(&d, byte_length));
  else_builder = [&](CommandBuffer* else_cmd) {
    return else_cmd->Launch(mul, ThreadDim(), BlockDim(4), a, b, d);
  };
  TF_ASSERT_OK(cmd_buffer->Update());
  TF_ASSERT_OK(cmd_buffer->IfElse(pred, then_builder, else_builder));
  TF_ASSERT_OK(cmd_buffer->Finalize());
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  TF_ASSERT_OK(stream->BlockHostUntilDone());
  std::fill(dst.begin(), dst.end(), 42);
  TF_ASSERT_OK(stream->Memcpy(dst.data(), d, byte_length));
  ASSERT_EQ(dst, expected_mul);
}
// Verifies CommandBuffer::Case when one branch records no commands: index 0
// runs the add branch; index 1 and all out-of-range indices select the empty
// branch, leaving `c` untouched.
TEST(GpuCommandBufferTest, ConditionalCaseEmptyGraph) {
  Platform* platform = GpuPlatform();
  StreamExecutor* executor = platform->ExecutorForDevice(0).value();
  if (!IsAtLeastCuda12300(executor)) {
    GTEST_SKIP() << "CUDA graph conditionals are not supported";
  }
  TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
  MultiKernelLoaderSpec add_spec(3);
  add_spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
  TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, add_spec));
  int64_t length = 4;
  int64_t byte_length = sizeof(int32_t) * length;
  DeviceMemory<int32_t> index = executor->AllocateArray<int32_t>(1, 0);
  DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
  DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
  DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
  TF_ASSERT_OK(stream->Memset32(&index, 0, sizeof(int32_t)));
  TF_ASSERT_OK(stream->Memset32(&a, 2, byte_length));
  TF_ASSERT_OK(stream->Memset32(&b, 3, byte_length));
  TF_ASSERT_OK(stream->MemZero(&c, byte_length));
  // Branch 0 computes c = a + b; branch 1 intentionally records nothing.
  CommandBuffer::Builder branch0 = [&](CommandBuffer* branch0_cmd) {
    return branch0_cmd->Launch(add, ThreadDim(), BlockDim(4), a, b, c);
  };
  CommandBuffer::Builder branch1 = [&](CommandBuffer* branch1_cmd) {
    return absl::OkStatus();
  };
  auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
  TF_ASSERT_OK(cmd_buffer->Case(index, {branch0, branch1}));
  TF_ASSERT_OK(cmd_buffer->Finalize());
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  TF_ASSERT_OK(stream->BlockHostUntilDone());
  // index == 0: add branch ran.
  std::vector<int32_t> dst(4, 42);
  TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
  std::vector<int32_t> expected_add = {5, 5, 5, 5};
  ASSERT_EQ(dst, expected_add);
  // index == 1: empty branch, c keeps its previous value.
  TF_ASSERT_OK(stream->Memset32(&index, 1, sizeof(int32_t)));
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  TF_ASSERT_OK(stream->BlockHostUntilDone());
  TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
  ASSERT_EQ(dst, expected_add);
  // Out-of-range indices (-1 and 2) also resolve to the empty last branch.
  TF_ASSERT_OK(stream->Memset32(&index, -1, sizeof(int32_t)));
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  TF_ASSERT_OK(stream->BlockHostUntilDone());
  TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
  ASSERT_EQ(dst, expected_add);
  TF_ASSERT_OK(stream->Memset32(&index, 2, sizeof(int32_t)));
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  TF_ASSERT_OK(stream->BlockHostUntilDone());
  TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
  ASSERT_EQ(dst, expected_add);
}
// Parameterized fixture: the test parameter is the number of case branches
// recorded into the conditional Case command.
class GpuCommandBufferCaseTest : public testing::TestWithParam<int> {
 protected:
  // Number of case branches for the current parameter value.
  int GetNumCases() { return GetParam(); }

  // Branch that actually executes for index `i`: out-of-range indices
  // (negative or >= the branch count) fall back to the last branch.
  int GetEffectiveIndex(int i) {
    const int num_cases = GetNumCases();
    if (i < 0 || i >= num_cases) {
      return num_cases - 1;
    }
    return i;
  }
};
// Exercises Case with a parameterized number of branches. Each branch i
// squares values[i] into results[i]; after each submit only the effective
// branch's result buffer must be non-zero.
TEST_P(GpuCommandBufferCaseTest, ConditionalMultiCase) {
  Platform* platform = GpuPlatform();
  StreamExecutor* executor = platform->ExecutorForDevice(0).value();
  if (!IsAtLeastCuda12300(executor)) {
    GTEST_SKIP() << "CUDA graph conditionals are not supported";
  }
  TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
  MultiKernelLoaderSpec mul_spec(3);
  mul_spec.AddInProcessSymbol(internal::GetMulI32Kernel(), "MulI32");
  TF_ASSERT_OK_AND_ASSIGN(auto mul, MulI32Kernel::Create(executor, mul_spec));
  constexpr int64_t kLength = 1;
  int64_t byte_length = sizeof(int32_t) * kLength;
  DeviceMemory<int32_t> index = executor->AllocateArray<int32_t>(1, 0);
  TF_ASSERT_OK(stream->Memset32(&index, 0, sizeof(int32_t)));
  const int kNumCases = GetNumCases();
  std::vector<DeviceMemory<int32_t>> values;
  std::vector<DeviceMemory<int32_t>> results;
  std::vector<CommandBuffer::Builder> branches;
  values.resize(kNumCases);
  results.resize(kNumCases);
  branches.resize(kNumCases);
  for (int i = 0; i < kNumCases; ++i) {
    values[i] = executor->AllocateArray<int32_t>(kLength, 0);
    TF_ASSERT_OK(stream->Memset32(&values[i], i, byte_length));
    results[i] = executor->AllocateArray<int32_t>(kLength, 0);
    TF_ASSERT_OK(stream->Memset32(&results[i], 0, byte_length));
    // Capture `i` by value: the builder runs after the loop variable changes.
    branches[i] = [&, i](CommandBuffer* branch_cmd) {
      return branch_cmd->Launch(mul, ThreadDim(), BlockDim(kLength), values[i],
                                values[i], results[i]);
    };
  }
  auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
  TF_ASSERT_OK(cmd_buffer->Case(index, branches));
  TF_ASSERT_OK(cmd_buffer->Finalize());
  // Sweep indices including one-below and one-above the valid range.
  for (int i = -1; i <= kNumCases; ++i) {
    TF_ASSERT_OK(stream->Memset32(&index, i, sizeof(int32_t)));
    TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
    TF_ASSERT_OK(stream->BlockHostUntilDone());
    int effective_index = GetEffectiveIndex(i);
    for (int z = 0; z < kNumCases; ++z) {
      std::vector<int32_t> dst(kLength, 42);
      TF_ASSERT_OK(stream->Memcpy(dst.data(), results[z], byte_length));
      std::vector<int32_t> expected;
      expected.resize(kLength);
      for (int p = 0; p < kLength; ++p) {
        if (effective_index == z) {
          expected[p] = effective_index * effective_index;
        } else {
          expected[p] = 0;
        }
      }
      ASSERT_EQ(dst, expected)
          << "For result " << z << " after running case " << i;
      // Reset the result buffer so each iteration starts from zero.
      TF_ASSERT_OK(stream->Memset32(&results[z], 0, byte_length));
    }
  }
}
// Runs ConditionalMultiCase for every branch count from 1 to 31 inclusive.
INSTANTIATE_TEST_SUITE_P(ConditionalMultipleCaseTest, GpuCommandBufferCaseTest,
                         testing::Range(1, 32),
                         testing::PrintToStringParamName());
// Verifies a two-branch Case: index 0 -> add, index 1 -> mul, and
// out-of-range indices resolve to the last (mul) branch.
TEST(GpuCommandBufferTest, ConditionalCase) {
  Platform* platform = GpuPlatform();
  StreamExecutor* executor = platform->ExecutorForDevice(0).value();
  if (!IsAtLeastCuda12300(executor)) {
    GTEST_SKIP() << "CUDA graph conditionals are not supported";
  }
  TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
  MultiKernelLoaderSpec add_spec(3);
  add_spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
  TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, add_spec));
  MultiKernelLoaderSpec mul_spec(3);
  mul_spec.AddInProcessSymbol(internal::GetMulI32Kernel(), "MulI32");
  TF_ASSERT_OK_AND_ASSIGN(auto mul, MulI32Kernel::Create(executor, mul_spec));
  int64_t length = 4;
  int64_t byte_length = sizeof(int32_t) * length;
  // Inputs a=2, b=3; output c starts at zero.
  DeviceMemory<int32_t> index = executor->AllocateArray<int32_t>(1, 0);
  DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
  DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
  DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
  TF_ASSERT_OK(stream->Memset32(&index, 0, sizeof(int32_t)));
  TF_ASSERT_OK(stream->Memset32(&a, 2, byte_length));
  TF_ASSERT_OK(stream->Memset32(&b, 3, byte_length));
  TF_ASSERT_OK(stream->MemZero(&c, byte_length));
  CommandBuffer::Builder branch0 = [&](CommandBuffer* branch0_cmd) {
    return branch0_cmd->Launch(add, ThreadDim(), BlockDim(4), a, b, c);
  };
  CommandBuffer::Builder branch1 = [&](CommandBuffer* branch1_cmd) {
    return branch1_cmd->Launch(mul, ThreadDim(), BlockDim(4), a, b, c);
  };
  auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
  TF_ASSERT_OK(cmd_buffer->Case(index, {branch0, branch1}));
  TF_ASSERT_OK(cmd_buffer->Finalize());
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  TF_ASSERT_OK(stream->BlockHostUntilDone());
  // index == 0: c = a + b.
  std::vector<int32_t> dst(4, 42);
  TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
  std::vector<int32_t> expected_add = {5, 5, 5, 5};
  ASSERT_EQ(dst, expected_add);
  // index == 1: c = a * b.
  TF_ASSERT_OK(stream->Memset32(&index, 1, sizeof(int32_t)));
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  TF_ASSERT_OK(stream->BlockHostUntilDone());
  TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
  std::vector<int32_t> expected_mul = {6, 6, 6, 6};
  ASSERT_EQ(dst, expected_mul);
  // Out-of-range indices (-1 and 2) select the last branch (mul).
  TF_ASSERT_OK(stream->Memset32(&index, -1, sizeof(int32_t)));
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  TF_ASSERT_OK(stream->BlockHostUntilDone());
  TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
  ASSERT_EQ(dst, expected_mul);
  TF_ASSERT_OK(stream->Memset32(&index, 2, sizeof(int32_t)));
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  TF_ASSERT_OK(stream->BlockHostUntilDone());
  TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
  ASSERT_EQ(dst, expected_mul);
}
// Verifies CommandBuffer::For: the body (b += a) runs exactly `num_iters`
// times.
TEST(GpuCommandBufferTest, ConditionalFor) {
  Platform* platform = GpuPlatform();
  StreamExecutor* executor = platform->ExecutorForDevice(0).value();
  if (!IsAtLeastCuda12300(executor)) {
    GTEST_SKIP() << "CUDA graph conditionals are not supported";
  }
  TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
  MultiKernelLoaderSpec spec(3);
  spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
  TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, spec));
  int64_t length = 4;
  int64_t byte_length = sizeof(int32_t) * length;
  DeviceMemory<int32_t> loop_counter = executor->AllocateArray<int32_t>(1, 0);
  DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
  DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
  // Seed the counter with a bogus value (100): the result below proves that
  // For initializes the counter itself rather than trusting its contents.
  TF_ASSERT_OK(stream->Memset32(&loop_counter, 100, sizeof(int32_t)));
  TF_ASSERT_OK(stream->Memset32(&a, 1, byte_length))
;
  TF_ASSERT_OK(stream->MemZero(&b, byte_length));
  // Loop body: b = a + b (adds 1 per iteration).
  CommandBuffer::Builder body_builder = [&](CommandBuffer* body_cmd) {
    return body_cmd->Launch(add, ThreadDim(), BlockDim(4), a, b, b);
  };
  int32_t num_iters = 10;
  auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
  TF_ASSERT_OK(cmd_buffer->For(num_iters, loop_counter, body_builder));
  TF_ASSERT_OK(cmd_buffer->Finalize());
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  // 10 iterations of b += 1 starting from zero.
  std::vector<int32_t> dst(4, 42);
  TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length));
  std::vector<int32_t> expected = {10, 10, 10, 10};
  ASSERT_EQ(dst, expected);
}
// Verifies CommandBuffer::While: the IncAndCmp kernel increments the loop
// counter and writes the continuation predicate; the body (b += a) runs
// `num_iters` times.
TEST(GpuCommandBufferTest, ConditionalWhile) {
  Platform* platform = GpuPlatform();
  StreamExecutor* executor = platform->ExecutorForDevice(0).value();
  if (!IsAtLeastCuda12300(executor)) {
    GTEST_SKIP() << "CUDA graph conditionals are not supported";
  }
  TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
  MultiKernelLoaderSpec add_spec(3);
  add_spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
  TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, add_spec));
  MultiKernelLoaderSpec icmp_spec(3);
  icmp_spec.AddInProcessSymbol(internal::GetIncAndCmpKernel(), "IncAndCmp");
  TF_ASSERT_OK_AND_ASSIGN(auto inc_and_cmp,
                          IncAndCmpKernel::Create(executor, icmp_spec));
  int64_t length = 4;
  int64_t byte_length = sizeof(int32_t) * length;
  DeviceMemory<bool> pred = executor->AllocateArray<bool>(1, 0);
  DeviceMemory<int32_t> loop_counter = executor->AllocateArray<int32_t>(1, 0);
  DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
  DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
  static constexpr bool kFalse = false;
  TF_ASSERT_OK(stream->Memcpy(&pred, &kFalse, 1));
  TF_ASSERT_OK(stream->Memset32(&loop_counter, 0, sizeof(int32_t)));
  TF_ASSERT_OK(stream->Memset32(&a, 1, byte_length));
  TF_ASSERT_OK(stream->MemZero(&b, byte_length));
  int32_t num_iters = 10;
  // Condition: increment loop_counter and set pred = (counter < num_iters).
  CommandBuffer::ExecutionScopeBuilder cond_builder =
      [&](ExecutionScopeId id, CommandBuffer* cond_cmd) {
        return cond_cmd->Launch(inc_and_cmp, id, ThreadDim(), BlockDim(),
                                loop_counter, pred, num_iters);
      };
  // Body: b = a + b (adds 1 per iteration).
  CommandBuffer::Builder body_builder = [&](CommandBuffer* body_cmd) {
    return body_cmd->Launch(add, ThreadDim(), BlockDim(length), a, b, b);
  };
  auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
  TF_ASSERT_OK(cmd_buffer->While(pred, cond_builder, body_builder));
  TF_ASSERT_OK(cmd_buffer->Finalize());
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  std::vector<int32_t> dst(4, 42);
  TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length));
  std::vector<int32_t> expected = {10, 10, 10, 10};
  ASSERT_EQ(dst, expected);
}
// Currently disabled: exercises a conditional If nested (via a nested
// command buffer) inside a While body.
TEST(GpuCommandBufferTest, DISABLED_WhileNestedConditional) {
  Platform* platform = GpuPlatform();
  StreamExecutor* executor = platform->ExecutorForDevice(0).value();
  if (!IsAtLeastCuda12300(executor)) {
    GTEST_SKIP() << "CUDA graph conditionals are not supported";
  }
  TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
  MultiKernelLoaderSpec add_spec(3);
  add_spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
  TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, add_spec));
  MultiKernelLoaderSpec icmp_spec(3);
  icmp_spec.AddInProcessSymbol(internal::GetIncAndCmpKernel(), "IncAndCmp");
  TF_ASSERT_OK_AND_ASSIGN(auto inc_and_cmp,
                          IncAndCmpKernel::Create(executor, icmp_spec));
  int64_t length = 4;
  int64_t byte_length = sizeof(int32_t) * length;
  // `pred` drives the outer While; `pred_then` (always true) drives the
  // nested If.
  DeviceMemory<bool> pred = executor->AllocateArray<bool>(1, 0);
  DeviceMemory<bool> pred_then = executor->AllocateArray<bool>(1, 0);
  DeviceMemory<int32_t> loop_counter = executor->AllocateArray<int32_t>(1, 0);
  DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
  DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
  static constexpr bool kFalse = false;
  static constexpr bool kTrue = true;
  TF_ASSERT_OK(stream->Memcpy(&pred, &kFalse, 1));
  TF_ASSERT_OK(stream->Memcpy(&pred_then, &kTrue, 1));
  TF_ASSERT_OK(stream->Memset32(&loop_counter, 0, sizeof(int32_t)));
  TF_ASSERT_OK(stream->Memset32(&a, 1, byte_length));
  TF_ASSERT_OK(stream->MemZero(&b, byte_length));
  int32_t num_iters = 10;
  CommandBuffer::Builder then_builder =
      [&](CommandBuffer* then_cmd) {
        return then_cmd->Launch(add, ThreadDim(), BlockDim(length), a, b, b);
      };
  // Pre-build a nested command buffer that holds the conditional If.
  auto nested_cmd = executor->CreateCommandBuffer(nested).value();
  TF_ASSERT_OK(nested_cmd->If(pred_then, then_builder));
  // Loop condition: increment counter, set pred = (counter < num_iters).
  CommandBuffer::ExecutionScopeBuilder cond_builder =
      [&](ExecutionScopeId id, CommandBuffer* cond_cmd) {
        return cond_cmd->Launch(inc_and_cmp, id, ThreadDim(), BlockDim(length),
                                loop_counter, pred, num_iters);
      };
  // Loop body: splice the nested buffer (and hence the If) into the loop.
  CommandBuffer::Builder body_builder =
      [&](CommandBuffer* body_cmd) -> absl::Status {
    CHECK_OK(body_cmd->AddNestedCommandBuffer(*nested_cmd));
    return absl::OkStatus();
  };
  auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
  TF_ASSERT_OK(cmd_buffer->While(pred, cond_builder, body_builder));
  TF_ASSERT_OK(cmd_buffer->Finalize());
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  std::vector<int32_t> dst(4, 42);
  TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length));
  std::vector<int32_t> expected = {10, 10, 10, 10};
  ASSERT_EQ(dst, expected);
}
// Verifies that a conditional If can be recorded into a non-default
// execution scope (s1) while independent memsets go into s0, and that the
// final joint barrier synchronizes both scopes.
TEST(GpuCommandBufferTest, ConditionalIfInExecutionScope) {
  Platform* platform = GpuPlatform();
  StreamExecutor* executor = platform->ExecutorForDevice(0).value();
  if (!IsAtLeastCuda12300(executor)) {
    GTEST_SKIP() << "CUDA graph conditionals are not supported";
  }
  TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
  CommandBuffer::ExecutionScopeId s0 = CommandBuffer::ExecutionScopeId(0);
  CommandBuffer::ExecutionScopeId s1 = CommandBuffer::ExecutionScopeId(1);
  DeviceMemory<bool> pred = executor->AllocateArray<bool>(1, 0);
  constexpr bool kTrue = true;
  TF_ASSERT_OK(stream->Memcpy(&pred, &kTrue, 1));
  std::vector<DeviceMemory<int32_t>> buffers;
  for (size_t i = 0; i < 3; ++i) {
    buffers.push_back(executor->AllocateArray<int32_t>(1, 0));
  }
  // Copies every device buffer back to the host for verification. A failed
  // transfer aborts the test (TF_CHECK_OK) instead of being silently
  // ignored, matching the other transfer_buffers helpers in this file —
  // otherwise an I/O error would surface as a confusing value mismatch.
  auto transfer_buffers = [&]() -> std::vector<int32_t> {
    std::vector<int32_t> dst(buffers.size(), 0);
    for (size_t i = 0; i < buffers.size(); ++i) {
      TF_CHECK_OK(stream->Memcpy(dst.data() + i, buffers[i], sizeof(int32_t)));
    }
    return dst;
  };
  // s0 records two memsets; s1 records an If whose branch writes buffers[2];
  // the final Barrier({s0, s1}) joins both scopes.
  auto record = [&](CommandBuffer* cmd_buffer, uint32_t bit_pattern) {
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[0], bit_pattern + 0, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[1], bit_pattern + 1, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->If(s1, pred, [&](CommandBuffer* then_cmd) {
      return then_cmd->Memset(&buffers[2], bit_pattern + 2, 1);
    }));
    TF_RETURN_IF_ERROR(cmd_buffer->Barrier(s0));
    TF_RETURN_IF_ERROR(cmd_buffer->Barrier({s0, s1}));
    return cmd_buffer->Finalize();
  };
  auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
  TF_ASSERT_OK(record(cmd_buffer.get(), 42));
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  std::vector<int32_t> expected = {42, 43, 44};
  ASSERT_EQ(transfer_buffers(), expected);
  // Inspect the GPU graph: s0 has its own barrier (reused for the empty
  // second Barrier(s0)); s1's "barriers" alias its conditional nodes; the
  // joint barrier is shared between scopes.
  GpuCommandBuffer* gpu_cmd_buffer = GpuCommandBuffer::Cast(cmd_buffer.get());
  auto nodes0 = gpu_cmd_buffer->nodes(s0);
  auto nodes1 = gpu_cmd_buffer->nodes(s1);
  auto barriers0 = gpu_cmd_buffer->barriers(s0);
  auto barriers1 = gpu_cmd_buffer->barriers(s1);
  ASSERT_EQ(nodes0.size(), 2);
  ASSERT_EQ(nodes1.size(), 2);
  ASSERT_EQ(barriers0.size(), 3);
  ASSERT_EQ(barriers1.size(), 3);
  EXPECT_EQ(Deps(barriers0[0]), ExpectedDeps(nodes0[0], nodes0[1]));
  EXPECT_EQ(barriers0[0].handle, barriers0[1].handle);
  EXPECT_EQ(barriers1[0].handle, nodes1[0].handle);
  EXPECT_EQ(barriers1[1].handle, nodes1[1].handle);
  EXPECT_TRUE(barriers0[2].handle == barriers1[2].handle);
  EXPECT_EQ(Deps(barriers0[2]), ExpectedDeps(barriers0[1], nodes1[1]));
  // With pred == false the branch is skipped and buffers[2] stays zero.
  constexpr bool kFalse = false;
  TF_ASSERT_OK(stream->Memcpy(&pred, &kFalse, 1));
  TF_ASSERT_OK(stream->MemZero(&buffers[2], sizeof(int32_t)));
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  expected = {42, 43, 0};
  ASSERT_EQ(transfer_buffers(), expected);
}
// Verifies that a While loop can be recorded into a non-default execution
// scope (s1) alongside an independent memset in s0, and that the whole
// command buffer can be updated with a new bit pattern and iteration count.
TEST(GpuCommandBufferTest, ConditionalWhileInExecutionScope) {
  Platform* platform = GpuPlatform();
  StreamExecutor* executor = platform->ExecutorForDevice(0).value();
  if (!IsAtLeastCuda12300(executor)) {
    GTEST_SKIP() << "CUDA graph conditionals are not supported";
  }
  TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
  CommandBuffer::ExecutionScopeId s0 = CommandBuffer::ExecutionScopeId(0);
  CommandBuffer::ExecutionScopeId s1 = CommandBuffer::ExecutionScopeId(1);
  MultiKernelLoaderSpec add_spec(3);
  add_spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
  TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, add_spec));
  MultiKernelLoaderSpec icmp_spec(3);
  icmp_spec.AddInProcessSymbol(internal::GetIncAndCmpKernel(), "IncAndCmp");
  TF_ASSERT_OK_AND_ASSIGN(auto inc_and_cmp,
                          IncAndCmpKernel::Create(executor, icmp_spec));
  DeviceMemory<bool> pred = executor->AllocateArray<bool>(1, 0);
  DeviceMemory<int32_t> loop_counter = executor->AllocateArray<int32_t>(1, 0);
  DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(1, 0);
  DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(1, 0);
  DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(1, 0);
  TF_ASSERT_OK(stream->MemZero(&loop_counter, sizeof(int32_t)));
  TF_ASSERT_OK(stream->Memset32(&a, 1, sizeof(int32_t)));
  TF_ASSERT_OK(stream->MemZero(&b, sizeof(int32_t)));
  // s0: memset c to bit_pattern; s1: while (counter < num_iters) b += a;
  // final Barrier({s0, s1}) joins the scopes.
  auto record = [&](CommandBuffer* cmd_buffer, uint32_t bit_pattern,
                    int32_t num_iters) {
    TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &c, bit_pattern, 1));
    TF_RETURN_IF_ERROR(cmd_buffer->While(
        s1, pred,
        [&](ExecutionScopeId id, CommandBuffer* cond_cmd) {
          return cond_cmd->Launch(inc_and_cmp, id, ThreadDim(), BlockDim(),
                                  loop_counter, pred, num_iters);
        },
        [&](CommandBuffer* body_cmd) {
          return body_cmd->Launch(add, ThreadDim(), BlockDim(), a, b, b);
        }));
    TF_RETURN_IF_ERROR(cmd_buffer->Barrier({s0, s1}));
    return cmd_buffer->Finalize();
  };
  auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
  TF_ASSERT_OK(record(cmd_buffer.get(), 42, 10));
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  int32_t b_dst, c_dst;
  TF_ASSERT_OK(stream->Memcpy(&b_dst, b, sizeof(int32_t)));
  TF_ASSERT_OK(stream->Memcpy(&c_dst, c, sizeof(int32_t)));
  EXPECT_EQ(b_dst, 10);
  EXPECT_EQ(c_dst, 42);
  // Inspect the GPU graph: the final joint barrier depends on s0's memset
  // and s1's last loop node.
  GpuCommandBuffer* gpu_cmd_buffer = GpuCommandBuffer::Cast(cmd_buffer.get());
  auto nodes0 = gpu_cmd_buffer->nodes(s0);
  auto nodes1 = gpu_cmd_buffer->nodes(s1);
  auto barriers0 = gpu_cmd_buffer->barriers(s0);
  auto barriers1 = gpu_cmd_buffer->barriers(s1);
  ASSERT_EQ(nodes0.size(), 1);
  ASSERT_EQ(nodes1.size(), 3);
  ASSERT_EQ(barriers0.size(), 2);
  ASSERT_EQ(barriers1.size(), 4);
  EXPECT_EQ(Deps(barriers0[1]), ExpectedDeps(nodes0[0], nodes1[2]));
  // Re-record with a new pattern (43) and iteration count (20), reset loop
  // state, and re-run via Update().
  TF_ASSERT_OK(cmd_buffer->Update());
  TF_ASSERT_OK(record(cmd_buffer.get(), 43, 20));
  TF_ASSERT_OK(stream->MemZero(&loop_counter, sizeof(int32_t)));
  TF_ASSERT_OK(stream->MemZero(&b, sizeof(int32_t)));
  TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
  TF_ASSERT_OK(stream->Memcpy(&b_dst, b, sizeof(int32_t)));
  TF_ASSERT_OK(stream->Memcpy(&c_dst, c, sizeof(int32_t)));
  EXPECT_EQ(b_dst, 20);
  EXPECT_EQ(c_dst, 43);
}
// Registers a benchmark over a fixed set of command counts (the Arg value
// is the number of commands recorded per iteration).
#define BENCHMARK_SIZES(NAME) \
  BENCHMARK(NAME)->Arg(8)->Arg(32)->Arg(128)->Arg(512)->Arg(1024);
// Measures the cost of recording and finalizing a fresh nested command
// buffer with state.range(0) kernel launches.
static void BM_CreateCommandBuffer(benchmark::State& state) {
  Platform* platform = GpuPlatform();
  StreamExecutor* executor = platform->ExecutorForDevice(0).value();
  MultiKernelLoaderSpec spec(3);
  spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
  TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, spec));
  DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(1, 0);
  for (auto s : state) {
    auto cmd_buffer = executor->CreateCommandBuffer(nested).value();
    for (int i = 1; i < state.range(0); ++i) {
      CHECK_OK(cmd_buffer->Launch(add, ThreadDim(), BlockDim(4), b, b, b));
    }
    CHECK_OK(cmd_buffer->Finalize());
  }
}
BENCHMARK_SIZES(BM_CreateCommandBuffer);
// Measures the cost of building a command buffer by tracing stream
// operations (state.range(0) kernel launches per trace).
static void BM_TraceCommandBuffer(benchmark::State& state) {
  Platform* platform = GpuPlatform();
  StreamExecutor* executor = platform->ExecutorForDevice(0).value();
  TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
  MultiKernelLoaderSpec spec(3);
  spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
  TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, spec));
  DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(1, 0);
  for (auto s : state) {
    // The factory records whatever `launch_kernels` issues on the stream.
    auto launch_kernels = [&](Stream* stream) {
      for (int i = 1; i < state.range(0); ++i) {
        CHECK_OK(stream->ThenLaunch(ThreadDim(), BlockDim(4), add, b, b, b));
      }
      return absl::OkStatus();
    };
    CHECK_OK(
        TraceCommandBufferFactory::Create(executor, launch_kernels, nested));
  }
}
BENCHMARK_SIZES(BM_TraceCommandBuffer);
// Measures re-recording (Update + re-Launch + Finalize) of an already
// finalized primary command buffer, i.e. the steady-state update path.
static void BM_UpdateCommandBuffer(benchmark::State& state) {
  Platform* platform = GpuPlatform();
  StreamExecutor* executor = platform->ExecutorForDevice(0).value();
  MultiKernelLoaderSpec spec(3);
  spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
  TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, spec));
  DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(1, 0);
  // One-time setup: record and finalize the buffer outside the timed loop.
  auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
  for (int i = 1; i < state.range(0); ++i) {
    CHECK_OK(cmd_buffer->Launch(add, ThreadDim(), BlockDim(4), b, b, b));
  }
  CHECK_OK(cmd_buffer->Finalize());
  for (auto s : state) {
    CHECK_OK(cmd_buffer->Update());
    for (int i = 1; i < state.range(0); ++i) {
      CHECK_OK(cmd_buffer->Launch(add, ThreadDim(), BlockDim(4), b, b, b));
    }
    CHECK_OK(cmd_buffer->Finalize());
  }
}
BENCHMARK_SIZES(BM_UpdateCommandBuffer);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/gpu_command_buffer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/gpu_command_buffer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2c06befe-7026-4dfe-9872-70e10918a4b2 | cpp | tensorflow/tensorflow | input_generator | tensorflow/lite/testing/kernel_test/input_generator.cc | tensorflow/lite/testing/kernel_test/input_generator_test.cc | #include "tensorflow/lite/testing/kernel_test/input_generator.h"
#include <cstdio>
#include <fstream>
#include <limits>
#include <random>
#include <string>
#include <unordered_map>
#include <utility>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/testing/join.h"
#include "tensorflow/lite/testing/split.h"
namespace tflite {
namespace testing {
namespace {
static constexpr char kDefaultServingSignatureDefKey[] = "serving_default";
// Builds a flat buffer with one element per entry of `dims` (product of all
// dimension sizes); element i is produced by `random_func(i)`.
template <typename T>
std::vector<T> GenerateRandomTensor(TfLiteIntArray* dims,
                                    const std::function<T(int)>& random_func) {
  int64_t num_elements = 1;
  for (int i = 0; i < dims->size; i++) {
    num_elements *= dims->data[i];
  }
  std::vector<T> result(num_elements);
  for (int i = 0; i < num_elements; i++) {
    result[i] = random_func(i);
  }
  return result;
}
// Fills a tensor-shaped buffer with values drawn uniformly from [min, max],
// cast to T. Uses rand(), so results depend on the global libc RNG state
// (unseeded here — deterministic per process unless srand() is called).
template <typename T>
std::vector<T> GenerateUniform(TfLiteIntArray* dims, float min, float max) {
  auto random_float = [](float min, float max) {
    return min + (max - min) * static_cast<float>(rand()) / RAND_MAX;
  };
  std::function<T(int)> random_t = [&](int) {
    return static_cast<T>(random_float(min, max));
  };
  std::vector<T> data = GenerateRandomTensor(dims, random_t);
  return data;
}
// Fills a tensor-shaped buffer with values from a truncated normal: samples
// N(0.5, (1/3)^2) by rejection until the draw lands in [0, 1), then scales
// that into [min, max). The static engine makes results process-global.
template <typename T>
std::vector<T> GenerateGaussian(TfLiteIntArray* dims, float min, float max) {
  auto random_float = [](float min, float max) {
    static std::default_random_engine generator;
    static std::normal_distribution<double> distribution(0.5, 1.0 / 3);
    auto rand_n = distribution(generator);
    // Reject draws outside [0, 1) so the scaled value stays in range.
    while (rand_n < 0 || rand_n >= 1) {
      rand_n = distribution(generator);
    }
    return min + (max - min) * static_cast<float>(rand_n);
  };
  std::function<T(int)> random_t = [&](int) {
    return static_cast<T>(random_float(min, max));
  };
  std::vector<T> data = GenerateRandomTensor(dims, random_t);
  return data;
}
}
// Convenience overload: loads the model using the default SignatureDef key
// ("serving_default").
TfLiteStatus InputGenerator::LoadModel(const string& model_dir) {
  return LoadModel(model_dir, kDefaultServingSignatureDefKey);
}
// Loads a flatbuffer model from `model_dir`, builds an interpreter with the
// builtin op resolver, and resolves the SignatureRunner for `signature`.
// Returns kTfLiteError (with a message on stderr) on any failure.
TfLiteStatus InputGenerator::LoadModel(const string& model_dir,
                                       const string& signature) {
  model_ = FlatBufferModel::BuildFromFile(model_dir.c_str());
  if (!model_) {
    fprintf(stderr, "Cannot load model %s", model_dir.c_str());
    return kTfLiteError;
  }
  ::tflite::ops::builtin::BuiltinOpResolver builtin_ops;
  InterpreterBuilder(*model_, builtin_ops)(&interpreter_);
  if (!interpreter_) {
    fprintf(stderr, "Failed to build interpreter.");
    return kTfLiteError;
  }
  signature_runner_ = interpreter_->GetSignatureRunner(signature.c_str());
  if (!signature_runner_) {
    fprintf(stderr, "Failed to get SignatureRunner.\n");
    return kTfLiteError;
  }
  return kTfLiteOk;
}
// Reads "<name>:<value>" lines from `filename` and appends each pair to
// inputs_. Fails on an empty filename or on any malformed line.
TfLiteStatus InputGenerator::ReadInputsFromFile(const string& filename) {
  // Reject an unspecified path before touching the filesystem.
  if (filename.empty()) {
    fprintf(stderr, "Empty input file name.");
    return kTfLiteError;
  }
  std::ifstream input_file(filename);
  for (string line; std::getline(input_file, line, '\n');) {
    // Each line encodes exactly one input as "<name>:<value>".
    std::vector<string> parts = Split<string>(line, ":");
    if (parts.size() != 2) {
      fprintf(stderr, "Expected <name>:<value>, got %s", line.c_str());
      return kTfLiteError;
    }
    inputs_.push_back(std::make_pair(parts[0], parts[1]));
  }
  input_file.close();
  return kTfLiteOk;
}
// Serializes inputs_ to `filename`, one "<name>:<value>" pair per line,
// truncating any existing file. Fails on an empty filename or open error.
TfLiteStatus InputGenerator::WriteInputsToFile(const string& filename) {
  if (filename.empty()) {
    fprintf(stderr, "Empty input file name.");
    return kTfLiteError;
  }
  // Open-on-construction with explicit truncation of previous contents.
  std::ofstream output_file(filename, std::fstream::out | std::fstream::trunc);
  if (!output_file) {
    fprintf(stderr, "Failed to open output file %s.", filename.c_str());
    return kTfLiteError;
  }
  for (const auto& entry : inputs_) {
    output_file << entry.first << ":" << entry.second << "\n";
  }
  output_file.close();
  return kTfLiteOk;
}
// Generates one random value string per model input tensor and appends
// (tensor name, comma-joined values) pairs to inputs_.
//
// `distribution` selects the sampler: "UNIFORM" or "GAUSSIAN". Any other
// value is an error. Unsupported tensor element types are reported on
// stderr and skipped (the call still returns kTfLiteOk), matching the
// previous behavior.
//
// Refactored: the UNIFORM and GAUSSIAN branches were byte-for-byte
// duplicates except for the generator function; the generator is now picked
// once per case via a function pointer. The distribution is also validated
// up front, so an unsupported distribution is reported even when the model
// has no inputs (previously it was silently accepted in that case).
TfLiteStatus InputGenerator::GenerateInput(const string& distribution) {
  if (distribution != "UNIFORM" && distribution != "GAUSSIAN") {
    fprintf(stderr, "Unsupported distribution %s.", distribution.c_str());
    return kTfLiteError;
  }
  const bool uniform = (distribution == "UNIFORM");
  auto input_tensor_names = signature_runner_->input_names();
  for (const char* name : input_tensor_names) {
    auto* tensor = signature_runner_->input_tensor(name);
    switch (tensor->type) {
      case kTfLiteInt8: {
        // Both samplers share the signature vector<T>(dims, min, max).
        auto* gen = uniform ? &GenerateUniform<int8_t> : &GenerateGaussian<int8_t>;
        auto data = gen(tensor->dims, std::numeric_limits<int8_t>::min(),
                        std::numeric_limits<int8_t>::max());
        inputs_.push_back(
            std::make_pair(name, Join(data.data(), data.size(), ",")));
        break;
      }
      case kTfLiteUInt8: {
        auto* gen =
            uniform ? &GenerateUniform<uint8_t> : &GenerateGaussian<uint8_t>;
        auto data = gen(tensor->dims, std::numeric_limits<uint8_t>::min(),
                        std::numeric_limits<uint8_t>::max());
        inputs_.push_back(
            std::make_pair(name, Join(data.data(), data.size(), ",")));
        break;
      }
      case kTfLiteFloat32: {
        auto* gen = uniform ? &GenerateUniform<float> : &GenerateGaussian<float>;
        auto data = gen(tensor->dims, -1, 1);
        inputs_.push_back(
            std::make_pair(name, Join(data.data(), data.size(), ",")));
        break;
      }
      default:
        fprintf(stderr, "Unsupported input tensor type %s.",
                TfLiteTypeGetName(tensor->type));
        break;
    }
  }
  return kTfLiteOk;
}
}
} | #include "tensorflow/lite/testing/kernel_test/input_generator.h"
#include <fstream>
#include <map>
#include <string>
#include <unordered_map>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace testing {
namespace {
// Loading a known-good model file succeeds.
TEST(InputGeneratorTest, LoadModel) {
  InputGenerator input_generator;
  ASSERT_EQ(input_generator.LoadModel(
                "tensorflow/lite/testdata/multi_add.bin"),
            kTfLiteOk);
}
// Inputs parsed from the fixture CSV round-trip unchanged through
// WriteInputsToFile.
TEST(InputGeneratorTest, ReadWriteSimpleFile) {
  InputGenerator input_generator;
  ASSERT_EQ(
      input_generator.ReadInputsFromFile("tensorflow/lite/testing/"
                                         "kernel_test/testdata/test_input.csv"),
      kTfLiteOk);
  // Expected: one input named "a" holding 1*8*8*3 comma-separated ones.
  std::string content = "1";
  for (int i = 0; i < 1 * 8 * 8 * 3 - 1; i++) {
    content.append(",1");
  }
  std::vector<std::pair<string, string>> inputs = {{"a", content}};
  ASSERT_EQ(input_generator.GetInputs(), inputs);
  auto output_filename = ::testing::TempDir() + "/out.csv";
  ASSERT_EQ(input_generator.WriteInputsToFile(output_filename), kTfLiteOk);
  std::ifstream in(output_filename);
  std::string out;
  std::getline(in, out, '\n');
  std::string expected_out = "a:";
  expected_out.append(content);
  ASSERT_EQ(out, expected_out);
}
// One entry is generated per model input; multi_add has four inputs.
TEST(InputGeneratorTest, GenerateUniformInput) {
  InputGenerator input_generator;
  ASSERT_EQ(input_generator.LoadModel(
                "tensorflow/lite/testdata/multi_add.bin"),
            kTfLiteOk);
  input_generator.GenerateInput("UNIFORM");
  auto inputs = input_generator.GetInputs();
  ASSERT_EQ(inputs.size(), 4);
}
// Same as above for the Gaussian sampler.
TEST(InputGeneratorTest, GenerateGaussianInput) {
  InputGenerator input_generator;
  ASSERT_EQ(input_generator.LoadModel(
                "tensorflow/lite/testdata/multi_add.bin"),
            kTfLiteOk);
  input_generator.GenerateInput("GAUSSIAN");
  auto inputs = input_generator.GetInputs();
  ASSERT_EQ(inputs.size(), 4);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/kernel_test/input_generator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/kernel_test/input_generator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cc8f58bf-dadd-4b33-9288-7382a0f4e746 | cpp | google/arolla | operator_fixture | arolla/qexpr/testing/operator_fixture.h | arolla/qexpr/testing/operator_fixture_test.cc | #ifndef AROLLA_QEXPR_OPERATOR_FIXTURE_H_
#define AROLLA_QEXPR_OPERATOR_FIXTURE_H_
#include <array>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <tuple>
#include <typeindex>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/memory/frame.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qexpr/qexpr_operator_signature.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
template <typename ARG_Ts, typename RES_Ts>
class OperatorFixture;
// Test helper that binds a QExprOperator to a freshly built frame layout so
// it can be invoked like a plain function: inputs of types ARG_Ts..., result
// a tuple of RES_Ts.... Create() verifies that the operator's declared input
// and output QTypes match the C++ types supplied by the caller.
template <typename... ARG_Ts, typename... RES_Ts>
class OperatorFixture<std::tuple<ARG_Ts...>, std::tuple<RES_Ts...>> {
 public:
  // Builds a fixture for `op`, allocating input/output slots in a new layout
  // and binding the operator to them. Fails if types don't match.
  static absl::StatusOr<OperatorFixture> Create(const QExprOperator& op) {
    return CreateImpl(op, std::make_index_sequence<sizeof...(ARG_Ts)>(),
                      std::make_index_sequence<sizeof...(RES_Ts)>());
  }
  // Move-only: the fixture owns the bound operator and the frame layout.
  OperatorFixture(OperatorFixture&& other) = default;
  OperatorFixture& operator=(OperatorFixture&& other) = default;
  // Evaluates the operator on `args` and returns all outputs as a tuple.
  absl::StatusOr<std::tuple<RES_Ts...>> Call(ARG_Ts&&... args) const {
    return CallImpl(std::forward<ARG_Ts&&>(args)...,
                    std::make_index_sequence<sizeof...(RES_Ts)>());
  }
 private:
  OperatorFixture(std::unique_ptr<BoundOperator> bound_op, FrameLayout&& layout,
                  std::tuple<FrameLayout::Slot<ARG_Ts>...> input_slots,
                  std::tuple<FrameLayout::Slot<RES_Ts>...> output_slots)
      : bound_op_(std::move(bound_op)),
        layout_(std::move(layout)),
        input_slots_(input_slots),
        output_slots_(output_slots) {}
  // Checks that `types` has exactly sizeof...(Ts) entries and that each
  // QType's C++ type_info matches the corresponding Ts.
  template <typename... Ts>
  static absl::Status VerifyTypes(absl::Span<const QTypePtr> types) {
    if (sizeof...(Ts) != types.size()) {
      return absl::FailedPreconditionError(
          absl::StrFormat("argument count mismatch; got %d expected %d",
                          types.size(), sizeof...(Ts)));
    }
    std::array<std::type_index, sizeof...(Ts)> expected_types = {
        std::type_index(typeid(Ts))...};
    for (size_t i = 0; i < types.size(); ++i) {
      if (expected_types[i] != std::type_index(types[i]->type_info())) {
        return absl::FailedPreconditionError(
            absl::StrFormat("type mismatch at position %d", i));
      }
    }
    return absl::OkStatus();
  }
  // Slot overload: extracts each slot's QType and delegates to the QType
  // overload above.
  template <typename... Ts>
  static absl::Status VerifyTypes(absl::Span<const TypedSlot> slots) {
    std::vector<QTypePtr> types;
    types.reserve(slots.size());
    for (auto slot : slots) {
      types.push_back(slot.GetType());
    }
    return VerifyTypes<Ts...>(types);
  }
  template <size_t... ARG_Is, size_t... RES_Is>
  static absl::StatusOr<OperatorFixture> CreateImpl(
      const QExprOperator& op, std::index_sequence<ARG_Is...> arg_seq,
      std::index_sequence<RES_Is...> res_seq) {
    FrameLayout::Builder layout_builder;
    auto input_slots = std::make_tuple(layout_builder.AddSlot<ARG_Ts>()...);
    const QExprOperatorSignature* op_signature = op.signature();
    auto input_types = op_signature->input_types();
    RETURN_IF_ERROR(VerifyTypes<ARG_Ts...>(input_types)) << "on input types";
    auto output_type = op_signature->output_type();
    auto output_typed_slot = AddSlot(output_type, &layout_builder);
    // A tuple-typed output is flattened into its sub-slots so each RES_T
    // maps onto one concrete slot; scalar outputs map one-to-one.
    std::vector<TypedSlot> output_typed_subslots;
    if (IsTupleQType(output_type)) {
      output_typed_subslots.reserve(output_typed_slot.SubSlotCount());
      for (int64_t i = 0; i < output_typed_slot.SubSlotCount(); ++i) {
        output_typed_subslots.push_back(output_typed_slot.SubSlot(i));
      }
    } else {
      output_typed_subslots = {output_typed_slot};
    }
    ASSIGN_OR_RETURN(auto output_slots,
                     TypedSlot::ToSlots<RES_Ts...>(output_typed_subslots));
    RETURN_IF_ERROR(VerifyTypes<RES_Ts...>(output_typed_subslots))
        << "on output types";
    ASSIGN_OR_RETURN(auto bound_op,
                     op.Bind({TypedSlot::FromSlot(std::get<ARG_Is>(input_slots),
                                                  input_types[ARG_Is])...},
                             output_typed_slot));
    auto layout = std::move(layout_builder).Build();
    return OperatorFixture(std::move(bound_op), std::move(layout), input_slots,
                           output_slots);
  }
  // Moves each argument into its input slot via a fold expression.
  template <size_t... ARG_Is>
  void SetInputs(FramePtr frame
                     ABSL_ATTRIBUTE_UNUSED,
                 ARG_Ts&&... args, std::index_sequence<ARG_Is...>) const {
    (frame.Set(std::get<ARG_Is>(input_slots_), std::move(args)), ...);
  }
  template <size_t... RES_Is>
  absl::StatusOr<std::tuple<RES_Ts...>> CallImpl(
      ARG_Ts&&... args, std::index_sequence<RES_Is...>) const {
    RootEvaluationContext root_ctx(&layout_);
    SetInputs(root_ctx.frame(), std::move(args)...,
              std::make_index_sequence<sizeof...(ARG_Ts)>());
    EvaluationContext ctx(root_ctx);
    bound_op_->Run(&ctx, root_ctx.frame());
    // Operator failures are signaled through the context status, not thrown.
    if (!ctx.status().ok()) {
      return std::move(ctx).status();
    }
    return std::make_tuple(
        std::move(*root_ctx.GetMutable(std::get<RES_Is>(output_slots_)))...);
  }
  std::unique_ptr<BoundOperator> bound_op_;
  FrameLayout layout_;
  std::tuple<FrameLayout::Slot<ARG_Ts>...> input_slots_;
  std::tuple<FrameLayout::Slot<RES_Ts>...> output_slots_;
};
// Convenience specialization for operators with a single (non-tuple) result:
// wraps the tuple-returning fixture and unwraps its one-element result.
template <template <typename...> class TYPE_LIST, typename... ARG_Ts,
          typename RES_T>
class OperatorFixture<TYPE_LIST<ARG_Ts...>, RES_T> {
 public:
  OperatorFixture(OperatorFixture&& other) = default;
  OperatorFixture& operator=(OperatorFixture&& other) = default;
  static absl::StatusOr<OperatorFixture> Create(const QExprOperator& op) {
    ASSIGN_OR_RETURN(auto delegate, DelegateT::Create(op));
    return OperatorFixture(std::move(delegate));
  }
  // Calls the delegate and returns the sole element of its result tuple.
  absl::StatusOr<RES_T> Call(ARG_Ts&&... args) const {
    ASSIGN_OR_RETURN(auto tuple,
                     delegate_.Call(std::forward<ARG_Ts&&>(args)...));
    return std::get<0>(std::move(tuple));
  }
 private:
  using DelegateT = OperatorFixture<TYPE_LIST<ARG_Ts...>, TYPE_LIST<RES_T>>;
  explicit OperatorFixture(DelegateT&& delegate)
      : delegate_(std::move(delegate)) {}
  DelegateT delegate_;
};
}
#endif | #include "arolla/qexpr/testing/operator_fixture.h"
#include <cstdint>
#include <tuple>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status_matchers.h"
#include "arolla/codegen/qexpr/testing/test_operators.h"
#include "arolla/memory/frame.h"
#include "arolla/qexpr/operator_factory.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::arolla::testing::Vector3;
using ::testing::Eq;
template <typename T>
using Slot = FrameLayout::Slot<T>;
// Single-result fixture: test.add(float, float) -> float.
TEST(OperatorFixtureTest, TestSingleResultOperator) {
  auto float_type = GetQType<float>();
  auto op =
      OperatorRegistry::GetInstance()
          ->LookupOperator("test.add", {float_type, float_type}, float_type)
          .value();
  using ARG_Ts = std::tuple<float, float>;
  using RES_Ts = float;
  auto fixture = OperatorFixture<ARG_Ts, RES_Ts>::Create(*op).value();
  float result = fixture.Call(10.0f, 20.0f).value();
  EXPECT_THAT(result, Eq(30.0f));
}
// Tuple-result fixture: the operator's tuple output is unpacked into
// separate C++ values.
TEST(OperatorFixtureTest, TestMultipleResultOperator) {
  auto vector_type = GetQType<Vector3<float>>();
  auto op =
      OperatorRegistry::GetInstance()
          ->LookupOperator("test.vector_components", {vector_type},
                           MakeTupleQType({GetQType<float>(), GetQType<float>(),
                                           GetQType<float>()}))
          .value();
  using ARG_Ts = std::tuple<Vector3<float>>;
  using RES_Ts = std::tuple<float, float, float>;
  auto fixture = OperatorFixture<ARG_Ts, RES_Ts>::Create(*op).value();
  float a, b, c;
  std::tie(a, b, c) = fixture.Call(Vector3<float>(10.0f, 20.0f, 30.0f)).value();
  EXPECT_THAT(a, Eq(10.0f));
  EXPECT_THAT(b, Eq(20.0f));
  EXPECT_THAT(c, Eq(30.0f));
}
// Zero-argument operator returning a tuple built from a lambda.
TEST(OperatorFixtureTest, TestReturningTupleOperator) {
  ASSERT_OK_AND_ASSIGN(auto op, QExprOperatorFromFunction([]() {
                         return std::tuple<int32_t, float>(57, 57.75f);
                       }));
  using ARG_Ts = std::tuple<>;
  using RES_Ts = std::tuple<int32_t, float>;
  auto fixture = OperatorFixture<ARG_Ts, RES_Ts>::Create(*op).value();
  EXPECT_THAT(fixture.Call(), IsOkAndHolds(Eq(std::tuple(57, 57.75f))));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/testing/operator_fixture.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/testing/operator_fixture_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
85e435bf-b69e-489b-a72a-a03f354f1baa | cpp | tensorflow/tensorflow | server_lib | tensorflow/core/distributed_runtime/server_lib.cc | tensorflow/core/distributed_runtime/server_lib_test.cc | #include "tensorflow/core/distributed_runtime/server_lib.h"
#include <unordered_map>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/mutex.h"
namespace tensorflow {
namespace {
// Guards all access to the factory registry. LINKER_INITIALIZED avoids a
// dynamic-initialization order dependency at startup.
mutex* get_server_factory_lock() {
  static mutex server_factory_lock(LINKER_INITIALIZED);
  return &server_factory_lock;
}
typedef std::unordered_map<string, ServerFactory*> ServerFactories;
// Registry of factories keyed by server type; leaked intentionally so it
// outlives all static destructors.
ServerFactories* server_factories() {
  static ServerFactories* factories = new ServerFactories;
  return factories;
}
}
// Registers `factory` under `server_type`. A duplicate registration keeps
// the first factory and only logs an error, matching prior behavior.
void ServerFactory::Register(const string& server_type,
                             ServerFactory* factory) {
  mutex_lock l(*get_server_factory_lock());
  const bool inserted =
      server_factories()->insert({server_type, factory}).second;
  if (!inserted) {
    LOG(ERROR) << "Two server factories are being registered under "
               << server_type;
  }
}
// Finds the first registered factory that accepts `server_def` and stores it
// in `*out_factory`. Iteration order over the unordered registry is
// unspecified, so overlapping AcceptsOptions() predicates are resolved
// arbitrarily. Returns NotFound (listing all registered factory names) if
// none accepts.
Status ServerFactory::GetFactory(const ServerDef& server_def,
                                 ServerFactory** out_factory) {
  mutex_lock l(*get_server_factory_lock());
  for (const auto& [name, factory] : *server_factories()) {
    if (factory->AcceptsOptions(server_def)) {
      *out_factory = factory;
      return absl::OkStatus();
    }
  }
  // Error path only: collect the registered names for a helpful message.
  std::vector<string> server_names;
  server_names.reserve(server_factories()->size());
  for (const auto& [name, factory] : *server_factories()) {
    server_names.push_back(name);
  }
  return errors::NotFound(
      "No server factory registered for the given ServerDef: ",
      server_def.DebugString(), "\nThe available server factories are: [ ",
      absl::StrJoin(server_names, ", "), " ]");
}
// Creates a server for `server_def` with default factory options.
Status NewServer(const ServerDef& server_def,
                 std::unique_ptr<ServerInterface>* out_server) {
  ServerFactory* factory;
  TF_RETURN_IF_ERROR(ServerFactory::GetFactory(server_def, &factory));
  return factory->NewServer(server_def, ServerFactory::Options(), out_server);
}
// Same as NewServer but forwards caller-supplied factory options.
Status NewServerWithOptions(const ServerDef& server_def,
                            const ServerFactory::Options& options,
                            std::unique_ptr<ServerInterface>* out_server) {
  ServerFactory* factory;
  TF_RETURN_IF_ERROR(ServerFactory::GetFactory(server_def, &factory));
  return factory->NewServer(server_def, options, out_server);
}
} | #include "tensorflow/core/distributed_runtime/server_lib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
// Minimal factory stub: accepts only protocol "test_protocol" and "creates"
// nothing (NewServer leaves *out_server untouched and returns OK).
class TestServerFactory : public ServerFactory {
 public:
  bool AcceptsOptions(const ServerDef& server_def) override {
    return server_def.protocol() == "test_protocol";
  }
  Status NewServer(const ServerDef& server_def, const Options& options,
                   std::unique_ptr<ServerInterface>* out_server) override {
    return absl::OkStatus();
  }
};
// A registered factory whose protocol matches is selected by NewServer.
TEST(ServerLibTest, NewServerFactoryAccepts) {
  ServerFactory::Register("TEST_SERVER", new TestServerFactory());
  ServerDef server_def;
  server_def.set_protocol("test_protocol");
  std::unique_ptr<ServerInterface> server;
  TF_EXPECT_OK(NewServer(server_def, &server));
}
// An unmatched protocol yields NotFound with a message listing factories.
TEST(ServerLibTest, NewServerNoFactoriesAccept) {
  ServerDef server_def;
  server_def.set_protocol("fake_protocol");
  std::unique_ptr<ServerInterface> server;
  Status s = NewServer(server_def, &server);
  ASSERT_NE(s, absl::OkStatus());
  EXPECT_TRUE(absl::StrContains(
      s.message(), "No server factory registered for the given ServerDef"));
  EXPECT_TRUE(
      absl::StrContains(s.message(), "The available server factories are: ["));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/server_lib.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/server_lib_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c6bacac8-24d4-4a31-850e-7da0d49e71ea | cpp | abseil/abseil-cpp | cordz_handle | absl/strings/internal/cordz_handle.cc | absl/strings/internal/cordz_handle_test.cc | #include "absl/strings/internal/cordz_handle.h"
#include <atomic>
#include "absl/base/internal/raw_logging.h"
#include "absl/base/no_destructor.h"
#include "absl/synchronization/mutex.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
namespace {
// Global delete-queue state: a doubly linked list of CordzHandles whose tail
// is published through an atomic so IsEmpty() can be checked without taking
// the mutex. All list-structure mutations happen under `mutex`.
struct Queue {
  Queue() = default;
  absl::Mutex mutex;
  std::atomic<CordzHandle*> dq_tail ABSL_GUARDED_BY(mutex){nullptr};
  // Lock-free emptiness probe; annotated to suppress the analysis because
  // it deliberately reads dq_tail without holding `mutex`.
  bool IsEmpty() const ABSL_NO_THREAD_SAFETY_ANALYSIS {
    return dq_tail.load(std::memory_order_acquire) == nullptr;
  }
};
// Process-wide singleton queue; NoDestructor avoids shutdown-order issues.
static Queue& GlobalQueue() {
  static absl::NoDestructor<Queue> global_queue;
  return *global_queue;
}
}
// Non-snapshot handles are not queued on construction. A snapshot handle is
// appended to the tail of the global queue under the lock; its presence
// defers deletion of handles queued after it (see Delete/~CordzHandle).
CordzHandle::CordzHandle(bool is_snapshot) : is_snapshot_(is_snapshot) {
  Queue& global_queue = GlobalQueue();
  if (is_snapshot) {
    MutexLock lock(&global_queue.mutex);
    CordzHandle* dq_tail = global_queue.dq_tail.load(std::memory_order_acquire);
    if (dq_tail != nullptr) {
      dq_prev_ = dq_tail;
      dq_tail->dq_next_ = this;
    }
    global_queue.dq_tail.store(this, std::memory_order_release);
  }
}
// Destroying a snapshot unlinks it from the queue and, if it was the oldest
// snapshot (no previous element), reaps every deferred non-snapshot handle
// queued after it. The actual deletes happen after the lock is released.
CordzHandle::~CordzHandle() {
  Queue& global_queue = GlobalQueue();
  if (is_snapshot_) {
    std::vector<CordzHandle*> to_delete;
    {
      MutexLock lock(&global_queue.mutex);
      CordzHandle* next = dq_next_;
      if (dq_prev_ == nullptr) {
        // Oldest entry: all following non-snapshot handles are no longer
        // protected by any earlier snapshot and can be freed.
        while (next && !next->is_snapshot_) {
          to_delete.push_back(next);
          next = next->dq_next_;
        }
      } else {
        dq_prev_->dq_next_ = next;
      }
      if (next) {
        next->dq_prev_ = dq_prev_;
      } else {
        global_queue.dq_tail.store(dq_prev_, std::memory_order_release);
      }
    }
    // Deletion outside the lock avoids re-entrancy into the queue mutex.
    for (CordzHandle* handle : to_delete) {
      delete handle;
    }
  }
}
// A handle may be freed immediately if it is itself a snapshot, or if no
// snapshot currently exists that could still be walking the queue.
bool CordzHandle::SafeToDelete() const {
  return is_snapshot_ || GlobalQueue().IsEmpty();
}
// Frees `handle` now when safe; otherwise appends it to the delete queue so
// its destruction is deferred until the protecting snapshots are gone.
void CordzHandle::Delete(CordzHandle* handle) {
  assert(handle);
  if (handle) {
    Queue& queue = GlobalQueue();
    if (!handle->SafeToDelete()) {
      MutexLock lock(&queue.mutex);
      CordzHandle* dq_tail = queue.dq_tail.load(std::memory_order_acquire);
      // Re-check under the lock: the last snapshot may have drained the
      // queue between SafeToDelete() and acquiring the mutex.
      if (dq_tail != nullptr) {
        handle->dq_prev_ = dq_tail;
        dq_tail->dq_next_ = handle;
        queue.dq_tail.store(handle, std::memory_order_release);
        return;
      }
    }
    delete handle;
  }
}
// Returns the delete queue as seen under the lock, newest (tail) first.
std::vector<const CordzHandle*> CordzHandle::DiagnosticsGetDeleteQueue() {
  std::vector<const CordzHandle*> handles;
  Queue& global_queue = GlobalQueue();
  MutexLock lock(&global_queue.mutex);
  CordzHandle* dq_tail = global_queue.dq_tail.load(std::memory_order_acquire);
  for (const CordzHandle* p = dq_tail; p; p = p->dq_prev_) {
    handles.push_back(p);
  }
  return handles;
}
// True if this snapshot may safely inspect `handle`: only handles queued at
// or before this snapshot's position (i.e. not enqueued after it) qualify.
// Non-snapshot receivers and snapshot arguments are never inspectable.
bool CordzHandle::DiagnosticsHandleIsSafeToInspect(
    const CordzHandle* handle) const {
  if (!is_snapshot_) return false;
  if (handle == nullptr) return true;
  if (handle->is_snapshot_) return false;
  bool snapshot_found = false;
  Queue& global_queue = GlobalQueue();
  MutexLock lock(&global_queue.mutex);
  // Walk tail -> head; `handle` is safe iff we reach it before passing this
  // snapshot (meaning it was queued no later than the snapshot).
  for (const CordzHandle* p = global_queue.dq_tail; p; p = p->dq_prev_) {
    if (p == handle) return !snapshot_found;
    if (p == this) snapshot_found = true;
  }
  ABSL_ASSERT(snapshot_found);
  return true;
}
// Lists the deferred (non-snapshot) handles queued after this snapshot.
// Returns empty for non-snapshot handles.
std::vector<const CordzHandle*>
CordzHandle::DiagnosticsGetSafeToInspectDeletedHandles() {
  std::vector<const CordzHandle*> handles;
  if (!is_snapshot()) {
    return handles;
  }
  Queue& global_queue = GlobalQueue();
  MutexLock lock(&global_queue.mutex);
  for (const CordzHandle* p = dq_next_; p != nullptr; p = p->dq_next_) {
    if (!p->is_snapshot()) {
      handles.push_back(p);
    }
  }
  return handles;
}
}
ABSL_NAMESPACE_END
} | #include "absl/strings/internal/cordz_handle.h"
#include <random>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/memory/memory.h"
#include "absl/synchronization/internal/thread_pool.h"
#include "absl/synchronization/notification.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
namespace {
using ::testing::ElementsAre;
using ::testing::Gt;
using ::testing::IsEmpty;
using ::testing::SizeIs;
// Shorthand for the diagnostic view of the global delete queue.
std::vector<const CordzHandle*> DeleteQueue() {
  return CordzHandle::DiagnosticsGetDeleteQueue();
}
// Handle that flips *deleted in its destructor so tests can observe exactly
// when deferred deletion happens.
struct CordzHandleDeleteTracker : public CordzHandle {
  bool* deleted;
  explicit CordzHandleDeleteTracker(bool* deleted) : deleted(deleted) {}
  ~CordzHandleDeleteTracker() override { *deleted = true; }
};
// Baseline: no handles outstanding, queue is empty.
TEST(CordzHandleTest, DeleteQueueIsEmpty) {
  EXPECT_THAT(DeleteQueue(), SizeIs(0));
}
// With no snapshot alive, Delete() frees the handle immediately.
TEST(CordzHandleTest, CordzHandleCreateDelete) {
  bool deleted = false;
  auto* handle = new CordzHandleDeleteTracker(&deleted);
  EXPECT_FALSE(handle->is_snapshot());
  EXPECT_TRUE(handle->SafeToDelete());
  EXPECT_THAT(DeleteQueue(), SizeIs(0));
  CordzHandle::Delete(handle);
  EXPECT_THAT(DeleteQueue(), SizeIs(0));
  EXPECT_TRUE(deleted);
}
// Snapshots enqueue themselves on creation and unlink on destruction.
TEST(CordzHandleTest, CordzSnapshotCreateDelete) {
  auto* snapshot = new CordzSnapshot();
  EXPECT_TRUE(snapshot->is_snapshot());
  EXPECT_TRUE(snapshot->SafeToDelete());
  EXPECT_THAT(DeleteQueue(), ElementsAre(snapshot));
  delete snapshot;
  EXPECT_THAT(DeleteQueue(), SizeIs(0));
}
// A live snapshot defers deletion of a handle until the snapshot dies.
TEST(CordzHandleTest, CordzHandleCreateDeleteWithSnapshot) {
  bool deleted = false;
  auto* snapshot = new CordzSnapshot();
  auto* handle = new CordzHandleDeleteTracker(&deleted);
  EXPECT_FALSE(handle->SafeToDelete());
  CordzHandle::Delete(handle);
  EXPECT_THAT(DeleteQueue(), ElementsAre(handle, snapshot));
  EXPECT_FALSE(deleted);
  EXPECT_FALSE(handle->SafeToDelete());
  delete snapshot;
  EXPECT_THAT(DeleteQueue(), SizeIs(0));
  EXPECT_TRUE(deleted);
}
// Interleaves three snapshots and three deferred handles, then deletes the
// snapshots out of order and checks which handles get reaped at each step.
//
// Fix: the final expectation previously read ElementsAre(true, true,
// deleted) — `deleted` (the array) decays to a non-null pointer that
// converts to `true`, so the matcher passed by accident instead of pinning
// the third flag. It now asserts `true` explicitly.
TEST(CordzHandleTest, MultiSnapshot) {
  bool deleted[3] = {false, false, false};
  CordzSnapshot* snapshot[3];
  CordzHandleDeleteTracker* handle[3];
  for (int i = 0; i < 3; ++i) {
    snapshot[i] = new CordzSnapshot();
    handle[i] = new CordzHandleDeleteTracker(&deleted[i]);
    CordzHandle::Delete(handle[i]);
  }
  EXPECT_THAT(DeleteQueue(), ElementsAre(handle[2], snapshot[2], handle[1],
                                         snapshot[1], handle[0], snapshot[0]));
  EXPECT_THAT(deleted, ElementsAre(false, false, false));
  // Deleting a middle snapshot only unlinks it; nothing is reaped.
  delete snapshot[1];
  EXPECT_THAT(DeleteQueue(), ElementsAre(handle[2], snapshot[2], handle[1],
                                         handle[0], snapshot[0]));
  EXPECT_THAT(deleted, ElementsAre(false, false, false));
  // Deleting the oldest snapshot reaps the handles no longer protected.
  delete snapshot[0];
  EXPECT_THAT(DeleteQueue(), ElementsAre(handle[2], snapshot[2]));
  EXPECT_THAT(deleted, ElementsAre(true, true, false));
  delete snapshot[2];
  EXPECT_THAT(DeleteQueue(), SizeIs(0));
  EXPECT_THAT(deleted, ElementsAre(true, true, true));
}
// A snapshot may inspect handles queued at or before it, but a newer
// snapshot may not inspect handles deferred before its creation.
TEST(CordzHandleTest, DiagnosticsHandleIsSafeToInspect) {
  CordzSnapshot snapshot1;
  EXPECT_TRUE(snapshot1.DiagnosticsHandleIsSafeToInspect(nullptr));
  auto* handle1 = new CordzHandle();
  EXPECT_TRUE(snapshot1.DiagnosticsHandleIsSafeToInspect(handle1));
  CordzHandle::Delete(handle1);
  EXPECT_TRUE(snapshot1.DiagnosticsHandleIsSafeToInspect(handle1));
  CordzSnapshot snapshot2;
  auto* handle2 = new CordzHandle();
  EXPECT_TRUE(snapshot1.DiagnosticsHandleIsSafeToInspect(handle1));
  EXPECT_TRUE(snapshot1.DiagnosticsHandleIsSafeToInspect(handle2));
  EXPECT_FALSE(snapshot2.DiagnosticsHandleIsSafeToInspect(handle1));
  EXPECT_TRUE(snapshot2.DiagnosticsHandleIsSafeToInspect(handle2));
  CordzHandle::Delete(handle2);
  EXPECT_TRUE(snapshot1.DiagnosticsHandleIsSafeToInspect(handle1));
}
// Only handles deferred *after* a given snapshot appear in its
// safe-to-inspect list; newer snapshots don't see older deferred handles.
TEST(CordzHandleTest, DiagnosticsGetSafeToInspectDeletedHandles) {
  EXPECT_THAT(DeleteQueue(), IsEmpty());
  auto* handle = new CordzHandle();
  auto* snapshot1 = new CordzSnapshot();
  EXPECT_THAT(DeleteQueue(), ElementsAre(snapshot1));
  EXPECT_TRUE(snapshot1->DiagnosticsHandleIsSafeToInspect(handle));
  EXPECT_THAT(snapshot1->DiagnosticsGetSafeToInspectDeletedHandles(),
              IsEmpty());
  CordzHandle::Delete(handle);
  auto* snapshot2 = new CordzSnapshot();
  EXPECT_THAT(DeleteQueue(), ElementsAre(snapshot2, handle, snapshot1));
  EXPECT_TRUE(snapshot1->DiagnosticsHandleIsSafeToInspect(handle));
  EXPECT_FALSE(snapshot2->DiagnosticsHandleIsSafeToInspect(handle));
  EXPECT_THAT(snapshot1->DiagnosticsGetSafeToInspectDeletedHandles(),
              ElementsAre(handle));
  EXPECT_THAT(snapshot2->DiagnosticsGetSafeToInspectDeletedHandles(),
              IsEmpty());
  CordzHandle::Delete(snapshot1);
  EXPECT_THAT(DeleteQueue(), ElementsAre(snapshot2));
  CordzHandle::Delete(snapshot2);
  EXPECT_THAT(DeleteQueue(), IsEmpty());
}
// Stress test: four threads randomly create handles/snapshots, swap them
// into shared slots, query the safe-to-inspect diagnostics on whatever they
// displaced, and delete it. Verifies no snapshot ever leaks into a
// safe-to-inspect list and that at least one non-empty list was observed.
TEST(CordzHandleTest, MultiThreaded) {
  Notification stop;
  static constexpr int kNumThreads = 4;
  static constexpr int kNumHandles = 10;
  // Shared slots; exchange() hands exclusive ownership of the old value to
  // the swapping thread.
  std::vector<std::atomic<CordzHandle*>> handles(kNumHandles);
  std::atomic<bool> found_safe_to_inspect(false);
  {
    absl::synchronization_internal::ThreadPool pool(kNumThreads);
    for (int i = 0; i < kNumThreads; ++i) {
      pool.Schedule([&stop, &handles, &found_safe_to_inspect]() {
        std::minstd_rand gen;
        std::uniform_int_distribution<int> dist_type(0, 2);
        std::uniform_int_distribution<int> dist_handle(0, kNumHandles - 1);
        while (!stop.HasBeenNotified()) {
          CordzHandle* handle;
          switch (dist_type(gen)) {
            case 0:
              handle = new CordzHandle();
              break;
            case 1:
              handle = new CordzSnapshot();
              break;
            default:
              handle = nullptr;
              break;
          }
          CordzHandle* old_handle = handles[dist_handle(gen)].exchange(handle);
          if (old_handle != nullptr) {
            std::vector<const CordzHandle*> safe_to_inspect =
                old_handle->DiagnosticsGetSafeToInspectDeletedHandles();
            for (const CordzHandle* handle : safe_to_inspect) {
              // Snapshots must never appear in the deferred-handle list.
              ASSERT_FALSE(handle->is_snapshot());
            }
            if (!safe_to_inspect.empty()) {
              found_safe_to_inspect.store(true);
            }
            CordzHandle::Delete(old_handle);
          }
        }
        // Drain remaining slots so the pool exits with no leaks.
        for (auto& h : handles) {
          if (CordzHandle* handle = h.exchange(nullptr)) {
            CordzHandle::Delete(handle);
          }
        }
      });
    }
    absl::SleepFor(absl::Seconds(3));
    stop.Notify();
  }
  EXPECT_TRUE(found_safe_to_inspect.load());
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/cordz_handle.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/cordz_handle_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
6019610a-5519-4d8e-9c8b-b44273146c5f | cpp | tensorflow/tensorflow | transpose_dimension_grouper | third_party/xla/xla/service/gpu/transforms/transpose_dimension_grouper.cc | third_party/xla/xla/service/gpu/transforms/transpose_dimension_grouper_test.cc | #include "xla/service/gpu/transforms/transpose_dimension_grouper.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/layout_util.h"
#include "xla/permutation_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
absl::InlinedVector<size_t, 3> ConsecutiveSegments(
absl::Span<const int64_t> xs) {
absl::InlinedVector<size_t, 3> is = {0};
for (size_t i = 1; i < xs.size(); ++i) {
if (1 != xs[i] - xs[i - 1]) {
is.push_back(i);
}
}
return is;
}
Shape MergeDimensions(absl::Span<const size_t> segs, const Shape &shape) {
std::vector<int64_t> dimensions;
const auto size = segs.size();
dimensions.reserve(size);
for (size_t i = 1; i <= size; ++i) {
dimensions.push_back(std::accumulate(
shape.dimensions().begin() + segs[i - 1],
shape.dimensions().begin() +
(segs.size() == i ? shape.dimensions().size() : segs[i]),
int64_t{1}, std::multiplies<int64_t>()));
}
return ShapeUtil::MakeShapeWithDescendingLayout(shape.element_type(),
dimensions);
}
absl::InlinedVector<int64_t, 3> GetNormalizedTransposeShapeHelper(
const Shape &output_shape, absl::Span<int64_t const> output_to_input,
absl::InlinedVector<int64_t, 3> &permutation) {
absl::InlinedVector<size_t, 3> segments =
ConsecutiveSegments(output_to_input);
Shape normalized_shape = MergeDimensions(segments, output_shape);
absl::InlinedVector<int64_t, 3> normalized_dims(
normalized_shape.dimensions().begin(),
normalized_shape.dimensions().end());
if (segments.size() == 1) {
return normalized_dims;
}
std::vector<int64_t> segment_to_normalized_dim(output_shape.rank(), -1);
for (size_t segment : segments) {
segment_to_normalized_dim[output_to_input[segment]] = 0;
}
int64_t normalized_dim = 0;
for (int64_t i = 0; i < segment_to_normalized_dim.size(); ++i) {
if (segment_to_normalized_dim[i] >= 0) {
segment_to_normalized_dim[i] = normalized_dim++;
}
}
permutation.reserve(segments.size());
for (int64_t i = 0; i < segments.size(); ++i) {
permutation.push_back(
segment_to_normalized_dim[output_to_input[segments[i]]]);
}
return normalized_dims;
}
absl::InlinedVector<int64_t, 3> GetNormalizedLogicalTransposeShape(
const Shape &output_shape, absl::Span<int64_t const> dimensions,
absl::InlinedVector<int64_t, 3> &permutation) {
permutation.clear();
absl::InlinedVector<int64_t, 3> delta(output_shape.rank() + 1, 0);
auto input_dimensions = ComposePermutations(output_shape.dimensions(),
InversePermutation(dimensions));
for (int i = 0; i < output_shape.rank(); ++i) {
delta[i + 1] = delta[i];
if (input_dimensions[i] == static_cast<int64_t>(1)) {
++delta[i + 1];
}
}
absl::InlinedVector<int64_t, 3> new_dimensions;
for (int i = 0; i < dimensions.size(); i++) {
if (output_shape.dimensions(i) != 1) {
new_dimensions.push_back(dimensions[i] - delta[dimensions[i]]);
}
}
return GetNormalizedTransposeShapeHelper(
ShapeUtil::DropDegenerateDimensions(output_shape), new_dimensions,
permutation);
}
class TransposeDimensionGroupVisitor : public DfsHloRewriteVisitor {
public:
absl::Status HandleTranspose(HloInstruction *transpose) override {
VLOG(4) << "Input: " << transpose->ToString();
if (!LayoutUtil::IsMonotonicWithDim0Major(transpose->shape().layout()) ||
!LayoutUtil::IsMonotonicWithDim0Major(
transpose->operand(0)->shape().layout())) {
return FailedPrecondition(
"Layout normalization should have assigned the default layout to "
"transpose and its operand");
}
absl::InlinedVector<int64_t, 3> permutation;
auto normalized_dims = GetNormalizedLogicalTransposeShape(
transpose->shape(), transpose->dimensions(), permutation);
if (normalized_dims.size() == 1 ||
normalized_dims == transpose->shape().dimensions()) {
return absl::OkStatus();
}
auto normalized_operand_dims =
ComposePermutations(normalized_dims, InversePermutation(permutation));
Shape grouped_operand_shape = ShapeUtil::MakeShapeWithDescendingLayout(
transpose->shape().element_type(), normalized_operand_dims);
auto new_operand = transpose->AddInstruction(HloInstruction::CreateBitcast(
grouped_operand_shape, transpose->mutable_operand(0)));
Shape grouped_shape = ShapeUtil::MakeShapeWithDescendingLayout(
transpose->shape().element_type(), normalized_dims);
auto new_transpose =
transpose->AddInstruction(HloInstruction::CreateTranspose(
grouped_shape, new_operand, permutation));
VLOG(5) << "Generated new transpose: " << new_transpose->ToString();
return ReplaceWithNewInstruction(
transpose,
HloInstruction::CreateBitcast(transpose->shape(), new_transpose));
}
};
}
absl::StatusOr<bool> TransposeDimensionGrouper::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
TF_ASSIGN_OR_RETURN(
bool changed,
TransposeDimensionGroupVisitor().RunOnModule(module, execution_threads));
return changed;
}
}
} | #include "xla/service/gpu/transforms/transpose_dimension_grouper.h"
#include <optional>
#include "absl/strings/string_view.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
class TransposeDimensionGrouperTest : public HloTestBase {
public:
void CheckDimensionGrouper(absl::string_view hlo,
std::optional<absl::string_view> expected) {
RunAndFilecheckHloRewrite(hlo, TransposeDimensionGrouper{}, expected);
}
void CheckDimensionGrouperUnchanged(absl::string_view hlo) {
CheckDimensionGrouper(hlo, std::nullopt);
}
};
TEST_F(TransposeDimensionGrouperTest, NoTranspose) {
const char* hlo = R"(
HloModule NoTranspose
ENTRY main {
input = f32[64,128,1]{2,1,0} parameter(0)
ROOT out = f32[64,1,128]{2,1,0} transpose(input), dimensions={0,2,1}
}
)";
CheckDimensionGrouperUnchanged(hlo);
}
TEST_F(TransposeDimensionGrouperTest, NoTranspose2) {
const char* hlo = R"(
HloModule NoTranspose2
ENTRY main {
input = f32[32,128,64]{2,1,0} parameter(0)
ROOT out = f32[32,64,128]{0,1,2} transpose(input), dimensions={0,2,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
TransposeDimensionGrouper dimension_grouper;
EXPECT_THAT(dimension_grouper.Run(module.get()),
StatusIs(tsl::error::FAILED_PRECONDITION,
HasSubstr("Layout normalization")));
}
TEST_F(TransposeDimensionGrouperTest, NoTranspose3) {
const char* hlo = R"(
HloModule NoTranspose3
ENTRY main {
input = f32[32,128,64]{0,1,2} parameter(0)
ROOT out = f32[32,64,128]{2,1,0} transpose(input), dimensions={0,2,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
TransposeDimensionGrouper dimension_grouper;
EXPECT_THAT(dimension_grouper.Run(module.get()),
StatusIs(tsl::error::FAILED_PRECONDITION,
HasSubstr("Layout normalization")));
}
TEST_F(TransposeDimensionGrouperTest, Simple2D) {
const char* hlo = R"(
HloModule Simple2D
ENTRY main {
input = f32[128,64]{1,0} parameter(0)
ROOT out = f32[64,128]{1,0} transpose(input), dimensions={1,0}
}
)";
CheckDimensionGrouperUnchanged(hlo);
}
TEST_F(TransposeDimensionGrouperTest, Simple3D_021) {
const char* hlo = R"(
HloModule Simple3D_021
ENTRY main {
input = f32[8,32768,16]{2,1,0} parameter(0)
ROOT out = f32[8,16,32768]{2,1,0} transpose(input), dimensions={0,2,1}
}
)";
CheckDimensionGrouperUnchanged(hlo);
}
TEST_F(TransposeDimensionGrouperTest, Simple3D_210) {
const char* hlo = R"(
HloModule Simple3D_210
ENTRY main {
input = f32[8,32768,16]{2,1,0} parameter(0)
ROOT out = f32[16,32768,8]{2,1,0} transpose(input), dimensions={2,1,0}
}
)";
CheckDimensionGrouperUnchanged(hlo);
}
TEST_F(TransposeDimensionGrouperTest, Simple4D) {
const char* hlo = R"(
HloModule Simple4D
ENTRY main {
input = f32[32768,4,16,8]{3,2,1,0} parameter(0)
ROOT out = f32[16,32768,8,4]{3,2,1,0} transpose(input), dimensions={2,0,3,1}
}
)";
CheckDimensionGrouperUnchanged(hlo);
}
TEST_F(TransposeDimensionGrouperTest, NormalizeTo3D) {
const char* hlo = R"(
HloModule NormalizeTo3D
ENTRY main {
input = f32[8,32,32,32,16]{4,3,2,1,0} parameter(0)
ROOT out = f32[8,16,32,32,32]{4,3,2,1,0} transpose(input), dimensions={0,4,1,2,3}
}
)";
CheckDimensionGrouper(hlo,
R"(
)");
}
TEST_F(TransposeDimensionGrouperTest, LargeShapeSizeOverflow) {
const char* hlo = R"(
HloModule LargeShapeSizeOverflow
ENTRY main {
input = f32[4096,4096,128,16]{3,2,1,0} parameter(0)
ROOT out = f32[16,4096,4096,128]{3,2,1,0} transpose(input), dimensions={3,0,1,2}
}
)";
CheckDimensionGrouper(hlo,
R"(
)");
}
TEST_F(TransposeDimensionGrouperTest, DegenerateDims) {
const char* hlo = R"(
HloModule DegenerateDims
ENTRY main {
input = f32[1,32,1,3,1,64,1]{6,5,4,3,2,1,0} parameter(0)
ROOT out = f32[1,32,1,64,1,3,1]{6,5,4,3,2,1,0} transpose(input), dimensions={6,1,4,5,2,3,0}
}
)";
CheckDimensionGrouper(hlo,
R"(
)");
}
TEST_F(TransposeDimensionGrouperTest, TransposeWithGrouping) {
const char* hlo = R"(
HloModule TransposeWithGrouping
ENTRY main {
input = f32[100,1,10,32,2]{4,3,2,1,0} parameter(0)
ROOT out = f32[10,1,32,100,2]{4,3,2,1,0} transpose(input), dimensions={2,1,3,0,4}
}
)";
CheckDimensionGrouper(hlo,
R"(
)");
}
TEST_F(TransposeDimensionGrouperTest, NormalizeTo2D) {
const char* hlo = R"(
HloModule Normalize2DTo3D
ENTRY main {
input = f32[50,20,30]{2,1,0} parameter(0)
ROOT out = f32[20,30,50]{2,1,0} transpose(input), dimensions={1,2,0}
}
)";
CheckDimensionGrouper(hlo,
R"(
)");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/transpose_dimension_grouper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/transpose_dimension_grouper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b64966ca-5157-4ae3-9ed8-a270cc43f1ea | cpp | google/cel-cpp | json | common/json.cc | common/json_test.cc | #include "common/json.h"
#include <initializer_list>
#include <string>
#include <utility>
#include "absl/base/no_destructor.h"
#include "absl/functional/overload.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/variant.h"
#include "common/any.h"
#include "internal/copy_on_write.h"
#include "internal/proto_wire.h"
#include "internal/status_macros.h"
namespace cel {
internal::CopyOnWrite<typename JsonArray::Container> JsonArray::Empty() {
static const absl::NoDestructor<internal::CopyOnWrite<Container>> empty;
return *empty;
}
internal::CopyOnWrite<typename JsonObject::Container> JsonObject::Empty() {
static const absl::NoDestructor<internal::CopyOnWrite<Container>> empty;
return *empty;
}
Json JsonInt(int64_t value) {
if (value < kJsonMinInt || value > kJsonMaxInt) {
return JsonString(absl::StrCat(value));
}
return Json(static_cast<double>(value));
}
Json JsonUint(uint64_t value) {
if (value > kJsonMaxUint) {
return JsonString(absl::StrCat(value));
}
return Json(static_cast<double>(value));
}
Json JsonBytes(absl::string_view value) {
return JsonString(absl::Base64Escape(value));
}
Json JsonBytes(const absl::Cord& value) {
if (auto flat = value.TryFlat(); flat.has_value()) {
return JsonBytes(*flat);
}
return JsonBytes(absl::string_view(static_cast<std::string>(value)));
}
bool JsonArrayBuilder::empty() const { return impl_.get().empty(); }
JsonArray JsonArrayBuilder::Build() && { return JsonArray(std::move(impl_)); }
JsonArrayBuilder::JsonArrayBuilder(JsonArray array)
: impl_(std::move(array.impl_)) {}
JsonObjectBuilder::JsonObjectBuilder(JsonObject object)
: impl_(std::move(object.impl_)) {}
void JsonObjectBuilder::insert(std::initializer_list<value_type> il) {
impl_.mutable_get().insert(il);
}
JsonArrayBuilder::size_type JsonArrayBuilder::size() const {
return impl_.get().size();
}
JsonArrayBuilder::iterator JsonArrayBuilder::begin() {
return impl_.mutable_get().begin();
}
JsonArrayBuilder::const_iterator JsonArrayBuilder::begin() const {
return impl_.get().begin();
}
JsonArrayBuilder::iterator JsonArrayBuilder::end() {
return impl_.mutable_get().end();
}
JsonArrayBuilder::const_iterator JsonArrayBuilder::end() const {
return impl_.get().end();
}
JsonArrayBuilder::reverse_iterator JsonArrayBuilder::rbegin() {
return impl_.mutable_get().rbegin();
}
JsonArrayBuilder::reverse_iterator JsonArrayBuilder::rend() {
return impl_.mutable_get().rend();
}
JsonArrayBuilder::reference JsonArrayBuilder::at(size_type index) {
return impl_.mutable_get().at(index);
}
JsonArrayBuilder::reference JsonArrayBuilder::operator[](size_type index) {
return (impl_.mutable_get())[index];
}
void JsonArrayBuilder::reserve(size_type n) {
if (n != 0) {
impl_.mutable_get().reserve(n);
}
}
void JsonArrayBuilder::clear() { impl_.mutable_get().clear(); }
void JsonArrayBuilder::push_back(Json json) {
impl_.mutable_get().push_back(std::move(json));
}
void JsonArrayBuilder::pop_back() { impl_.mutable_get().pop_back(); }
JsonArrayBuilder::operator JsonArray() && { return std::move(*this).Build(); }
bool JsonArray::empty() const { return impl_.get().empty(); }
JsonArray::JsonArray(internal::CopyOnWrite<Container> impl)
: impl_(std::move(impl)) {
if (impl_.get().empty()) {
impl_ = Empty();
}
}
JsonArray::size_type JsonArray::size() const { return impl_.get().size(); }
JsonArray::const_iterator JsonArray::begin() const {
return impl_.get().begin();
}
JsonArray::const_iterator JsonArray::cbegin() const { return begin(); }
JsonArray::const_iterator JsonArray::end() const { return impl_.get().end(); }
JsonArray::const_iterator JsonArray::cend() const { return begin(); }
JsonArray::const_reverse_iterator JsonArray::rbegin() const {
return impl_.get().rbegin();
}
JsonArray::const_reverse_iterator JsonArray::crbegin() const {
return impl_.get().crbegin();
}
JsonArray::const_reverse_iterator JsonArray::rend() const {
return impl_.get().rend();
}
JsonArray::const_reverse_iterator JsonArray::crend() const {
return impl_.get().crend();
}
JsonArray::const_reference JsonArray::at(size_type index) const {
return impl_.get().at(index);
}
JsonArray::const_reference JsonArray::operator[](size_type index) const {
return (impl_.get())[index];
}
bool operator==(const JsonArray& lhs, const JsonArray& rhs) {
return lhs.impl_.get() == rhs.impl_.get();
}
bool operator!=(const JsonArray& lhs, const JsonArray& rhs) {
return lhs.impl_.get() != rhs.impl_.get();
}
JsonObjectBuilder::operator JsonObject() && { return std::move(*this).Build(); }
bool JsonObjectBuilder::empty() const { return impl_.get().empty(); }
JsonObjectBuilder::size_type JsonObjectBuilder::size() const {
return impl_.get().size();
}
JsonObjectBuilder::iterator JsonObjectBuilder::begin() {
return impl_.mutable_get().begin();
}
JsonObjectBuilder::const_iterator JsonObjectBuilder::begin() const {
return impl_.get().begin();
}
JsonObjectBuilder::iterator JsonObjectBuilder::end() {
return impl_.mutable_get().end();
}
JsonObjectBuilder::const_iterator JsonObjectBuilder::end() const {
return impl_.get().end();
}
void JsonObjectBuilder::clear() { impl_.mutable_get().clear(); }
JsonObject JsonObjectBuilder::Build() && {
return JsonObject(std::move(impl_));
}
void JsonObjectBuilder::erase(const_iterator pos) {
impl_.mutable_get().erase(std::move(pos));
}
void JsonObjectBuilder::reserve(size_type n) {
if (n != 0) {
impl_.mutable_get().reserve(n);
}
}
JsonObject MakeJsonObject(
std::initializer_list<std::pair<JsonString, Json>> il) {
JsonObjectBuilder builder;
builder.reserve(il.size());
for (const auto& entry : il) {
builder.insert(entry);
}
return std::move(builder).Build();
}
JsonObject::JsonObject(internal::CopyOnWrite<Container> impl)
: impl_(std::move(impl)) {
if (impl_.get().empty()) {
impl_ = Empty();
}
}
bool JsonObject::empty() const { return impl_.get().empty(); }
JsonObject::size_type JsonObject::size() const { return impl_.get().size(); }
JsonObject::const_iterator JsonObject::begin() const {
return impl_.get().begin();
}
JsonObject::const_iterator JsonObject::cbegin() const { return begin(); }
JsonObject::const_iterator JsonObject::end() const { return impl_.get().end(); }
JsonObject::const_iterator JsonObject::cend() const { return end(); }
bool operator==(const JsonObject& lhs, const JsonObject& rhs) {
return lhs.impl_.get() == rhs.impl_.get();
}
bool operator!=(const JsonObject& lhs, const JsonObject& rhs) {
return lhs.impl_.get() != rhs.impl_.get();
}
namespace {
using internal::ProtoWireEncoder;
using internal::ProtoWireTag;
using internal::ProtoWireType;
inline constexpr absl::string_view kJsonTypeName = "google.protobuf.Value";
inline constexpr absl::string_view kJsonArrayTypeName =
"google.protobuf.ListValue";
inline constexpr absl::string_view kJsonObjectTypeName =
"google.protobuf.Struct";
inline constexpr ProtoWireTag kValueNullValueFieldTag =
ProtoWireTag(1, ProtoWireType::kVarint);
inline constexpr ProtoWireTag kValueBoolValueFieldTag =
ProtoWireTag(4, ProtoWireType::kVarint);
inline constexpr ProtoWireTag kValueNumberValueFieldTag =
ProtoWireTag(2, ProtoWireType::kFixed64);
inline constexpr ProtoWireTag kValueStringValueFieldTag =
ProtoWireTag(3, ProtoWireType::kLengthDelimited);
inline constexpr ProtoWireTag kValueListValueFieldTag =
ProtoWireTag(6, ProtoWireType::kLengthDelimited);
inline constexpr ProtoWireTag kValueStructValueFieldTag =
ProtoWireTag(5, ProtoWireType::kLengthDelimited);
inline constexpr ProtoWireTag kListValueValuesFieldTag =
ProtoWireTag(1, ProtoWireType::kLengthDelimited);
inline constexpr ProtoWireTag kStructFieldsEntryKeyFieldTag =
ProtoWireTag(1, ProtoWireType::kLengthDelimited);
inline constexpr ProtoWireTag kStructFieldsEntryValueFieldTag =
ProtoWireTag(2, ProtoWireType::kLengthDelimited);
absl::StatusOr<absl::Cord> JsonObjectEntryToAnyValue(const absl::Cord& key,
const Json& value) {
absl::Cord data;
ProtoWireEncoder encoder("google.protobuf.Struct.FieldsEntry", data);
absl::Cord subdata;
CEL_RETURN_IF_ERROR(JsonToAnyValue(value, subdata));
CEL_RETURN_IF_ERROR(encoder.WriteTag(kStructFieldsEntryKeyFieldTag));
CEL_RETURN_IF_ERROR(encoder.WriteLengthDelimited(std::move(key)));
CEL_RETURN_IF_ERROR(encoder.WriteTag(kStructFieldsEntryValueFieldTag));
CEL_RETURN_IF_ERROR(encoder.WriteLengthDelimited(std::move(subdata)));
encoder.EnsureFullyEncoded();
return data;
}
inline constexpr ProtoWireTag kStructFieldsFieldTag =
ProtoWireTag(1, ProtoWireType::kLengthDelimited);
}
absl::Status JsonToAnyValue(const Json& json, absl::Cord& data) {
ProtoWireEncoder encoder(kJsonTypeName, data);
absl::Status status = absl::visit(
absl::Overload(
[&encoder](JsonNull) -> absl::Status {
CEL_RETURN_IF_ERROR(encoder.WriteTag(kValueNullValueFieldTag));
return encoder.WriteVarint(0);
},
[&encoder](JsonBool value) -> absl::Status {
CEL_RETURN_IF_ERROR(encoder.WriteTag(kValueBoolValueFieldTag));
return encoder.WriteVarint(value);
},
[&encoder](JsonNumber value) -> absl::Status {
CEL_RETURN_IF_ERROR(encoder.WriteTag(kValueNumberValueFieldTag));
return encoder.WriteFixed64(value);
},
[&encoder](const JsonString& value) -> absl::Status {
CEL_RETURN_IF_ERROR(encoder.WriteTag(kValueStringValueFieldTag));
return encoder.WriteLengthDelimited(value);
},
[&encoder](const JsonArray& value) -> absl::Status {
absl::Cord subdata;
CEL_RETURN_IF_ERROR(JsonArrayToAnyValue(value, subdata));
CEL_RETURN_IF_ERROR(encoder.WriteTag(kValueListValueFieldTag));
return encoder.WriteLengthDelimited(std::move(subdata));
},
[&encoder](const JsonObject& value) -> absl::Status {
absl::Cord subdata;
CEL_RETURN_IF_ERROR(JsonObjectToAnyValue(value, subdata));
CEL_RETURN_IF_ERROR(encoder.WriteTag(kValueStructValueFieldTag));
return encoder.WriteLengthDelimited(std::move(subdata));
}),
json);
CEL_RETURN_IF_ERROR(status);
encoder.EnsureFullyEncoded();
return absl::OkStatus();
}
absl::Status JsonArrayToAnyValue(const JsonArray& json, absl::Cord& data) {
ProtoWireEncoder encoder(kJsonArrayTypeName, data);
for (const auto& element : json) {
absl::Cord subdata;
CEL_RETURN_IF_ERROR(JsonToAnyValue(element, subdata));
CEL_RETURN_IF_ERROR(encoder.WriteTag(kListValueValuesFieldTag));
CEL_RETURN_IF_ERROR(encoder.WriteLengthDelimited(std::move(subdata)));
}
encoder.EnsureFullyEncoded();
return absl::OkStatus();
}
absl::Status JsonObjectToAnyValue(const JsonObject& json, absl::Cord& data) {
ProtoWireEncoder encoder(kJsonObjectTypeName, data);
for (const auto& entry : json) {
CEL_ASSIGN_OR_RETURN(auto subdata,
JsonObjectEntryToAnyValue(entry.first, entry.second));
CEL_RETURN_IF_ERROR(encoder.WriteTag(kStructFieldsFieldTag));
CEL_RETURN_IF_ERROR(encoder.WriteLengthDelimited(std::move(subdata)));
}
encoder.EnsureFullyEncoded();
return absl::OkStatus();
}
absl::StatusOr<google::protobuf::Any> JsonToAny(const Json& json) {
absl::Cord data;
CEL_RETURN_IF_ERROR(JsonToAnyValue(json, data));
return MakeAny(MakeTypeUrl(kJsonTypeName), std::move(data));
}
absl::StatusOr<google::protobuf::Any> JsonArrayToAny(const JsonArray& json) {
absl::Cord data;
CEL_RETURN_IF_ERROR(JsonArrayToAnyValue(json, data));
return MakeAny(MakeTypeUrl(kJsonArrayTypeName), std::move(data));
}
absl::StatusOr<google::protobuf::Any> JsonObjectToAny(const JsonObject& json) {
absl::Cord data;
CEL_RETURN_IF_ERROR(JsonObjectToAnyValue(json, data));
return MakeAny(MakeTypeUrl(kJsonObjectTypeName), std::move(data));
}
} | #include "common/json.h"
#include "absl/hash/hash_testing.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "internal/testing.h"
namespace cel::internal {
namespace {
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::IsFalse;
using ::testing::IsTrue;
using ::testing::UnorderedElementsAre;
using ::testing::VariantWith;
TEST(Json, DefaultConstructor) {
EXPECT_THAT(Json(), VariantWith<JsonNull>(Eq(kJsonNull)));
}
TEST(Json, NullConstructor) {
EXPECT_THAT(Json(kJsonNull), VariantWith<JsonNull>(Eq(kJsonNull)));
}
TEST(Json, FalseConstructor) {
EXPECT_THAT(Json(false), VariantWith<JsonBool>(IsFalse()));
}
TEST(Json, TrueConstructor) {
EXPECT_THAT(Json(true), VariantWith<JsonBool>(IsTrue()));
}
TEST(Json, NumberConstructor) {
EXPECT_THAT(Json(1.0), VariantWith<JsonNumber>(1));
}
TEST(Json, StringConstructor) {
EXPECT_THAT(Json(JsonString("foo")), VariantWith<JsonString>(Eq("foo")));
}
TEST(Json, ArrayConstructor) {
EXPECT_THAT(Json(JsonArray()), VariantWith<JsonArray>(Eq(JsonArray())));
}
TEST(Json, ObjectConstructor) {
EXPECT_THAT(Json(JsonObject()), VariantWith<JsonObject>(Eq(JsonObject())));
}
TEST(Json, ImplementsAbslHashCorrectly) {
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
{Json(), Json(true), Json(1.0), Json(JsonString("foo")),
Json(JsonArray()), Json(JsonObject())}));
}
TEST(JsonArrayBuilder, DefaultConstructor) {
JsonArrayBuilder builder;
EXPECT_TRUE(builder.empty());
EXPECT_EQ(builder.size(), 0);
}
TEST(JsonArrayBuilder, OneOfEach) {
JsonArrayBuilder builder;
builder.reserve(6);
builder.push_back(kJsonNull);
builder.push_back(true);
builder.push_back(1.0);
builder.push_back(JsonString("foo"));
builder.push_back(JsonArray());
builder.push_back(JsonObject());
EXPECT_FALSE(builder.empty());
EXPECT_EQ(builder.size(), 6);
EXPECT_THAT(builder, ElementsAre(kJsonNull, true, 1.0, JsonString("foo"),
JsonArray(), JsonObject()));
builder.pop_back();
EXPECT_FALSE(builder.empty());
EXPECT_EQ(builder.size(), 5);
EXPECT_THAT(builder, ElementsAre(kJsonNull, true, 1.0, JsonString("foo"),
JsonArray()));
builder.clear();
EXPECT_TRUE(builder.empty());
EXPECT_EQ(builder.size(), 0);
}
TEST(JsonObjectBuilder, DefaultConstructor) {
JsonObjectBuilder builder;
EXPECT_TRUE(builder.empty());
EXPECT_EQ(builder.size(), 0);
}
TEST(JsonObjectBuilder, OneOfEach) {
JsonObjectBuilder builder;
builder.reserve(6);
builder.insert_or_assign(JsonString("foo"), kJsonNull);
builder.insert_or_assign(JsonString("bar"), true);
builder.insert_or_assign(JsonString("baz"), 1.0);
builder.insert_or_assign(JsonString("qux"), JsonString("foo"));
builder.insert_or_assign(JsonString("quux"), JsonArray());
builder.insert_or_assign(JsonString("corge"), JsonObject());
EXPECT_FALSE(builder.empty());
EXPECT_EQ(builder.size(), 6);
EXPECT_THAT(builder, UnorderedElementsAre(
std::make_pair(JsonString("foo"), kJsonNull),
std::make_pair(JsonString("bar"), true),
std::make_pair(JsonString("baz"), 1.0),
std::make_pair(JsonString("qux"), JsonString("foo")),
std::make_pair(JsonString("quux"), JsonArray()),
std::make_pair(JsonString("corge"), JsonObject())));
builder.erase(JsonString("corge"));
EXPECT_FALSE(builder.empty());
EXPECT_EQ(builder.size(), 5);
EXPECT_THAT(builder, UnorderedElementsAre(
std::make_pair(JsonString("foo"), kJsonNull),
std::make_pair(JsonString("bar"), true),
std::make_pair(JsonString("baz"), 1.0),
std::make_pair(JsonString("qux"), JsonString("foo")),
std::make_pair(JsonString("quux"), JsonArray())));
builder.clear();
EXPECT_TRUE(builder.empty());
EXPECT_EQ(builder.size(), 0);
}
TEST(JsonInt, Basic) {
EXPECT_THAT(JsonInt(1), VariantWith<JsonNumber>(1.0));
EXPECT_THAT(JsonInt(std::numeric_limits<int64_t>::max()),
VariantWith<JsonString>(
Eq(absl::StrCat(std::numeric_limits<int64_t>::max()))));
}
TEST(JsonUint, Basic) {
EXPECT_THAT(JsonUint(1), VariantWith<JsonNumber>(1.0));
EXPECT_THAT(JsonUint(std::numeric_limits<uint64_t>::max()),
VariantWith<JsonString>(
Eq(absl::StrCat(std::numeric_limits<uint64_t>::max()))));
}
TEST(JsonBytes, Basic) {
EXPECT_THAT(JsonBytes("foo"),
VariantWith<JsonString>(Eq(absl::Base64Escape("foo"))));
EXPECT_THAT(JsonBytes(absl::Cord("foo")),
VariantWith<JsonString>(Eq(absl::Base64Escape("foo"))));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/json.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/json_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
1986c596-9933-436d-a46c-46859567f49a | cpp | tensorflow/tensorflow | cycle_detector | third_party/xla/xla/mlir_hlo/utils/cycle_detector.cc | third_party/xla/xla/mlir_hlo/utils/cycle_detector_test.cc | #include "utils/cycle_detector.h"
#include <algorithm>
#include <optional>
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
namespace mlir {
namespace {
using NodeSet = llvm::DenseSet<int32_t>;
using OrderedNodeSet = OrderedSet<int32_t>;
template <typename T>
struct VecStruct {
using type = llvm::SmallVector<T, 4>;
};
template <typename T>
using Vec = typename VecStruct<T>::type;
struct Node {
int32_t rank;
bool visited;
void* data;
OrderedNodeSet in;
OrderedNodeSet out;
};
}
struct GraphCycles::Rep {
Vec<Node*> nodes;
Vec<int32_t> freeNodes;
Vec<int32_t> deltaf;
Vec<int32_t> deltab;
Vec<int32_t> list;
Vec<int32_t> merged;
Vec<int32_t> stack;
};
GraphCycles::GraphCycles(int32_t numNodes) : rep_(new Rep) {
rep_->nodes.reserve(numNodes);
for (int32_t i = 0; i < numNodes; ++i) {
Node* n = new Node;
n->visited = false;
n->data = nullptr;
n->rank = rep_->nodes.size();
rep_->nodes.push_back(n);
}
}
GraphCycles::~GraphCycles() {
for (Vec<Node*>::size_type i = 0, e = rep_->nodes.size(); i < e; ++i) {
delete rep_->nodes[i];
}
delete rep_;
}
bool GraphCycles::HasEdge(int32_t x, int32_t y) const {
return rep_->nodes[x]->out.Contains(y);
}
void GraphCycles::RemoveEdge(int32_t x, int32_t y) {
rep_->nodes[x]->out.Erase(y);
rep_->nodes[y]->in.Erase(x);
}
static bool forwardDfs(GraphCycles::Rep* r, int32_t n, int32_t upperBound);
static void backwardDfs(GraphCycles::Rep* r, int32_t n, int32_t lowerBound);
static void reorder(GraphCycles::Rep* r);
static void sort(const Vec<Node*>&, Vec<int32_t>* delta);
static void moveToList(GraphCycles::Rep* r, Vec<int32_t>* src,
Vec<int32_t>* dst);
static void clearVisitedBits(GraphCycles::Rep* r, const Vec<int32_t>& nodes);
bool GraphCycles::InsertEdge(int32_t x, int32_t y) {
if (x == y) return false;
Rep* r = rep_;
Node* nx = r->nodes[x];
if (!nx->out.Insert(y)) {
return true;
}
Node* ny = r->nodes[y];
ny->in.Insert(x);
if (nx->rank <= ny->rank) {
return true;
}
if (forwardDfs(r, y, nx->rank)) {
nx->out.Erase(y);
ny->in.Erase(x);
clearVisitedBits(r, r->deltaf);
return false;
}
backwardDfs(r, x, ny->rank);
reorder(r);
return true;
}
static bool forwardDfs(GraphCycles::Rep* r, int32_t n, int32_t upperBound) {
r->deltaf.clear();
r->stack.clear();
r->stack.push_back(n);
while (!r->stack.empty()) {
n = r->stack.back();
r->stack.pop_back();
Node* nn = r->nodes[n];
if (nn->visited) continue;
nn->visited = true;
r->deltaf.push_back(n);
for (auto w : nn->out.GetSequence()) {
Node* nw = r->nodes[w];
if (nw->rank == upperBound) {
return true;
}
if (!nw->visited && nw->rank < upperBound) {
r->stack.push_back(w);
}
}
}
return false;
}
static void backwardDfs(GraphCycles::Rep* r, int32_t n, int32_t lowerBound) {
r->deltab.clear();
r->stack.clear();
r->stack.push_back(n);
while (!r->stack.empty()) {
n = r->stack.back();
r->stack.pop_back();
Node* nn = r->nodes[n];
if (nn->visited) continue;
nn->visited = true;
r->deltab.push_back(n);
for (auto w : nn->in.GetSequence()) {
Node* nw = r->nodes[w];
if (!nw->visited && lowerBound < nw->rank) {
r->stack.push_back(w);
}
}
}
}
static void reorder(GraphCycles::Rep* r) {
sort(r->nodes, &r->deltab);
sort(r->nodes, &r->deltaf);
r->list.clear();
moveToList(r, &r->deltab, &r->list);
moveToList(r, &r->deltaf, &r->list);
r->merged.resize(r->deltab.size() + r->deltaf.size());
std::merge(r->deltab.begin(), r->deltab.end(), r->deltaf.begin(),
r->deltaf.end(), r->merged.begin());
for (Vec<int32_t>::size_type i = 0, e = r->list.size(); i < e; ++i) {
r->nodes[r->list[i]]->rank = r->merged[i];
}
}
static void sort(const Vec<Node*>& nodes, Vec<int32_t>* delta) {
struct ByRank {
const Vec<Node*>* nodes;
bool operator()(int32_t a, int32_t b) const {
return (*nodes)[a]->rank < (*nodes)[b]->rank;
}
};
ByRank cmp;
cmp.nodes = &nodes;
std::sort(delta->begin(), delta->end(), cmp);
}
static void moveToList(GraphCycles::Rep* r, Vec<int32_t>* src,
Vec<int32_t>* dst) {
for (Vec<int32_t>::size_type i = 0, e = src->size(); i < e; i++) {
int32_t w = (*src)[i];
(*src)[i] = r->nodes[w]->rank;
r->nodes[w]->visited = false;
dst->push_back(w);
}
}
static void clearVisitedBits(GraphCycles::Rep* r, const Vec<int32_t>& nodes) {
for (Vec<int32_t>::size_type i = 0, e = nodes.size(); i < e; i++) {
r->nodes[nodes[i]]->visited = false;
}
}
bool GraphCycles::IsReachable(int32_t x, int32_t y) {
if (x == y) return true;
Rep* r = rep_;
Node* nx = r->nodes[x];
Node* ny = r->nodes[y];
if (nx->rank >= ny->rank) {
return false;
}
bool reachable = forwardDfs(r, x, ny->rank);
clearVisitedBits(r, r->deltaf);
return reachable;
}
std::optional<int32_t> GraphCycles::ContractEdge(int32_t a, int32_t b) {
assert(HasEdge(a, b));
RemoveEdge(a, b);
if (IsReachable(a, b)) {
InsertEdge(a, b);
return {};
}
if (rep_->nodes[b]->in.Size() + rep_->nodes[b]->out.Size() >
rep_->nodes[a]->in.Size() + rep_->nodes[a]->out.Size()) {
std::swap(a, b);
}
Node* nb = rep_->nodes[b];
OrderedNodeSet out = std::move(nb->out);
OrderedNodeSet in = std::move(nb->in);
for (int32_t y : out.GetSequence()) {
rep_->nodes[y]->in.Erase(b);
}
for (int32_t y : in.GetSequence()) {
rep_->nodes[y]->out.Erase(b);
}
rep_->freeNodes.push_back(b);
rep_->nodes[a]->out.Reserve(rep_->nodes[a]->out.Size() + out.Size());
for (int32_t y : out.GetSequence()) {
InsertEdge(a, y);
}
rep_->nodes[a]->in.Reserve(rep_->nodes[a]->in.Size() + in.Size());
for (int32_t y : in.GetSequence()) {
InsertEdge(y, a);
}
return a;
}
std::vector<int32_t> GraphCycles::SuccessorsCopy(int32_t node) const {
return rep_->nodes[node]->out.GetSequence();
}
namespace {
void sortInPostOrder(const Vec<Node*>& nodes, std::vector<int32_t>* toSort) {
std::sort(toSort->begin(), toSort->end(), [&](int32_t a, int32_t b) {
return nodes[a]->rank > nodes[b]->rank;
});
}
}
std::vector<int32_t> GraphCycles::AllNodesInPostOrder() const {
llvm::DenseSet<int32_t> freeNodesSet;
for (int32_t n : rep_->freeNodes) freeNodesSet.insert(n);
std::vector<int32_t> allNodes;
allNodes.reserve(rep_->nodes.size() - freeNodesSet.size());
for (size_t i = 0, e = rep_->nodes.size(); i < e; i++) {
if (!freeNodesSet.count(i)) {
allNodes.push_back(i);
}
}
sortInPostOrder(rep_->nodes, &allNodes);
return allNodes;
}
} | #include "utils/cycle_detector.h"
#include "xla/test.h"
// Fixture providing a 100-node cycle-detecting graph plus edge helpers.
class GraphCyclesTest : public ::testing::Test {
 public:
  GraphCyclesTest() : g_(100) {}
  // Returns true iff the edge was inserted without creating a cycle.
  bool AddEdge(int x, int y) { return g_.InsertEdge(x, y); }
  // Builds an acyclic "multiples" graph: edges x -> 2x and x -> 3x for
  // every x in [1, 25).
  void AddMultiples() {
    for (int x = 1; x < 25; x++) {
      EXPECT_TRUE(AddEdge(x, 2 * x)) << x;
      EXPECT_TRUE(AddEdge(x, 3 * x)) << x;
    }
  }
  mlir::GraphCycles g_;
};
// The multiples DAG is acyclic, so building it must raise no failures.
TEST_F(GraphCyclesTest, NoCycle) { AddMultiples(); }
// 4 -> 8 already exists, so adding 8 -> 4 would close a cycle.
TEST_F(GraphCyclesTest, SimpleCycle) {
  AddMultiples();
  EXPECT_FALSE(AddEdge(8, 4));
}
// 2 -> 4 -> ... -> 16 -> 9 exists after the first insert, so 9 -> 2 closes
// a longer cycle.
TEST_F(GraphCyclesTest, IndirectCycle) {
  AddMultiples();
  EXPECT_TRUE(AddEdge(16, 9));
  EXPECT_FALSE(AddEdge(9, 2));
}
TEST_F(GraphCyclesTest, RemoveEdge) {
  EXPECT_TRUE(AddEdge(1, 2));
  EXPECT_TRUE(AddEdge(2, 3));
  EXPECT_TRUE(AddEdge(3, 4));
  EXPECT_TRUE(AddEdge(4, 5));
  g_.RemoveEdge(2, 3);
  EXPECT_FALSE(g_.HasEdge(2, 3));
}
// Reachability must follow edge direction only.
TEST_F(GraphCyclesTest, IsReachable) {
  EXPECT_TRUE(AddEdge(1, 2));
  EXPECT_TRUE(AddEdge(2, 3));
  EXPECT_TRUE(AddEdge(3, 4));
  EXPECT_TRUE(AddEdge(4, 5));
  EXPECT_TRUE(g_.IsReachable(1, 5));
  EXPECT_FALSE(g_.IsReachable(5, 1));
}
TEST_F(GraphCyclesTest, ContractEdge) {
  ASSERT_TRUE(AddEdge(1, 2));
  ASSERT_TRUE(AddEdge(1, 3));
  ASSERT_TRUE(AddEdge(2, 3));
  ASSERT_TRUE(AddEdge(2, 4));
  ASSERT_TRUE(AddEdge(3, 4));
  // Contracting 1-3 would create a cycle via 1 -> 2 -> 3, so it must fail
  // and leave the edge intact.
  EXPECT_FALSE(g_.ContractEdge(1, 3).has_value());
  EXPECT_TRUE(g_.HasEdge(1, 3));
  // Contracting 1-2 succeeds and the surviving node id is 2; 2 inherits
  // node 1's edges.
  EXPECT_EQ(*g_.ContractEdge(1, 2), 2);
  EXPECT_TRUE(g_.HasEdge(2, 3));
  EXPECT_TRUE(g_.HasEdge(2, 4));
  EXPECT_TRUE(g_.HasEdge(3, 4));
  // A second contraction keeps id 2 as the survivor.
  EXPECT_EQ(*g_.ContractEdge(2, 3), 2);
  EXPECT_TRUE(g_.HasEdge(2, 4));
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/mlir_hlo/utils/cycle_detector.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/mlir_hlo/utils/cycle_detector_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
86c445d1-3acf-4a4e-b997-db710d49539f | cpp | google/quiche | quic_socket_address_coder | quiche/quic/core/quic_socket_address_coder.cc | quiche/quic/core/quic_socket_address_coder_test.cc | #include "quiche/quic/core/quic_socket_address_coder.h"
#include <cstring>
#include <string>
#include <vector>
#include "quiche/quic/platform/api/quic_ip_address_family.h"
namespace quic {
namespace {
// On-the-wire address-family tags used by Encode()/Decode(). The values
// happen to match Linux's AF_INET/AF_INET6, but only these constants define
// the serialized format.
const uint16_t kIPv4 = 2;
const uint16_t kIPv6 = 10;
}
QuicSocketAddressCoder::QuicSocketAddressCoder() {}
QuicSocketAddressCoder::QuicSocketAddressCoder(const QuicSocketAddress& address)
    : address_(address) {}
QuicSocketAddressCoder::~QuicSocketAddressCoder() {}
std::string QuicSocketAddressCoder::Encode() const {
  // Serialized layout (host byte order): 2-byte family tag, the packed IP
  // bytes, then a 2-byte port.
  std::string result;
  uint16_t family_tag;
  switch (address_.host().address_family()) {
    case IpAddressFamily::IP_V4:
      family_tag = kIPv4;
      break;
    case IpAddressFamily::IP_V6:
      family_tag = kIPv6;
      break;
    default:
      // Unknown family: nothing can be encoded, return an empty string.
      return result;
  }
  result.append(reinterpret_cast<const char*>(&family_tag), sizeof(family_tag));
  result.append(address_.host().ToPackedString());
  const uint16_t port = address_.port();
  result.append(reinterpret_cast<const char*>(&port), sizeof(port));
  return result;
}
// Parses a buffer produced by Encode(): a 2-byte family tag, the packed IP
// address (4 or 16 bytes, per the tag), then a 2-byte port, all in host byte
// order. Returns false on truncation, trailing bytes, or an unknown family;
// on success stores the decoded address in address_.
bool QuicSocketAddressCoder::Decode(const char* data, size_t length) {
  uint16_t address_family;
  if (length < sizeof(address_family)) {
    return false;
  }
  memcpy(&address_family, data, sizeof(address_family));
  data += sizeof(address_family);
  length -= sizeof(address_family);
  // The family tag determines how many address bytes must follow.
  size_t ip_length;
  switch (address_family) {
    case kIPv4:
      ip_length = QuicIpAddress::kIPv4AddressSize;
      break;
    case kIPv6:
      ip_length = QuicIpAddress::kIPv6AddressSize;
      break;
    default:
      return false;
  }
  if (length < ip_length) {
    return false;
  }
  std::vector<uint8_t> ip(ip_length);
  // Use data() rather than &ip[0] for the idiomatic (and obviously safe)
  // way to reach the vector's storage.
  memcpy(ip.data(), data, ip_length);
  data += ip_length;
  length -= ip_length;
  uint16_t port;
  // Exactly the port bytes must remain; trailing garbage is rejected.
  if (length != sizeof(port)) {
    return false;
  }
  memcpy(&port, data, length);
  QuicIpAddress ip_address;
  ip_address.FromPackedString(reinterpret_cast<const char*>(ip.data()),
                              ip_length);
  address_ = QuicSocketAddress(ip_address, port);
  return true;
}
} | #include "quiche/quic/core/quic_socket_address_coder.h"
#include <string>
#include "absl/base/macros.h"
#include "quiche/quic/platform/api/quic_ip_address.h"
#include "quiche/quic/platform/api/quic_ip_address_family.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
class QuicSocketAddressCoderTest : public QuicTest {};
// IPv4 encoding: family tag 0x0002, 4 address bytes, little-endian port.
TEST_F(QuicSocketAddressCoderTest, EncodeIPv4) {
  QuicIpAddress ip;
  ip.FromString("4.31.198.44");
  QuicSocketAddressCoder coder(QuicSocketAddress(ip, 0x1234));
  std::string serialized = coder.Encode();
  std::string expected("\x02\x00\x04\x1f\xc6\x2c\x34\x12", 8);
  EXPECT_EQ(expected, serialized);
}
// IPv6 encoding: family tag 0x000a, 16 address bytes, little-endian port.
TEST_F(QuicSocketAddressCoderTest, EncodeIPv6) {
  QuicIpAddress ip;
  ip.FromString("2001:700:300:1800::f");
  QuicSocketAddressCoder coder(QuicSocketAddress(ip, 0x5678));
  std::string serialized = coder.Encode();
  std::string expected(
      "\x0a\x00"
      "\x20\x01\x07\x00\x03\x00\x18\x00"
      "\x00\x00\x00\x00\x00\x00\x00\x0f"
      "\x78\x56",
      20);
  EXPECT_EQ(expected, serialized);
}
TEST_F(QuicSocketAddressCoderTest, DecodeIPv4) {
  std::string serialized("\x02\x00\x04\x1f\xc6\x2c\x34\x12", 8);
  QuicSocketAddressCoder coder;
  ASSERT_TRUE(coder.Decode(serialized.data(), serialized.length()));
  EXPECT_EQ(IpAddressFamily::IP_V4, coder.ip().address_family());
  std::string expected_addr("\x04\x1f\xc6\x2c");
  EXPECT_EQ(expected_addr, coder.ip().ToPackedString());
  EXPECT_EQ(0x1234, coder.port());
}
TEST_F(QuicSocketAddressCoderTest, DecodeIPv6) {
  std::string serialized(
      "\x0a\x00"
      "\x20\x01\x07\x00\x03\x00\x18\x00"
      "\x00\x00\x00\x00\x00\x00\x00\x0f"
      "\x78\x56",
      20);
  QuicSocketAddressCoder coder;
  ASSERT_TRUE(coder.Decode(serialized.data(), serialized.length()));
  EXPECT_EQ(IpAddressFamily::IP_V6, coder.ip().address_family());
  std::string expected_addr(
      "\x20\x01\x07\x00\x03\x00\x18\x00"
      "\x00\x00\x00\x00\x00\x00\x00\x0f",
      16);
  EXPECT_EQ(expected_addr, coder.ip().ToPackedString());
  EXPECT_EQ(0x5678, coder.port());
}
// Malformed input: trailing bytes, bad family tags, and every possible
// truncation must all be rejected.
TEST_F(QuicSocketAddressCoderTest, DecodeBad) {
  std::string serialized(
      "\x0a\x00"
      "\x20\x01\x07\x00\x03\x00\x18\x00"
      "\x00\x00\x00\x00\x00\x00\x00\x0f"
      "\x78\x56",
      20);
  QuicSocketAddressCoder coder;
  EXPECT_TRUE(coder.Decode(serialized.data(), serialized.length()));
  serialized.push_back('\0');
  EXPECT_FALSE(coder.Decode(serialized.data(), serialized.length()));
  serialized.resize(20);
  EXPECT_TRUE(coder.Decode(serialized.data(), serialized.length()));
  serialized[0] = '\x03';
  EXPECT_FALSE(coder.Decode(serialized.data(), serialized.length()));
  serialized[0] = '\x0a';
  EXPECT_TRUE(coder.Decode(serialized.data(), serialized.length()));
  size_t len = serialized.length();
  for (size_t i = 0; i < len; i++) {
    ASSERT_FALSE(serialized.empty());
    serialized.erase(serialized.length() - 1);
    EXPECT_FALSE(coder.Decode(serialized.data(), serialized.length()));
  }
  EXPECT_TRUE(serialized.empty());
}
// Round-trip: Decode(Encode(addr)) must reproduce the address and port.
TEST_F(QuicSocketAddressCoderTest, EncodeAndDecode) {
  struct {
    const char* ip_literal;
    uint16_t port;
  } test_case[] = {
      {"93.184.216.119", 0x1234},
      {"199.204.44.194", 80},
      {"149.20.4.69", 443},
      {"127.0.0.1", 8080},
      {"2001:700:300:1800::", 0x5678},
      {"::1", 65534},
  };
  for (size_t i = 0; i < ABSL_ARRAYSIZE(test_case); i++) {
    QuicIpAddress ip;
    ASSERT_TRUE(ip.FromString(test_case[i].ip_literal));
    QuicSocketAddressCoder encoder(QuicSocketAddress(ip, test_case[i].port));
    std::string serialized = encoder.Encode();
    QuicSocketAddressCoder decoder;
    ASSERT_TRUE(decoder.Decode(serialized.data(), serialized.length()));
    EXPECT_EQ(encoder.ip(), decoder.ip());
    EXPECT_EQ(encoder.port(), decoder.port());
  }
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_socket_address_coder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_socket_address_coder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
40a77d2d-ac7a-47c8-aed8-486396bd535b | cpp | tensorflow/tensorflow | device_utils | tensorflow/core/common_runtime/device/device_utils.cc | third_party/xla/xla/tsl/profiler/utils/device_utils_test.cc | #include "tensorflow/core/common_runtime/device/device_utils.h"
#include "tensorflow/core/platform/regexp.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/stringpiece.h"
namespace tensorflow {
namespace device_utils {
Status ValidateDeviceType(StringPiece type) {
  // A device type must be an uppercase identifier such as "CPU", "GPU" or
  // "MY_DEVICE": a leading capital followed by capitals/underscores.
  static const LazyRE2 kTfDeviceTypeRegEx = {"[A-Z][A-Z_]*"};
  if (RE2::FullMatch(type, *kTfDeviceTypeRegEx)) {
    return absl::OkStatus();
  }
  return Status(absl::StatusCode::kFailedPrecondition,
                strings::StrCat("Device name/type '", type, "' must match ",
                                kTfDeviceTypeRegEx->pattern(), "."));
}
}
} | #include "xla/tsl/profiler/utils/device_utils.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace profiler {
namespace {
// Builds an XPlane whose only populated field is its name.
tensorflow::profiler::XPlane CreateXPlane(absl::string_view name) {
  tensorflow::profiler::XPlane plane;
  plane.set_name(name.data(), name.size());
  return plane;
}
// GetDeviceType classifies a plane purely from well-known name prefixes;
// anything unrecognized maps to kUnknown.
TEST(DeviceUtilsTest, GetDeviceType) {
  EXPECT_EQ(GetDeviceType(CreateXPlane(kHostThreadsPlaneName)),
            DeviceType::kCpu);
  EXPECT_EQ(GetDeviceType(CreateXPlane(absl::StrCat(kTpuPlanePrefix, 0))),
            DeviceType::kTpu);
  EXPECT_EQ(GetDeviceType(CreateXPlane(absl::StrCat(kGpuPlanePrefix, 0))),
            DeviceType::kGpu);
  EXPECT_EQ(GetDeviceType(CreateXPlane("unknown")), DeviceType::kUnknown);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device/device_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/device_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9696c01c-ac60-49df-98b7-8c5747ddbfef | cpp | google/quiche | quic_simple_server_session | quiche/quic/tools/quic_simple_server_session.cc | quiche/quic/tools/quic_simple_server_session_test.cc | #include "quiche/quic/tools/quic_simple_server_session.h"
#include <memory>
#include <utility>
#include "absl/memory/memory.h"
#include "quiche/quic/core/http/quic_server_initiated_spdy_stream.h"
#include "quiche/quic/core/http/quic_spdy_session.h"
#include "quiche/quic/core/quic_connection.h"
#include "quiche/quic/core/quic_stream_priority.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/tools/quic_simple_server_stream.h"
namespace quic {
QuicSimpleServerSession::QuicSimpleServerSession(
    const QuicConfig& config, const ParsedQuicVersionVector& supported_versions,
    QuicConnection* connection, QuicSession::Visitor* visitor,
    QuicCryptoServerStreamBase::Helper* helper,
    const QuicCryptoServerConfig* crypto_config,
    QuicCompressedCertsCache* compressed_certs_cache,
    QuicSimpleServerBackend* quic_simple_server_backend)
    : QuicServerSessionBase(config, supported_versions, connection, visitor,
                            helper, crypto_config, compressed_certs_cache),
      quic_simple_server_backend_(quic_simple_server_backend) {
  // The backend is mandatory: it is handed to every stream this session
  // creates.
  QUICHE_DCHECK(quic_simple_server_backend_);
  // Cap how many new incoming streams are accepted per event-loop pass.
  set_max_streams_accepted_per_loop(5u);
}
QuicSimpleServerSession::~QuicSimpleServerSession() { DeleteConnection(); }
std::unique_ptr<QuicCryptoServerStreamBase>
QuicSimpleServerSession::CreateQuicCryptoServerStream(
    const QuicCryptoServerConfig* crypto_config,
    QuicCompressedCertsCache* compressed_certs_cache) {
  // Delegate construction to the shared factory, wiring in this session and
  // its helper.
  return CreateCryptoServerStream(crypto_config, compressed_certs_cache, this,
                                  stream_helper());
}
void QuicSimpleServerSession::OnStreamFrame(const QuicStreamFrame& frame) {
  // Clients may only send on server-initiated streams when WebTransport may
  // be negotiated; otherwise such data is a protocol violation.
  const bool server_initiated = !IsIncomingStream(frame.stream_id);
  if (server_initiated && !WillNegotiateWebTransport()) {
    QUIC_LOG(WARNING) << "Client shouldn't send data on server push stream";
    connection()->CloseConnection(
        QUIC_INVALID_STREAM_ID, "Client sent data on server push stream",
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  QuicSpdySession::OnStreamFrame(frame);
}
QuicSpdyStream* QuicSimpleServerSession::CreateIncomingStream(QuicStreamId id) {
  // Honor the session's stream-creation policy before allocating anything.
  if (!ShouldCreateIncomingStream(id)) {
    return nullptr;
  }
  auto* stream = new QuicSimpleServerStream(id, this, BIDIRECTIONAL,
                                            quic_simple_server_backend_);
  // The session takes ownership of the stream on activation.
  ActivateStream(absl::WrapUnique(stream));
  return stream;
}
QuicSpdyStream* QuicSimpleServerSession::CreateIncomingStream(
    PendingStream* pending) {
  // Promote a pending (buffered) stream into a full server stream.
  QuicSpdyStream* stream =
      new QuicSimpleServerStream(pending, this, quic_simple_server_backend_);
  ActivateStream(absl::WrapUnique(stream));
  return stream;
}
QuicSpdyStream* QuicSimpleServerSession::CreateOutgoingBidirectionalStream() {
  // Server-initiated bidirectional streams are only supported here for
  // sessions that may negotiate WebTransport.
  if (!WillNegotiateWebTransport()) {
    QUIC_BUG(QuicSimpleServerSession CreateOutgoingBidirectionalStream without
             WebTransport support)
        << "QuicSimpleServerSession::CreateOutgoingBidirectionalStream called "
           "in a session without WebTransport support.";
    return nullptr;
  }
  if (!ShouldCreateOutgoingBidirectionalStream()) {
    return nullptr;
  }
  QuicServerInitiatedSpdyStream* stream = new QuicServerInitiatedSpdyStream(
      GetNextOutgoingBidirectionalStreamId(), this, BIDIRECTIONAL);
  ActivateStream(absl::WrapUnique(stream));
  return stream;
}
QuicSimpleServerStream*
QuicSimpleServerSession::CreateOutgoingUnidirectionalStream() {
  if (!ShouldCreateOutgoingUnidirectionalStream()) {
    return nullptr;
  }
  QuicSimpleServerStream* stream = new QuicSimpleServerStream(
      GetNextOutgoingUnidirectionalStreamId(), this, WRITE_UNIDIRECTIONAL,
      quic_simple_server_backend_);
  ActivateStream(absl::WrapUnique(stream));
  return stream;
}
QuicStream* QuicSimpleServerSession::ProcessBidirectionalPendingStream(
    PendingStream* pending) {
  // Pending bidirectional streams are only processed once usable encryption
  // keys exist.
  QUICHE_DCHECK(IsEncryptionEstablished());
  return CreateIncomingStream(pending);
}
} | #include "quiche/quic/tools/quic_simple_server_session.h"
#include <algorithm>
#include <memory>
#include <utility>
#include "absl/strings/str_cat.h"
#include "quiche/quic/core/crypto/null_encrypter.h"
#include "quiche/quic/core/crypto/quic_crypto_server_config.h"
#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/core/http/http_encoder.h"
#include "quiche/quic/core/proto/cached_network_parameters_proto.h"
#include "quiche/quic/core/quic_connection.h"
#include "quiche/quic/core/quic_crypto_server_stream.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/core/tls_server_handshaker.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/crypto_test_utils.h"
#include "quiche/quic/test_tools/mock_quic_session_visitor.h"
#include "quiche/quic/test_tools/quic_config_peer.h"
#include "quiche/quic/test_tools/quic_connection_peer.h"
#include "quiche/quic/test_tools/quic_sent_packet_manager_peer.h"
#include "quiche/quic/test_tools/quic_session_peer.h"
#include "quiche/quic/test_tools/quic_spdy_session_peer.h"
#include "quiche/quic/test_tools/quic_stream_peer.h"
#include "quiche/quic/test_tools/quic_sustained_bandwidth_recorder_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/quic/tools/quic_backend_response.h"
#include "quiche/quic/tools/quic_memory_cache_backend.h"
#include "quiche/quic/tools/quic_simple_server_stream.h"
using testing::_;
using testing::AtLeast;
using testing::InSequence;
using testing::Invoke;
using testing::Return;
using testing::StrictMock;
namespace quic {
namespace test {
namespace {
// Arbitrary two-byte payload written onto streams under test.
const char* const kStreamData = "\1z";
}
// Test-only accessor for QuicSimpleServerSession's private members and
// protected stream-creation hooks.
class QuicSimpleServerSessionPeer {
 public:
  static void SetCryptoStream(QuicSimpleServerSession* s,
                              QuicCryptoServerStreamBase* crypto_stream) {
    s->crypto_stream_.reset(crypto_stream);
  }
  static QuicSpdyStream* CreateIncomingStream(QuicSimpleServerSession* s,
                                              QuicStreamId id) {
    return s->CreateIncomingStream(id);
  }
  static QuicSimpleServerStream* CreateOutgoingUnidirectionalStream(
      QuicSimpleServerSession* s) {
    return s->CreateOutgoingUnidirectionalStream();
  }
};
namespace {
// Stream limit advertised in both directions via the test QuicConfig.
const size_t kMaxStreamsForTest = 10;
// Crypto stream that pretends encryption is always established and lets
// tests observe SendServerConfigUpdate calls.
class MockQuicCryptoServerStream : public QuicCryptoServerStream {
 public:
  explicit MockQuicCryptoServerStream(
      const QuicCryptoServerConfig* crypto_config,
      QuicCompressedCertsCache* compressed_certs_cache, QuicSession* session,
      QuicCryptoServerStreamBase::Helper* helper)
      : QuicCryptoServerStream(crypto_config, compressed_certs_cache, session,
                               helper) {}
  MockQuicCryptoServerStream(const MockQuicCryptoServerStream&) = delete;
  MockQuicCryptoServerStream& operator=(const MockQuicCryptoServerStream&) =
      delete;
  ~MockQuicCryptoServerStream() override {}
  MOCK_METHOD(void, SendServerConfigUpdate, (const CachedNetworkParameters*),
              (override));
  bool encryption_established() const override { return true; }
};
// TLS counterpart of MockQuicCryptoServerStream: encryption always reads as
// established and SendServerConfigUpdate is mockable.
class MockTlsServerHandshaker : public TlsServerHandshaker {
 public:
  explicit MockTlsServerHandshaker(QuicSession* session,
                                   const QuicCryptoServerConfig* crypto_config)
      : TlsServerHandshaker(session, crypto_config) {}
  MockTlsServerHandshaker(const MockTlsServerHandshaker&) = delete;
  MockTlsServerHandshaker& operator=(const MockTlsServerHandshaker&) = delete;
  ~MockTlsServerHandshaker() override {}
  MOCK_METHOD(void, SendServerConfigUpdate, (const CachedNetworkParameters*),
              (override));
  bool encryption_established() const override { return true; }
};
// Mock connection whose SendStreamData, by default, reports every byte (and
// any FIN) as consumed, so writes always "succeed".
class MockQuicConnectionWithSendStreamData : public MockQuicConnection {
 public:
  MockQuicConnectionWithSendStreamData(
      MockQuicConnectionHelper* helper, MockAlarmFactory* alarm_factory,
      Perspective perspective,
      const ParsedQuicVersionVector& supported_versions)
      : MockQuicConnection(helper, alarm_factory, perspective,
                           supported_versions) {
    auto consume_all_data = [](QuicStreamId , size_t write_length,
                               QuicStreamOffset ,
                               StreamSendingState state) {
      return QuicConsumedData(write_length, state != NO_FIN);
    };
    ON_CALL(*this, SendStreamData(_, _, _, _))
        .WillByDefault(Invoke(consume_all_data));
  }
  MOCK_METHOD(QuicConsumedData, SendStreamData,
              (QuicStreamId id, size_t write_length, QuicStreamOffset offset,
               StreamSendingState state),
              (override));
};
// Session under test with mockable control-frame and SendBlocked hooks.
class MockQuicSimpleServerSession : public QuicSimpleServerSession {
 public:
  MockQuicSimpleServerSession(
      const QuicConfig& config, QuicConnection* connection,
      QuicSession::Visitor* visitor, QuicCryptoServerStreamBase::Helper* helper,
      const QuicCryptoServerConfig* crypto_config,
      QuicCompressedCertsCache* compressed_certs_cache,
      QuicSimpleServerBackend* quic_simple_server_backend)
      : QuicSimpleServerSession(
            config, CurrentSupportedVersions(), connection, visitor, helper,
            crypto_config, compressed_certs_cache, quic_simple_server_backend) {
  }
  MOCK_METHOD(void, SendBlocked, (QuicStreamId, QuicStreamOffset), (override));
  MOCK_METHOD(bool, WriteControlFrame,
              (const QuicFrame& frame, TransmissionType type), (override));
};
// Parameterized fixture: builds a server session over a strict mock
// connection with a fully negotiated config for the version under test.
class QuicSimpleServerSessionTest
    : public QuicTestWithParam<ParsedQuicVersion> {
 public:
  // Matcher-style helper: consumes (deletes) MAX_STREAMS control frames so
  // they do not leak, returning whether the frame was handled.
  bool ClearMaxStreamsControlFrame(const QuicFrame& frame) {
    if (frame.type == MAX_STREAMS_FRAME) {
      DeleteFrame(&const_cast<QuicFrame&>(frame));
      return true;
    }
    return false;
  }
 protected:
  QuicSimpleServerSessionTest()
      : crypto_config_(QuicCryptoServerConfig::TESTING,
                       QuicRandom::GetInstance(),
                       crypto_test_utils::ProofSourceForTesting(),
                       KeyExchangeSource::Default()),
        compressed_certs_cache_(
            QuicCompressedCertsCache::kQuicCompressedCertsCacheSize) {
    // Advertise symmetric stream limits and flow-control windows.
    config_.SetMaxBidirectionalStreamsToSend(kMaxStreamsForTest);
    QuicConfigPeer::SetReceivedMaxBidirectionalStreams(&config_,
                                                       kMaxStreamsForTest);
    config_.SetMaxUnidirectionalStreamsToSend(kMaxStreamsForTest);
    config_.SetInitialStreamFlowControlWindowToSend(
        kInitialStreamFlowControlWindowForTest);
    config_.SetInitialMaxStreamDataBytesIncomingBidirectionalToSend(
        kInitialStreamFlowControlWindowForTest);
    config_.SetInitialMaxStreamDataBytesOutgoingBidirectionalToSend(
        kInitialStreamFlowControlWindowForTest);
    config_.SetInitialMaxStreamDataBytesUnidirectionalToSend(
        kInitialStreamFlowControlWindowForTest);
    config_.SetInitialSessionFlowControlWindowToSend(
        kInitialSessionFlowControlWindowForTest);
    // HTTP/3 reserves extra unidirectional streams (e.g. control/QPACK).
    if (VersionUsesHttp3(transport_version())) {
      QuicConfigPeer::SetReceivedMaxUnidirectionalStreams(
          &config_, kMaxStreamsForTest + 3);
    } else {
      QuicConfigPeer::SetReceivedMaxUnidirectionalStreams(&config_,
                                                          kMaxStreamsForTest);
    }
    ParsedQuicVersionVector supported_versions = SupportedVersions(version());
    connection_ = new StrictMock<MockQuicConnectionWithSendStreamData>(
        &helper_, &alarm_factory_, Perspective::IS_SERVER, supported_versions);
    connection_->AdvanceTime(QuicTime::Delta::FromSeconds(1));
    connection_->SetEncrypter(
        ENCRYPTION_FORWARD_SECURE,
        std::make_unique<NullEncrypter>(connection_->perspective()));
    session_ = std::make_unique<MockQuicSimpleServerSession>(
        config_, connection_, &owner_, &stream_helper_, &crypto_config_,
        &compressed_certs_cache_, &memory_cache_backend_);
    MockClock clock;
    handshake_message_ = crypto_config_.AddDefaultConfig(
        QuicRandom::GetInstance(), &clock,
        QuicCryptoServerConfig::ConfigOptions());
    session_->Initialize();
    // IETF versions emit control frames during setup; swallow them so the
    // strict mocks stay quiet.
    if (VersionHasIetfQuicFrames(transport_version())) {
      EXPECT_CALL(*session_, WriteControlFrame(_, _))
          .WillRepeatedly(Invoke(&ClearControlFrameWithTransmissionType));
    }
    session_->OnConfigNegotiated();
  }
  QuicStreamId GetNthClientInitiatedBidirectionalId(int n) {
    return GetNthClientInitiatedBidirectionalStreamId(transport_version(), n);
  }
  QuicStreamId GetNthServerInitiatedUnidirectionalId(int n) {
    return quic::test::GetNthServerInitiatedUnidirectionalStreamId(
        transport_version(), n);
  }
  ParsedQuicVersion version() const { return GetParam(); }
  QuicTransportVersion transport_version() const {
    return version().transport_version;
  }
  // For IETF versions, simulates the peer's STOP_SENDING that accompanies a
  // reset; no-op for Google QUIC versions.
  void InjectStopSending(QuicStreamId stream_id,
                         QuicRstStreamErrorCode rst_stream_code) {
    if (!VersionHasIetfQuicFrames(transport_version())) {
      return;
    }
    EXPECT_CALL(owner_, OnStopSendingReceived(_)).Times(1);
    QuicStopSendingFrame stop_sending(kInvalidControlFrameId, stream_id,
                                      rst_stream_code);
    EXPECT_CALL(*connection_, OnStreamReset(stream_id, rst_stream_code));
    session_->OnStopSendingFrame(stop_sending);
  }
  StrictMock<MockQuicSessionVisitor> owner_;
  StrictMock<MockQuicCryptoServerStreamHelper> stream_helper_;
  MockQuicConnectionHelper helper_;
  MockAlarmFactory alarm_factory_;
  StrictMock<MockQuicConnectionWithSendStreamData>* connection_;
  QuicConfig config_;
  QuicCryptoServerConfig crypto_config_;
  QuicCompressedCertsCache compressed_certs_cache_;
  QuicMemoryCacheBackend memory_cache_backend_;
  std::unique_ptr<MockQuicSimpleServerSession> session_;
  std::unique_ptr<CryptoHandshakeMessage> handshake_message_;
};
// Run every test under each supported QUIC version.
INSTANTIATE_TEST_SUITE_P(Tests, QuicSimpleServerSessionTest,
                         ::testing::ValuesIn(AllSupportedVersions()),
                         ::testing::PrintToStringParamName());
// A reset closes an open stream, and late data must not reopen it.
TEST_P(QuicSimpleServerSessionTest, CloseStreamDueToReset) {
  QuicStreamFrame data1(GetNthClientInitiatedBidirectionalId(0), false, 0,
                        kStreamData);
  session_->OnStreamFrame(data1);
  EXPECT_EQ(1u, QuicSessionPeer::GetNumOpenDynamicStreams(session_.get()));
  QuicRstStreamFrame rst1(kInvalidControlFrameId,
                          GetNthClientInitiatedBidirectionalId(0),
                          QUIC_ERROR_PROCESSING_STREAM, 0);
  EXPECT_CALL(owner_, OnRstStreamReceived(_)).Times(1);
  EXPECT_CALL(*session_, WriteControlFrame(_, _));
  if (!VersionHasIetfQuicFrames(transport_version())) {
    EXPECT_CALL(*connection_,
                OnStreamReset(GetNthClientInitiatedBidirectionalId(0),
                              QUIC_RST_ACKNOWLEDGEMENT));
  }
  session_->OnRstStream(rst1);
  InjectStopSending(GetNthClientInitiatedBidirectionalId(0),
                    QUIC_ERROR_PROCESSING_STREAM);
  EXPECT_EQ(0u, QuicSessionPeer::GetNumOpenDynamicStreams(session_.get()));
  session_->OnStreamFrame(data1);
  EXPECT_EQ(0u, QuicSessionPeer::GetNumOpenDynamicStreams(session_.get()));
  EXPECT_TRUE(connection_->connected());
}
// A reset received before any data means the stream is never opened.
TEST_P(QuicSimpleServerSessionTest, NeverOpenStreamDueToReset) {
  QuicRstStreamFrame rst1(kInvalidControlFrameId,
                          GetNthClientInitiatedBidirectionalId(0),
                          QUIC_ERROR_PROCESSING_STREAM, 0);
  EXPECT_CALL(owner_, OnRstStreamReceived(_)).Times(1);
  if (!VersionHasIetfQuicFrames(transport_version())) {
    EXPECT_CALL(*session_, WriteControlFrame(_, _));
    EXPECT_CALL(*connection_,
                OnStreamReset(GetNthClientInitiatedBidirectionalId(0),
                              QUIC_RST_ACKNOWLEDGEMENT));
  }
  session_->OnRstStream(rst1);
  InjectStopSending(GetNthClientInitiatedBidirectionalId(0),
                    QUIC_ERROR_PROCESSING_STREAM);
  EXPECT_EQ(0u, QuicSessionPeer::GetNumOpenDynamicStreams(session_.get()));
  QuicStreamFrame data1(GetNthClientInitiatedBidirectionalId(0), false, 0,
                        kStreamData);
  session_->OnStreamFrame(data1);
  EXPECT_EQ(0u, QuicSessionPeer::GetNumOpenDynamicStreams(session_.get()));
  EXPECT_TRUE(connection_->connected());
}
// Resetting one stream leaves its sibling open and usable.
TEST_P(QuicSimpleServerSessionTest, AcceptClosedStream) {
  QuicStreamFrame frame1(GetNthClientInitiatedBidirectionalId(0), false, 0,
                         kStreamData);
  QuicStreamFrame frame2(GetNthClientInitiatedBidirectionalId(1), false, 0,
                         kStreamData);
  session_->OnStreamFrame(frame1);
  session_->OnStreamFrame(frame2);
  EXPECT_EQ(2u, QuicSessionPeer::GetNumOpenDynamicStreams(session_.get()));
  QuicRstStreamFrame rst(kInvalidControlFrameId,
                         GetNthClientInitiatedBidirectionalId(0),
                         QUIC_ERROR_PROCESSING_STREAM, 0);
  EXPECT_CALL(owner_, OnRstStreamReceived(_)).Times(1);
  if (!VersionHasIetfQuicFrames(transport_version())) {
    EXPECT_CALL(*session_, WriteControlFrame(_, _));
    EXPECT_CALL(*connection_,
                OnStreamReset(GetNthClientInitiatedBidirectionalId(0),
                              QUIC_RST_ACKNOWLEDGEMENT));
  }
  session_->OnRstStream(rst);
  InjectStopSending(GetNthClientInitiatedBidirectionalId(0),
                    QUIC_ERROR_PROCESSING_STREAM);
  QuicStreamFrame frame3(GetNthClientInitiatedBidirectionalId(0), false, 2,
                         kStreamData);
  QuicStreamFrame frame4(GetNthClientInitiatedBidirectionalId(1), false, 2,
                         kStreamData);
  session_->OnStreamFrame(frame3);
  session_->OnStreamFrame(frame4);
  EXPECT_EQ(1u, QuicSessionPeer::GetNumOpenDynamicStreams(session_.get()));
  EXPECT_TRUE(connection_->connected());
}
// Stream creation on a torn-down connection must QUIC_BUG and create nothing.
TEST_P(QuicSimpleServerSessionTest, CreateIncomingStreamDisconnected) {
  if (version() != AllSupportedVersions()[0]) {
    return;
  }
  size_t initial_num_open_stream =
      QuicSessionPeer::GetNumOpenDynamicStreams(session_.get());
  QuicConnectionPeer::TearDownLocalConnectionState(connection_);
  EXPECT_QUIC_BUG(QuicSimpleServerSessionPeer::CreateIncomingStream(
                      session_.get(), GetNthClientInitiatedBidirectionalId(0)),
                  "ShouldCreateIncomingStream called when disconnected");
  EXPECT_EQ(initial_num_open_stream,
            QuicSessionPeer::GetNumOpenDynamicStreams(session_.get()));
}
TEST_P(QuicSimpleServerSessionTest, CreateIncomingStream) {
  QuicSpdyStream* stream = QuicSimpleServerSessionPeer::CreateIncomingStream(
      session_.get(), GetNthClientInitiatedBidirectionalId(0));
  EXPECT_NE(nullptr, stream);
  EXPECT_EQ(GetNthClientInitiatedBidirectionalId(0), stream->id());
}
TEST_P(QuicSimpleServerSessionTest, CreateOutgoingDynamicStreamDisconnected) {
  if (version() != AllSupportedVersions()[0]) {
    return;
  }
  size_t initial_num_open_stream =
      QuicSessionPeer::GetNumOpenDynamicStreams(session_.get());
  QuicConnectionPeer::TearDownLocalConnectionState(connection_);
  EXPECT_QUIC_BUG(
      QuicSimpleServerSessionPeer::CreateOutgoingUnidirectionalStream(
          session_.get()),
      "ShouldCreateOutgoingUnidirectionalStream called when disconnected");
  EXPECT_EQ(initial_num_open_stream,
            QuicSessionPeer::GetNumOpenDynamicStreams(session_.get()));
}
// Outgoing streams are refused until encryption is established.
TEST_P(QuicSimpleServerSessionTest, CreateOutgoingDynamicStreamUnencrypted) {
  if (version() != AllSupportedVersions()[0]) {
    return;
  }
  size_t initial_num_open_stream =
      QuicSessionPeer::GetNumOpenDynamicStreams(session_.get());
  EXPECT_QUIC_BUG(
      QuicSimpleServerSessionPeer::CreateOutgoingUnidirectionalStream(
          session_.get()),
      "Encryption not established so no outgoing stream created.");
  EXPECT_EQ(initial_num_open_stream,
            QuicSessionPeer::GetNumOpenDynamicStreams(session_.get()));
}
// Lookup of a server-initiated (even for the server) stream id from the peer
// closes the connection with a version-appropriate error.
TEST_P(QuicSimpleServerSessionTest, GetEvenIncomingError) {
  const size_t initial_num_open_stream =
      QuicSessionPeer::GetNumOpenDynamicStreams(session_.get());
  const QuicErrorCode expected_error = VersionUsesHttp3(transport_version())
                                           ? QUIC_HTTP_STREAM_WRONG_DIRECTION
                                           : QUIC_INVALID_STREAM_ID;
  EXPECT_CALL(*connection_, CloseConnection(expected_error,
                                            "Data for nonexistent stream", _));
  EXPECT_EQ(nullptr,
            QuicSessionPeer::GetOrCreateStream(
                session_.get(), GetNthServerInitiatedUnidirectionalId(3)));
  EXPECT_EQ(initial_num_open_stream,
            QuicSessionPeer::GetNumOpenDynamicStreams(session_.get()));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/tools/quic_simple_server_session.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/tools/quic_simple_server_session_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
427bb8a3-c26e-4c32-83fa-f82be047f902 | cpp | tensorflow/tensorflow | spmd_partitioner | third_party/xla/xla/service/spmd/spmd_partitioner.cc | third_party/xla/xla/service/spmd/spmd_partitioner_test.cc | #include "xla/service/spmd/spmd_partitioner.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/tile_assignment.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/protobuf_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/computation_layout.h"
#include "xla/service/flatten_call_graph.h"
#include "xla/service/hlo_cse.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/shape_inference.h"
#include "xla/service/spmd/custom_call_handler.h"
#include "xla/service/spmd/spmd_partitioner_util.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace spmd {
namespace {
using hlo_sharding_util::GroupedSharding;
}
std::string SpmdLogger::MakeReport() {
  std::string report;
  absl::StrAppend(&report,
                  "\n\n***** SPMD memory during transformation *****\n");
  // Present the largest memory footprints first.
  const auto by_size_descending = [](auto const& lhs, auto const& rhs) {
    return lhs.first > rhs.first;
  };
  std::sort(entries_.begin(), entries_.end(), by_size_descending);
  const int64_t num_to_report =
      std::min<int64_t>(report_instruction_count_, entries_.size());
  for (int64_t i = 0; i < num_to_report; ++i) {
    absl::StrAppend(&report, "\n ",
                    tsl::strings::HumanReadableNumBytes(entries_[i].first),
                    " : ", entries_[i].second, "\n");
  }
  return report;
}
void SpmdLogger::RegisterLogEntry(HloInstruction* hlo,
                                  const std::vector<HloInstruction*>& group) {
  if (disabled_) {
    return;
  }
  std::string report = hlo->ToString();
  // Track the byte size of the largest array-shaped instruction in the
  // group; stays -1 if the group has none.
  int64_t max_value = -1;
  for (HloInstruction* inst : group) {
    if (!inst->shape().IsArray()) {
      continue;
    }
    max_value = std::max<int64_t>(max_value, ShapeSizeInBytes(inst->shape()));
    absl::StrAppend(&report, " * ", inst->ToString(), "\n");
  }
  // Entries are later sorted by this size when the report is rendered.
  entries_.push_back(std::make_pair(max_value, report));
}
std::string SpmdLogger::ReportBeforePartition(
const HloModule& module, int64_t report_instruction_count) {
std::string report;
absl::StrAppend(&report,
"\n\n***** SPMD memory usage before partition *****\n");
absl::StrAppend(&report, "\n ** Replicated instructions\n");
absl::StrAppend(&report, ReportMemoryUsage(
module,
[](const HloInstruction* hlo) {
return !hlo->has_sharding() ||
hlo->sharding().IsReplicated();
},
report_instruction_count));
absl::StrAppend(&report, "\n ** All instructions\n");
absl::StrAppend(&report, ReportMemoryUsage(module, HloPredicateTrue,
report_instruction_count));
return report;
}
std::string SpmdLogger::ReportAfterPartition(
    const HloModule& module, int64_t report_instruction_count) {
  // The post-partition report covers every instruction; no filtering needed.
  std::string report = "\n\n***** SPMD memory usage after partition *****\n";
  absl::StrAppend(&report, ReportMemoryUsage(module, HloPredicateTrue,
                                             report_instruction_count));
  return report;
}
template <typename F>
std::string SpmdLogger::ReportMemoryUsage(
const HloModule& module, const F& filter,
int64_t report_instruction_count) {
std::string report;
std::vector<HloInstruction*> instructions;
instructions.reserve(module.instruction_count());
for (auto computation : module.computations()) {
if (computation->IsFusionComputation()) {
continue;
}
for (auto hlo : computation->instructions()) {
if (!hlo->shape().IsArray() ||
ShapeUtil::IsEffectiveScalar(hlo->shape())) {
continue;
}
if (filter(hlo)) {
instructions.push_back(hlo);
}
}
}
const auto add_report = [&](std::vector<HloInstruction*>* insts) {
std::sort(insts->begin(), insts->end(),
[](const HloInstruction* inst0, const HloInstruction* inst1) {
return ShapeSizeInBytes(inst0->shape()) >
ShapeSizeInBytes(inst1->shape());
});
for (int64_t i = 0;
i < std::min<int64_t>(report_instruction_count, insts->size()); ++i) {
absl::StrAppend(&report, " ",
tsl::strings::HumanReadableNumBytes(
ShapeSizeInBytes((*insts)[i]->shape())),
" : ", (*insts)[i]->ToString(), "\n");
}
};
add_report(&instructions);
return report;
}
namespace {
bool ShouldKeepSharding(const HloInstruction* hlo) {
if (hlo->opcode() == HloOpcode::kInfeed ||
hlo->opcode() == HloOpcode::kOutfeed ||
DynCast<HloSendRecvInstruction>(hlo) != nullptr) {
return true;
}
if (hlo->opcode() == HloOpcode::kParameter &&
hlo->parent() == hlo->GetModule()->entry_computation()) {
return true;
}
return false;
}
absl::Status ClearShardingAttributes(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* hlo : computation->instructions()) {
if (ShouldKeepSharding(hlo)) {
continue;
}
hlo->clear_sharding();
}
}
return absl::OkStatus();
}
HloSharding GetShardingReplicatedOnWindowedDimension(
const HloSharding& sharding, const Window& window) {
std::vector<int64_t> dimensions_to_replicate;
for (int i = 0; i < window.dimensions_size(); ++i) {
const WindowDimension& wd = window.dimensions(i);
if (window_util::IsTrivialWindowDimension(wd)) {
continue;
}
dimensions_to_replicate.push_back(i);
}
return hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(
sharding, dimensions_to_replicate);
}
}
// Adds `instruction` to the computation being built, tags it with the
// metadata of the HLO currently being visited, and incrementally maintains
// broadcast_dims_: for each produced HLO, the set of dimensions along which
// the values originate from a broadcast (and are therefore identical across
// that dimension). This set is propagated through elementwise ops, transpose,
// reshape, slice/dynamic-slice and pad.
HloInstruction* SpmdBuilder::AddInstruction(
    std::unique_ptr<HloInstruction> instruction) {
  HloInstruction* hlo =
      HloComputation::Builder::AddInstruction(std::move(instruction));
  if (visiting_hlo_) {
    // Attribute the new instruction to the original HLO being partitioned.
    hlo->set_metadata(visiting_hlo_->metadata());
    instructions_[visiting_hlo_].push_back(hlo);
  }
  if (hlo->opcode() == HloOpcode::kBroadcast) {
    // Dimensions not listed in the broadcast's dimension map are newly
    // introduced by the broadcast.
    for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
      if (!absl::c_linear_search(hlo->dimensions(), i)) {
        broadcast_dims_[hlo].insert(i);
      }
    }
  }
  if (hlo->IsElementwise() && hlo->operand_count() > 0 &&
      hlo->shape().IsArray()) {
    // An elementwise op keeps a dimension "broadcast" only if every operand
    // has that dimension recorded as broadcast. Start from all dimensions and
    // intersect with each operand's set.
    absl::flat_hash_set<int64_t> broadcast_dims;
    for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
      broadcast_dims.insert(i);
    }
    for (int64_t i = 0; i < hlo->operand_count(); ++i) {
      auto it = broadcast_dims_.find(hlo->operand(i));
      if (it == broadcast_dims_.end()) {
        // An operand with no recorded broadcast dims kills the whole set.
        broadcast_dims.clear();
        break;
      }
      // NOTE: the inner `i` intentionally shadows the operand index; it
      // iterates over output dimensions here.
      for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
        if (!it->second.contains(i)) {
          broadcast_dims.erase(i);
        }
      }
    }
    if (!broadcast_dims.empty()) {
      broadcast_dims_[hlo] = std::move(broadcast_dims);
    }
  }
  if (hlo->opcode() == HloOpcode::kTranspose) {
    // Remap the operand's broadcast dims through the transpose permutation.
    auto it = broadcast_dims_.find(hlo->operand(0));
    if (it != broadcast_dims_.end()) {
      absl::flat_hash_set<int64_t> xpose_broadcast_dims;
      std::vector<int64_t> reverse_map(hlo->shape().rank());
      for (int64_t i = 0; i < reverse_map.size(); ++i) {
        reverse_map[hlo->dimensions(i)] = i;
      }
      for (int64_t dim : it->second) {
        xpose_broadcast_dims.insert(reverse_map[dim]);
      }
      broadcast_dims_[hlo] = std::move(xpose_broadcast_dims);
    }
  }
  if (hlo->opcode() == HloOpcode::kReshape &&
      Product(hlo->shape().dimensions()) > 0) {
    auto it = broadcast_dims_.find(hlo->operand(0));
    if (it != broadcast_dims_.end()) {
      // Match input and output dimensions of the reshape pairwise using two
      // stacks of remaining dimension sizes (major-to-minor on top). An
      // output dimension stays "broadcast" only while every input dimension
      // it maps from is broadcast.
      absl::flat_hash_set<int64_t> reshape_broadcast_dims;
      for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
        reshape_broadcast_dims.insert(i);
      }
      std::vector<int64_t> before_dim_size_stack;
      std::vector<int64_t> after_dim_size_stack;
      const int64_t operand0_rank = hlo->operand(0)->shape().rank();
      const int64_t hlo_shape_rank = hlo->shape().rank();
      before_dim_size_stack.reserve(operand0_rank);
      after_dim_size_stack.reserve(hlo_shape_rank);
      // Push sizes in reverse so the major-most dimension is on top.
      for (int64_t i = operand0_rank - 1; i >= 0; --i) {
        before_dim_size_stack.push_back(hlo->operand(0)->shape().dimensions(i));
      }
      for (int64_t i = hlo_shape_rank - 1; i >= 0; --i) {
        after_dim_size_stack.push_back(hlo->shape().dimensions(i));
      }
      while (!before_dim_size_stack.empty() && !after_dim_size_stack.empty()) {
        int64_t before_size = before_dim_size_stack.back();
        int64_t after_size = after_dim_size_stack.back();
        int64_t current_before_dim =
            hlo->operand(0)->shape().rank() - before_dim_size_stack.size();
        int64_t current_after_dim =
            hlo->shape().rank() - after_dim_size_stack.size();
        before_dim_size_stack.pop_back();
        after_dim_size_stack.pop_back();
        if (!it->second.contains(current_before_dim)) {
          // The input dimension overlapping this output dimension is not
          // broadcast, so neither is the output dimension.
          reshape_broadcast_dims.erase(current_after_dim);
        }
        if (before_size == after_size) {
          continue;
        }
        if (before_size % after_size == 0) {
          // Input dim splits across several output dims; keep the remainder.
          before_dim_size_stack.push_back(before_size / after_size);
        } else if (after_size % before_size == 0) {
          // Several input dims merge into this output dim.
          after_dim_size_stack.push_back(after_size / before_size);
        } else {
          // Sizes don't divide evenly: give up on this and all later dims.
          for (int64_t i = current_after_dim; i < hlo->shape().rank(); ++i) {
            reshape_broadcast_dims.erase(i);
          }
          break;
        }
      }
      // Leftover sizes mean the matching was incomplete; record nothing.
      if (!before_dim_size_stack.empty() || !after_dim_size_stack.empty()) {
        reshape_broadcast_dims.clear();
      }
      if (!reshape_broadcast_dims.empty()) {
        broadcast_dims_[hlo] = std::move(reshape_broadcast_dims);
      }
    }
  }
  if (hlo->opcode() == HloOpcode::kSlice ||
      hlo->opcode() == HloOpcode::kDynamicSlice) {
    // Slicing preserves uniformity along every dimension.
    auto it = broadcast_dims_.find(hlo->operand(0));
    if (it != broadcast_dims_.end()) {
      auto dims = it->second;
      broadcast_dims_[hlo] = std::move(dims);
    }
  }
  if (hlo->opcode() == HloOpcode::kPad) {
    // Padding along a dimension injects new values, so only completely
    // unpadded dimensions keep their broadcast status.
    auto it = broadcast_dims_.find(hlo->operand(0));
    if (it != broadcast_dims_.end()) {
      absl::flat_hash_set<int64_t> pad_broadcast_dims;
      for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
        const auto& dim = hlo->padding_config().dimensions(i);
        if (dim.edge_padding_low() == 0 && dim.edge_padding_high() == 0 &&
            dim.interior_padding() == 0 && it->second.contains(i)) {
          pad_broadcast_dims.insert(i);
        }
      }
      if (!pad_broadcast_dims.empty()) {
        broadcast_dims_[hlo] = std::move(pad_broadcast_dims);
      }
    }
  }
  return hlo;
}
// Reshards this PartitionedHlo to `target`, consulting and updating the
// per-HLO reshard cache. `pad_value`, when set, customizes the value used for
// padding and disables reuse of previously cached results.
PartitionedHlo PartitionedHlo::Reshard(const HloSharding& target,
                                       std::optional<Literal> pad_value) const {
  // No-op when the sharding already matches.
  if (sharding() == target) {
    return *this;
  }
  // For a constant resharded to a manual sharding, go through a replicated
  // intermediate and just re-annotate it with the target sharding.
  if (hlo()->opcode() == HloOpcode::kConstant && !sharding().IsManual() &&
      target.IsManual()) {
    PartitionedHlo pconstant = this->Reshard(HloSharding::Replicate());
    pconstant.hlo()->set_sharding(target);
    return pconstant;
  }
  auto& cache = state_.reshard_cache->per_hlo_cache[hlo()].reshard_cache;
  // A custom pad value makes a cached result unusable, so force recompute.
  const bool replace_cache = pad_value.has_value();
  const bool is_to_replicate =
      hlo_->shape().IsArray() && target.NumTiles() < sharding().NumTiles();
  // All-gather-like reshards are only cached when the option allows it.
  const bool use_cache =
      !is_to_replicate || state_.partitioner->options().cache_all_gather;
  if (!replace_cache && use_cache) {
    auto it = cache.find(target);
    if (it != cache.end()) {
      return it->second;
    }
  }
  auto resharded = ReshardNoCache(target, std::move(pad_value));
  // Also record the reverse mapping so resharding back is a cache hit.
  {
    auto& cache =
        state_.reshard_cache->per_hlo_cache[resharded.hlo()].reshard_cache;
    cache.insert_or_assign(sharding(), *this);
  }
  if (use_cache) {
    // NOTE: the cache map is deliberately re-fetched here — ReshardNoCache
    // above may have inserted into per_hlo_cache, which can invalidate the
    // reference obtained earlier.
    auto& cache = state_.reshard_cache->per_hlo_cache[hlo()].reshard_cache;
    auto [it, _] = cache.insert_or_assign(target, std::move(resharded));
    return it->second;
  }
  return resharded;
}
// Reshards to `target` without consulting the cache, trying progressively
// more expensive strategies: collective-permute, all-to-all, partial
// replication tricks, grouped recursive resharding, and finally (when
// `allow_full_replication`) full rematerialization via Replicate().
PartitionedHlo PartitionedHlo::ReshardNoCache(
    const HloSharding& target, std::optional<Literal> pad_value,
    bool allow_full_replication) const {
  VLOG(2) << "Resharding " << hlo_->ToString() << " from "
          << hlo_->sharding().ToString() << " to " << target.ToString();
  const Shape& shape = hlo_->shape();
  // Tokens carry no data; any sharding is equivalent.
  if (shape.element_type() == TOKEN) {
    return *this;
  }
  CHECK(shape.IsTuple() || !target.IsTuple());
  // A non-tuple target on a tuple shape means "apply to every leaf".
  if (shape.IsTuple() && !target.IsTuple()) {
    return Reshard(target.GetTupleSharding(shape).value());
  }
  // Tuples are resharded element-by-element and reassembled.
  if (shape.IsTuple()) {
    std::vector<HloInstruction*> elements;
    for (int64_t i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
      auto subshape = ShapeUtil::GetTupleElementShape(shape, i);
      auto element = state_.b->AddInstruction(
          HloInstruction::CreateGetTupleElement(subshape, hlo(), i));
      element->set_sharding(sharding().GetSubSharding(shape, {i}));
      elements.push_back(
          PartitionedHlo(
              element, ShapeUtil::GetTupleElementShape(base_shape_, i), state_)
              .Reshard(target.GetSubSharding(shape, {i}))
              .hlo());
    }
    auto tuple =
        state_.b->AddInstruction(HloInstruction::CreateTuple(elements));
    tuple->set_sharding(target);
    return PartitionedHlo(tuple, base_shape_, state_);
  }
  if (sharding() == target) {
    return *this;
  }
  CHECK_EQ(target.IsManualSubgroup(), sharding().IsManualSubgroup());
  // Manual subgroups: strip the manual dimension, reshard within each device
  // group, then restore the target sharding on the result.
  if (sharding().IsManualSubgroup()) {
    auto grouped = hlo_sharding_util::GetManualSubgroupSharding(sharding());
    auto target_grouped = AlignGroupsWithIfCompatible(
        hlo_sharding_util::GetManualSubgroupSharding(target), grouped);
    CHECK(target_grouped.has_value())
        << "Resharding target has incompatible sharding subgroups. From "
        << sharding().ToString() << " to " << target.ToString();
    HloSharding original_sharding = sharding();
    hlo_->set_sharding(grouped.sharding);
    HloInstruction* partitioned =
        PartitionedHlo(hlo_, base_shape_,
                       CreatePerGroupPartitioningState(
                           state(), grouped.device_groups, state_.b))
            .ReshardNoCache(target_grouped->sharding)
            .hlo();
    hlo_->set_sharding(original_sharding);
    partitioned->set_sharding(target);
    return PartitionedHlo(partitioned, base_shape_, state_);
  }
  // Cheapest option: a pure device permutation.
  if (CanReshardWithCollectivePermute(sharding(), target)) {
    return ReshardWithCollectivePermute(target);
  }
  // Next: an all-to-all between a source and a target dimension.
  if (auto src_tgt_dims =
          GetReshardAllToAllSourceTargetDims(sharding(), target)) {
    return ReshardWithAllToAll(target, *src_tgt_dims);
  }
  // Partial replicate -> tiled: try dynamic-slice, then all-to-all.
  if (!target.IsTileMaximal() && sharding().ReplicateOnLastTileDim()) {
    auto try_reshard = ReshardFromPartialReplicateWithDynamicSlice(target);
    if (try_reshard.has_value()) {
      return try_reshard.value();
    }
    try_reshard = ReshardPartialReplicateWithAllToAll(target);
    if (try_reshard.has_value()) {
      return try_reshard.value();
    }
  }
  // Tiled -> partial replicate: try all-gather, then all-to-all.
  if (!sharding().IsTileMaximal() && target.ReplicateOnLastTileDim()) {
    auto try_reshard = ReshardToPartialReplicateWithAllGather(target);
    if (try_reshard.has_value()) {
      return try_reshard.value();
    }
    try_reshard = ReshardPartialReplicateWithAllToAll(target);
    if (try_reshard.has_value()) {
      return try_reshard.value();
    }
  }
  if (!sharding().IsReplicated()) {
    if (!target.IsReplicated()) {
      if (sharding().IsTiled() && target.IsTiled()) {
        auto reshard = TryComplexReshardHandling(target);
        if (reshard.has_value()) {
          return reshard.value();
        }
        // Group over dimensions where source and target agree (and are
        // actually partitioned), then recursively reshard within each group.
        std::vector<int64_t> equal_dims;
        for (int64_t dim = 0; dim < hlo_->shape().rank(); ++dim) {
          if (sharding().tile_assignment().dim(dim) == 1 ||
              target.tile_assignment().dim(dim) !=
                  sharding().tile_assignment().dim(dim)) {
            continue;
          }
          equal_dims.push_back(dim);
        }
        if (!equal_dims.empty()) {
          auto grouped =
              hlo_sharding_util::GroupShardingOnDims(sharding(), equal_dims);
          auto grouped_target = AlignGroupsWith(
              hlo_sharding_util::GroupShardingOnDims(target, equal_dims),
              grouped);
          Shape inner_base_shape = base_shape_;
          for (int64_t dim : equal_dims) {
            inner_base_shape.set_dimensions(dim, hlo_->shape().dimensions(dim));
          }
          auto state = CreatePerGroupPartitioningState(
              state_, grouped.device_groups, state_.b);
          // The copy gives the group-local sharding a carrier without
          // mutating this->hlo_.
          HloInstruction* copy =
              state_.b->AddInstruction(HloInstruction::CreateUnary(
                  hlo_->shape(), HloOpcode::kCopy, hlo_));
          copy->set_sharding(grouped.sharding);
          HloInstruction* resharded =
              PartitionedHlo(copy, inner_base_shape, state)
                  .ReshardNoCache(grouped_target.sharding)
                  .hlo();
          resharded->set_sharding(
              hlo_sharding_util::UngroupSharding(grouped_target));
          return PartitionedHlo(resharded, base_shape_, state_)
              .ReshardNoCache(target);
        }
      }
      if (!allow_full_replication) {
        return *this;
      }
      // Falling through to Replicate() below is expensive; make it loud.
      LOG(ERROR)
          << "[spmd] Involuntary full rematerialization. The compiler was "
             "not able to go from sharding "
          << sharding().ToString(true) << " to "
          << target.ToString(true)
          << " without doing a full rematerialization of the tensor for HLO "
             "operation: "
          << hlo_->ToString()
          << ". You probably want to enrich the sharding annotations to "
             "prevent "
             "this from happening.";
    }
    return Replicate().Reshard(target);
  }
  // Source is replicated below this point.
  if (target.IsTileMaximal()) {
    // Replicated -> maximal: a copy with the new sharding suffices.
    auto copy = state_.b->AddInstruction(
        HloInstruction::CreateUnary(hlo_->shape(), HloOpcode::kCopy, hlo_));
    copy->set_sharding(target);
    return PartitionedHlo(copy, base_shape_, state_);
  }
  // Replicated -> partially replicated: slice out each group's shard.
  if (target.ReplicateOnLastTileDim()) {
    std::vector<int64_t> group_dims(target.tile_assignment().num_dimensions() -
                                    1);
    std::iota(group_dims.begin(), group_dims.end(), 0);
    auto target_grouped =
        hlo_sharding_util::GroupShardingOnDims(target, group_dims);
    auto partially_sharded = PerGroupSliceFromReplicated(
        hlo_, state_.partition_id, target_grouped.device_groups, group_dims,
        target_grouped.group_dim_sizes, state_.b);
    partially_sharded->set_sharding(target);
    return PartitionedHlo(partially_sharded, base_shape(), state_);
  }
  // Replicated -> tiled: pad to an even multiple and dynamic-slice out this
  // partition's shard.
  auto padded_hlo = PadBaseShapeBeforeUnevenTiledSharding(
      hlo_, target, state_.b, std::move(pad_value));
  auto shard_shape = MakePartitionedShape(shape, target);
  auto slice = state_.b->AddInstruction(HloInstruction::CreateDynamicSlice(
      shard_shape, padded_hlo,
      MakePartitionOffsets(shape, target, state_.partition_id, state_.b),
      shard_shape.dimensions()));
  slice->set_sharding(target);
  return PartitionedHlo(slice, base_shape_, state_);
}
PartitionedHlo PartitionedHlo::PadWithValue(
    HloInstruction* pad_value, absl::Span<const int64_t> left_padded_dims,
    absl::Span<const int64_t> skipped_dims) const {
  // Delegate to the HLO-producing helper; propagate the sharding only when a
  // new instruction was actually created.
  HloInstruction* padded =
      PadWithValueHlo(pad_value, left_padded_dims, skipped_dims);
  if (padded != hlo_) {
    padded->set_sharding(hlo_->sharding());
  }
  return PartitionedHlo(padded, base_shape_, state_);
}
HloInstruction* PartitionedHlo::PadWithValueHlo(
    HloInstruction* pad_value, absl::Span<const int64_t> left_padded_dims,
    absl::Span<const int64_t> skipped_dims) const {
  const HloSharding& sharding = hlo_->sharding();
  const Shape& shape = hlo_->shape();
  CHECK(!shape.IsTuple() && shape.element_type() != TOKEN);
  // Nothing to mask when the tiles already cover the base shape exactly.
  if (sharding.IsReplicated() || EvenlyPartitions(base_shape_, sharding)) {
    return hlo_;
  }
  CHECK(!sharding.IsTileMaximal());
  auto index_shape = ShapeUtil::ChangeElementType(shape, S32);
  auto mask_shape = ShapeUtil::ChangeElementType(index_shape, PRED);
  // Builds a PRED mask selecting, along dimension `dim`, the elements of this
  // shard that fall inside the valid region of the base shape.
  auto build_dim_mask = [&](int64_t dim, HloInstruction* start_index) {
    auto iota =
        state_.b->AddInstruction(HloInstruction::CreateIota(index_shape, dim));
    auto start_bcast = state_.b->AddInstruction(
        HloInstruction::CreateBroadcast(index_shape, start_index, {}));
    auto global_index = state_.b->AddInstruction(HloInstruction::CreateBinary(
        index_shape, HloOpcode::kAdd, iota, start_bcast));
    ComparisonDirection direction = ComparisonDirection::kLt;
    int64_t index_limit = base_shape_.dimensions(dim);
    if (absl::c_linear_search(left_padded_dims, dim)) {
      // For left-padded dims, the valid region is at the end; flip the
      // comparison and measure the limit from the other side.
      direction = ComparisonDirection::kGe;
      index_limit =
          index_shape.dimensions(dim) * sharding.tile_assignment().dim(dim) -
          index_limit;
    }
    auto limit = state_.b->AddInstruction(HloInstruction::CreateConstant(
        LiteralUtil::CreateR0<int32_t>(index_limit)));
    auto limit_bcast = state_.b->AddInstruction(
        HloInstruction::CreateBroadcast(index_shape, limit, {}));
    return state_.b->AddInstruction(HloInstruction::CreateCompare(
        mask_shape, global_index, limit_bcast, direction));
  };
  auto offsets = MakePartitionOffsets(base_shape_, sharding,
                                      state_.partition_id, state_.b);
  HloInstruction* mask = nullptr;
  for (int64_t dim = 0; dim < shape.rank(); ++dim) {
    // Evenly divided or explicitly skipped dims need no masking.
    if (base_shape_.dimensions(dim) % sharding.tile_assignment().dim(dim) ==
            0 ||
        absl::c_linear_search(skipped_dims, dim)) {
      continue;
    }
    HloInstruction* dim_mask = build_dim_mask(dim, offsets[dim]);
    mask = mask == nullptr
               ? dim_mask
               : state_.b->AddInstruction(HloInstruction::CreateBinary(
                     dim_mask->shape(), HloOpcode::kAnd, mask, dim_mask));
  }
  if (mask == nullptr) {
    return hlo_;
  }
  // Replace out-of-range elements with the pad value.
  auto pad_value_bcast = state_.b->AddInstruction(
      HloInstruction::CreateBroadcast(shape, pad_value, {}));
  return state_.b->AddInstruction(HloInstruction::CreateTernary(
      shape, HloOpcode::kSelect, mask, hlo_, pad_value_bcast));
}
PartitionedHlo PartitionedHlo::PadWithZero(
    absl::Span<const int64_t> left_padded_dims,
    absl::Span<const int64_t> skipped_dims) const {
  // Masked-out elements become zero of this HLO's element type.
  HloInstruction* zero =
      state_.b->AddInstruction(HloInstruction::CreateConstant(
          LiteralUtil::Zero(hlo_->shape().element_type())));
  return PadWithValue(zero, left_padded_dims, skipped_dims);
}
// Reshards this HLO so it can be used as the windowed operand (e.g. of a
// convolution or reduce-window) under `target` sharding. Computes per-shard
// window configurations, trims unused shards, performs halo exchange, and
// returns the per-shard input plus the adjusted window, or std::nullopt when
// no valid windowed resharding exists. Results are memoized per (target,
// window) in the per-HLO window_reshard_cache.
std::optional<PartitionedHlo::WindowedInputShardReturnValue>
PartitionedHlo::ReshardAsWindowedInput(const Window& window,
                                       const HloSharding& target,
                                       HloInstruction* pad_value,
                                       bool mask_invalid_region,
                                       bool force_mask_in_compact) {
  // Cache lookup keyed on (target sharding, window).
  auto& cache = state_.reshard_cache->per_hlo_cache[hlo()].window_reshard_cache;
  for (auto& entry : cache) {
    if (std::get<0>(entry) == target &&
        protobuf_util::ProtobufEquals(std::get<1>(entry), window)) {
      return std::get<2>(entry);
    }
  }
  auto update_cache = [&](WindowedInputShardReturnValue result) {
    cache.emplace_back(target, window, std::move(result));
    return std::get<2>(cache.back());
  };
  VLOG(2) << "ReshardAsWindowedInput()\n"
          << "\twindow:" << window_util::ToString(window)
          << "\ttarget sharding:" << target.ToString();
  CHECK(!target.IsTileMaximal());
  auto partition_ordinals =
      MakeTiledPartitionOrdinals(target, state_.partition_id, state_.b);
  auto shard_shape = base_shape_;
  // Per-dimension bookkeeping filled in by the loop below:
  // - start/limit offset calculations of each shard on the padded shape,
  // - dynamic-slice offsets needed on the output (base dilation only),
  // - per-shard window counts, explicit left padding applied via CreatePad,
  // - trimming state when some shards hold no useful data.
  std::vector<MultiplyAddDivideOffsetCalculation> start_on_padded_calculations(
      base_shape_.rank());
  std::vector<MultiplyAddDivideOffsetCalculation> limit_on_padded_calculations(
      base_shape_.rank());
  std::vector<HloInstruction*> dynamic_slice_offset_on_output(
      base_shape_.rank(), nullptr);
  Window shard_window = window;
  Shape padded_shape = base_shape_;
  std::vector<HloInstruction*> offsets_on_padded_shape(base_shape_.rank());
  std::vector<int64_t> per_shard_window_counts(base_shape_.rank());
  std::vector<int64_t> explicit_left_padding(base_shape_.rank(), 0);
  std::vector<int64_t> trimmed_target_sharding_tile_shape(base_shape_.rank());
  std::vector<std::pair<int64_t, int64_t>> trimmed_target_sharding_middle_range(
      base_shape_.rank(), std::pair<int64_t, int64_t>(-1, -1));
  bool trimmed_shards = false;
  std::vector<int64_t> dims_needs_pre_masking;
  Shape halo_exchange_base_shape = base_shape_;
  bool trimmed_in_shard = false;
  std::vector<int64_t> pre_halo_exchange_slice_starts(base_shape_.rank(), 0);
  std::vector<int64_t> pre_halo_exchange_slice_limits(
      hlo_->shape().dimensions().begin(), hlo_->shape().dimensions().end());
  std::vector<bool> can_leave_dimension_partitioned(base_shape_.rank(), false);
  for (int64_t i = 0; i < base_shape_.rank(); ++i) {
    can_leave_dimension_partitioned[i] =
        window_util::IsTrivialWindowDimension(window.dimensions(i));
  }
  // Main per-dimension analysis.
  for (int64_t i = 0; i < base_shape_.rank(); ++i) {
    int64_t shard_count = target.tile_assignment().dim(i);
    trimmed_target_sharding_tile_shape[i] = shard_count;
    if (shard_count == 1) {
      // Unpartitioned dimension: zero offset, whole-dimension shard.
      offsets_on_padded_shape[i] = state_.b->AddInstruction(
          HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));
      shard_shape.set_dimensions(
          i, CeilOfRatio(base_shape_.dimensions(i), shard_count));
      continue;
    }
    if (can_leave_dimension_partitioned[i]) {
      // Trivial window on this dimension: keep it partitioned as-is with a
      // simple per-partition offset.
      int64_t shard_size = CeilOfRatio(base_shape_.dimensions(i), shard_count);
      padded_shape.set_dimensions(i, shard_size * shard_count);
      offsets_on_padded_shape[i] =
          state_.b->AddInstruction(HloInstruction::CreateBinary(
              ShapeUtil::MakeShape(S32, {}), HloOpcode::kMultiply,
              partition_ordinals[i],
              state_.b->AddInstruction(HloInstruction::CreateConstant(
                  LiteralUtil::CreateR0<int32_t>(shard_size)))));
      shard_shape.set_dimensions(i, shard_size);
      continue;
    }
    const WindowDimension& wd = window.dimensions(i);
    WindowDimension* swd = shard_window.mutable_dimensions(i);
    const int64_t dilated_size = 1 + (wd.size() - 1) * wd.window_dilation();
    const int64_t full_size =
        1 + (base_shape_.dimensions(i) - 1) * wd.base_dilation() +
        wd.padding_high() + wd.padding_low();
    int64_t window_count = (full_size - dilated_size) / wd.stride() + 1;
    per_shard_window_counts[i] = CeilOfRatio(window_count, shard_count);
    int64_t input_shard_size = hlo_->shape().dimensions(i);
    if (window_count < shard_count && wd.window_dilation() == 1 &&
        wd.base_dilation() == 1) {
      // Fewer windows than shards: some trailing shards contribute nothing
      // and can be trimmed from the sharding used for halo exchange.
      int64_t useful_input_shards = CeilOfRatio(
          base_shape_.dimensions(i) + wd.padding_high(), input_shard_size);
      if (useful_input_shards < shard_count) {
        shard_count = std::max<int64_t>(useful_input_shards, window_count);
        trimmed_shards = true;
        trimmed_target_sharding_tile_shape[i] = shard_count;
        if (shard_count == 1) {
          // Everything fits on the first shard; adjust high padding so the
          // shard covers the full padded extent.
          offsets_on_padded_shape[i] = state_.b->AddInstruction(
              HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));
          swd->set_padding_high(base_shape_.dimensions(i) + wd.padding_high() -
                                hlo_->shape().dimensions(i));
          continue;
        }
        halo_exchange_base_shape.set_dimensions(i,
                                                input_shard_size * shard_count);
        if (input_shard_size * shard_count > base_shape_.dimensions(i) &&
            wd.padding_high() > 0) {
          // The last remaining shard covers padded area; mask before halo
          // exchange so the padding value is correct.
          dims_needs_pre_masking.push_back(i);
        } else if (wd.padding_high() < 0 &&
                   full_size - wd.padding_low() < input_shard_size) {
          // Negative high padding: trim within the shard before exchange.
          input_shard_size = full_size - wd.padding_low();
          halo_exchange_base_shape.set_dimensions(
              i, input_shard_size * shard_count);
          pre_halo_exchange_slice_limits[i] = input_shard_size;
          trimmed_in_shard = true;
        }
      }
    }
    // Convert low padding into explicit padding added via CreatePad later;
    // only the base-dilation remainder stays in the per-shard window.
    explicit_left_padding[i] = wd.padding_low() / wd.base_dilation();
    swd->set_padding_low(wd.padding_low() % wd.base_dilation());
    swd->set_padding_high(0);
    if (window_count < shard_count && wd.window_dilation() == 1 &&
        wd.base_dilation() == 1) {
      // Large negative low padding can leave a run of empty shards in the
      // middle; remove them and remap partition ordinals around the gap.
      int64_t middle_empty_shards =
          (-explicit_left_padding[i]) / input_shard_size - window_count;
      if (middle_empty_shards > 0) {
        shard_count -= middle_empty_shards;
        CHECK_GT(shard_count, 1);
        trimmed_target_sharding_middle_range[i].first = window_count;
        trimmed_target_sharding_middle_range[i].second = middle_empty_shards;
        trimmed_shards = true;
        trimmed_target_sharding_tile_shape[i] = shard_count;
        explicit_left_padding[i] += middle_empty_shards * input_shard_size;
        halo_exchange_base_shape.set_dimensions(i,
                                                input_shard_size * shard_count);
        HloInstruction* ordinal = partition_ordinals[i];
        HloInstruction* left_count = CreateR0WithType<int32_t>(
            ordinal->shape().element_type(), window_count, state_.b);
        HloInstruction* on_left =
            state_.b->AddInstruction(HloInstruction::CreateCompare(
                ShapeUtil::ChangeElementType(ordinal->shape(), PRED), ordinal,
                left_count, ComparisonDirection::kLt));
        HloInstruction* right_ordinal =
            state_.b->AddInstruction(HloInstruction::CreateBinary(
                ordinal->shape(), HloOpcode::kSubtract, ordinal, left_count));
        partition_ordinals[i] =
            state_.b->AddInstruction(HloInstruction::CreateTernary(
                partition_ordinals[i]->shape(), HloOpcode::kSelect, on_left,
                partition_ordinals[i], right_ordinal));
        if (-explicit_left_padding[i] > input_shard_size * (shard_count - 1)) {
          // Still more skipped data than remaining shards can hold: also trim
          // inside each shard.
          int64_t skip_amount =
              -explicit_left_padding[i] - input_shard_size * (shard_count - 1);
          input_shard_size -= skip_amount;
          explicit_left_padding[i] += skip_amount * shard_count;
          pre_halo_exchange_slice_starts[i] = skip_amount;
          trimmed_in_shard = true;
          if (full_size < input_shard_size) {
            skip_amount = input_shard_size - full_size;
            pre_halo_exchange_slice_limits[i] -= skip_amount;
            explicit_left_padding[i] += skip_amount * (shard_count - 1);
            input_shard_size = full_size;
          }
          halo_exchange_base_shape.set_dimensions(
              i, input_shard_size * shard_count);
        }
      }
    }
    if (full_size < dilated_size) {
      VLOG(2) << "Failed to reshard window operand because the window size is "
                 "larger than padded base size";
      return std::nullopt;
    }
    if (wd.stride() != 1 &&
        (wd.stride() * per_shard_window_counts[i]) % wd.base_dilation() != 0) {
      VLOG(2) << "Failed to reshard window operand due to non-trivial dilation";
      return std::nullopt;
    }
    // Offset calculations mapping a shard ordinal to its [start, limit) range
    // on the padded input.
    start_on_padded_calculations[i] = MultiplyAddDivideOffsetCalculation(
        wd.stride() * per_shard_window_counts[i],
        wd.base_dilation() - 1 - swd->padding_low(), wd.base_dilation());
    int64_t dilated_shard_size =
        wd.stride() * (per_shard_window_counts[i] - 1) + dilated_size;
    limit_on_padded_calculations[i] = MultiplyAddDivideOffsetCalculation(
        wd.stride() * per_shard_window_counts[i],
        dilated_shard_size + wd.base_dilation() - 1 - swd->padding_low(),
        wd.base_dilation());
    offsets_on_padded_shape[i] = start_on_padded_calculations[i].Calculate(
        partition_ordinals[i], state_.b);
    auto shard_size_function =
        limit_on_padded_calculations[i] - start_on_padded_calculations[i];
    int64_t max_shard_size = shard_size_function.MaxInRange(0, shard_count);
    shard_shape.set_dimensions(i, max_shard_size);
    padded_shape.set_dimensions(
        i, limit_on_padded_calculations[i].Calculate(shard_count - 1));
    if (wd.base_dilation() != 1) {
      // With base dilation, each shard may start at a different offset within
      // the dilation period; adjust per-shard padding (and possibly add a
      // dynamic-slice offset on the output) so all shards share one window
      // config.
      auto get_first_valid_element_offset_on_dilated_shard =
          [&](int64_t shard_ordinal) {
            return start_on_padded_calculations[i].Calculate(shard_ordinal) *
                       wd.base_dilation() +
                   swd->padding_low() -
                   wd.stride() * per_shard_window_counts[i] * shard_ordinal;
          };
      CHECK_EQ(get_first_valid_element_offset_on_dilated_shard(0),
               swd->padding_low());
      // Make sure the window covers each shard's wanted limit by growing the
      // per-shard high padding.
      for (int64_t shard_ordinal = 0; shard_ordinal < shard_count;
           ++shard_ordinal) {
        int64_t wanted_limit_on_dilated_shard =
            wd.stride() * (per_shard_window_counts[i] - 1) + dilated_size;
        int64_t actual_limit_on_dilated_shard_without_pad_high =
            get_first_valid_element_offset_on_dilated_shard(shard_ordinal) +
            (max_shard_size - 1) * wd.base_dilation() + 1;
        swd->set_padding_high(std::max<int64_t>(
            swd->padding_high(),
            wanted_limit_on_dilated_shard -
                actual_limit_on_dilated_shard_without_pad_high));
      }
      if (wd.stride() == 1) {
        // Stride 1: align all shards to the maximum low padding; shards whose
        // start differs need a dynamic-slice offset on the output.
        int64_t max_pad_low =
            get_first_valid_element_offset_on_dilated_shard(0);
        bool all_same = true;
        for (int64_t shard_ordinal = 1; shard_ordinal < shard_count;
             ++shard_ordinal) {
          int64_t start =
              get_first_valid_element_offset_on_dilated_shard(shard_ordinal);
          if (start != swd->padding_low()) {
            all_same = false;
          }
          max_pad_low = std::max(max_pad_low, start);
        }
        if (!all_same) {
          auto start_on_padded_input =
              start_on_padded_calculations[i].Calculate(partition_ordinals[i],
                                                        state_.b);
          auto first_window_minus_max_pad_low =
              MultiplyAddDivideOffsetCalculation(
                  wd.base_dilation(), swd->padding_low() - max_pad_low, 1)
                  .Calculate(start_on_padded_input, state_.b);
          auto required_first_window =
              MultiplyAddDivideOffsetCalculation(per_shard_window_counts[i], 0,
                                                 1)
                  .Calculate(partition_ordinals[i], state_.b);
          dynamic_slice_offset_on_output[i] =
              state_.b->AddInstruction(HloInstruction::CreateBinary(
                  required_first_window->shape(), HloOpcode::kSubtract,
                  required_first_window, first_window_minus_max_pad_low));
        }
        swd->set_padding_low(max_pad_low);
      } else {
        // Non-unit stride with base dilation is only supported when the
        // per-shard stride is a multiple of the dilation.
        if ((wd.stride() * per_shard_window_counts[i]) % wd.base_dilation() !=
            0) {
          return std::nullopt;
        }
      }
    }
  }
  // Returns the per-dimension output dynamic-slice offsets, or nullopt when
  // none of the dimensions need one.
  auto get_dynamic_slice_offset_on_output_if_needed =
      [&]() -> std::optional<std::vector<HloInstruction*>> {
    if (absl::c_all_of(
            dynamic_slice_offset_on_output,
            [](HloInstruction* offset) { return offset == nullptr; })) {
      return std::nullopt;
    }
    auto zero = state_.b->AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));
    for (int64_t i = 0; i < dynamic_slice_offset_on_output.size(); ++i) {
      if (dynamic_slice_offset_on_output[i] == nullptr) {
        dynamic_slice_offset_on_output[i] = zero;
      }
    }
    return dynamic_slice_offset_on_output;
  };
  // Fallback path when the input is (or can be treated as) replicated on all
  // windowed dimensions: pad the full input and dynamic-slice each shard.
  auto handle_all_windowed_dimensions_are_replicated = [&]() {
    PaddingConfig padding_config;
    auto pad_hlo_shape = padded_shape;
    for (int64_t i = 0; i < base_shape_.rank(); ++i) {
      auto padding_config_dim = padding_config.add_dimensions();
      padding_config_dim->set_interior_padding(0);
      if (target.tile_assignment().dim(i) == 1 ||
          (can_leave_dimension_partitioned[i] && !sharding().IsReplicated())) {
        padding_config_dim->set_edge_padding_low(0);
        padding_config_dim->set_edge_padding_high(0);
        pad_hlo_shape.set_dimensions(i, hlo_->shape().dimensions(i));
      } else {
        padding_config_dim->set_edge_padding_low(explicit_left_padding[i]);
        padding_config_dim->set_edge_padding_high(padded_shape.dimensions(i) -
                                                  explicit_left_padding[i] -
                                                  base_shape_.dimensions(i));
      }
    }
    auto padded_hlo =
        ShapeUtil::Compatible(pad_hlo_shape, base_shape_)
            ? hlo_
            : state_.b->AddInstruction(HloInstruction::CreatePad(
                  pad_hlo_shape, hlo_, pad_value, padding_config));
    auto sharded_input =
        state_.b->AddInstruction(HloInstruction::CreateDynamicSlice(
            shard_shape, padded_hlo, offsets_on_padded_shape,
            shard_shape.dimensions()));
    return update_cache(WindowedInputShardReturnValue{
        sharded_input, shard_window,
        get_dynamic_slice_offset_on_output_if_needed()});
  };
  auto sharding_with_windowed_dims_replicated =
      GetShardingReplicatedOnWindowedDimension(target, window);
  // If the current sharding already has the windowed dims replicated (or is
  // fully replicated), use the pad+dynamic-slice path directly.
  if (sharding().IsReplicated() ||
      (target != sharding() &&
       sharding_with_windowed_dims_replicated == sharding())) {
    return handle_all_windowed_dimensions_are_replicated();
  }
  // Otherwise reshard to the target first and retry.
  if (target != sharding() &&
      sharding_with_windowed_dims_replicated != sharding()) {
    return Reshard(target).ReshardAsWindowedInput(window, target, pad_value);
  }
  // After trimming, everything may fit on a single shard.
  if (Product(trimmed_target_sharding_tile_shape) == 1) {
    return update_cache(WindowedInputShardReturnValue{
        hlo_, shard_window, get_dynamic_slice_offset_on_output_if_needed()});
  }
  if (target.ReplicateOnLastTileDim()) {
    trimmed_target_sharding_tile_shape.push_back(
        target.tile_assignment().dimensions().back());
  }
  // Build the (possibly trimmed) sharding used for halo exchange, remapping
  // device indices around removed middle shards.
  std::optional<HloSharding> trimmed_target;
  const HloSharding* halo_exchange_target = &target;
  if (trimmed_shards) {
    Array<int64_t> trimmed_devices(trimmed_target_sharding_tile_shape);
    trimmed_devices.Each([&](absl::Span<const int64_t> indices, int64_t* d) {
      std::vector<int64_t> target_indices(indices.begin(), indices.end());
      for (int64_t i = 0; i < base_shape_.rank(); ++i) {
        const auto& range = trimmed_target_sharding_middle_range[i];
        if (range.first >= 0 && indices[i] >= range.first) {
          target_indices[i] += range.second;
        }
      }
      *d = target.tile_assignment()(target_indices);
    });
    trimmed_target = target.ReplicateOnLastTileDim()
                         ? HloSharding::PartialTile(trimmed_devices)
                         : HloSharding::Tile(trimmed_devices);
    halo_exchange_target = &*trimmed_target;
  }
  HloInstruction* visiting_hlo = hlo_;
  // Mask dimensions that cover padded area before the halo exchange.
  if (!dims_needs_pre_masking.empty()) {
    std::vector<int64_t> skipped_dims;
    for (int dim = 0; dim < base_shape_.rank(); ++dim) {
      if (!absl::c_linear_search(dims_needs_pre_masking, dim)) {
        skipped_dims.push_back(dim);
      }
    }
    visiting_hlo = PadWithValueHlo(pad_value, {},
                                   skipped_dims);
  }
  // Slice off the in-shard data trimmed away above.
  if (trimmed_in_shard) {
    std::vector<int64_t> slice_sizes(halo_exchange_base_shape.rank());
    for (int64_t i = 0; i < slice_sizes.size(); ++i) {
      slice_sizes[i] =
          pre_halo_exchange_slice_limits[i] - pre_halo_exchange_slice_starts[i];
    }
    visiting_hlo = state_.b->AddInstruction(HloInstruction::CreateSlice(
        ShapeUtil::MakeShape(halo_exchange_base_shape.element_type(),
                             slice_sizes),
        visiting_hlo,
        pre_halo_exchange_slice_starts,
        pre_halo_exchange_slice_limits,
        std::vector<int64_t>(halo_exchange_base_shape.rank(), 1)));
  }
  // Perform halo exchange per partitioned, non-trivially-windowed dimension.
  for (int dim = 0; dim < base_shape_.rank(); ++dim) {
    int64_t shard_count = halo_exchange_target->tile_assignment().dim(dim);
    if (shard_count == 1 || can_leave_dimension_partitioned[dim]) {
      continue;
    }
    int64_t input_shard_size =
        CeilOfRatio(halo_exchange_base_shape.dimensions(dim), shard_count);
    // Left/right halo sizes, as functions of the shard ordinal, derived from
    // the difference between neighbor boundaries and window start/limit.
    MultiplyAddDivideOffsetCalculation shard_limit_of_previous_on_padded(
        input_shard_size, explicit_left_padding[dim], 1);
    OffsetCalculation left_halo_size_functions =
        shard_limit_of_previous_on_padded - start_on_padded_calculations[dim];
    MultiplyAddDivideOffsetCalculation shard_start_of_next_on_padded(
        input_shard_size, input_shard_size + explicit_left_padding[dim], 1);
    OffsetCalculation right_halo_size_functions =
        limit_on_padded_calculations[dim] - shard_start_of_next_on_padded;
    auto resharded = ExchangeHaloAndGetValidData(
        visiting_hlo, halo_exchange_base_shape, left_halo_size_functions,
        right_halo_size_functions, explicit_left_padding[dim],
        padded_shape.dimensions(dim), shard_shape.dimensions(dim), dim,
        *halo_exchange_target, offsets_on_padded_shape[dim], pad_value,
        partition_ordinals[dim], state_.collective_ops_creator,
        state_.next_channel_id, state_.b, mask_invalid_region,
        force_mask_in_compact);
    if (!resharded) {
      VLOG(1) << "ReshardAsWindowedInput failed without replicate first: halo "
                 "is beyond the neighbor.";
      // Fall back to replicating the windowed dimensions and retrying.
      if (sharding_with_windowed_dims_replicated == sharding()) {
        return handle_all_windowed_dimensions_are_replicated();
      }
      return Reshard(sharding_with_windowed_dims_replicated)
          .ReshardAsWindowedInput(window, target, pad_value);
    }
    visiting_hlo = *resharded;
  }
  return update_cache(WindowedInputShardReturnValue{
      visiting_hlo, shard_window,
      get_dynamic_slice_offset_on_output_if_needed()});
}
// Returns this value resharded to a fully replicated sharding, inserting the
// required collectives. Results are looked up in, and written back to, the
// per-HLO reshard cache when the partitioner's `cache_all_gather` option is
// enabled.
PartitionedHlo PartitionedHlo::Replicate() const {
  auto& cache = state_.reshard_cache->per_hlo_cache[hlo()].reshard_cache;
  if (state_.partitioner->options().cache_all_gather) {
    // Fast path: reuse a previously computed replicated version of this HLO.
    for (auto& entry : cache) {
      if (entry.first.IsReplicated()) {
        return entry.second;
      }
    }
  }
  const HloSharding sharding = hlo_->sharding();
  const Shape& shape = hlo_->shape();
  CHECK(!shape.IsTuple() && shape.element_type() != TOKEN);
  if (sharding.IsReplicated()) {
    // Already replicated; nothing to do.
    return *this;
  }
  // NOTE(review): this second cache scan runs even when cache_all_gather is
  // off — presumably to reuse entries added by other reshard paths; confirm.
  for (auto& entry : cache) {
    if (entry.first.IsReplicated()) {
      return entry.second;
    }
  }
  // Records (replicated result -> original sharding) so a later reshard of
  // the replicated value back to `sharding` is free, and caches the
  // replicated result itself when cache_all_gather is enabled.
  auto update_cache = [&](PartitionedHlo resharded) {
    state_.reshard_cache->per_hlo_cache[resharded.hlo()]
        .reshard_cache.insert_or_assign(sharding, *this);
    auto& cache = state_.reshard_cache->per_hlo_cache[hlo()].reshard_cache;
    if (state_.partitioner->options().cache_all_gather) {
      auto [it, _] = cache.insert_or_assign(HloSharding::Replicate(),
                                            std::move(resharded));
      return it->second;
    }
    return resharded;
  };
  if (sharding.IsTileMaximal()) {
    // Single-device data: broadcast from the owning device.
    return update_cache(Broadcast());
  }
  // Tiled sharding: replicate along every dimension.
  std::vector<int64_t> all_dims(shape.rank());
  std::iota(all_dims.begin(), all_dims.end(), 0);
  HloInstruction* result = ReplicatePartial(all_dims);
  result->set_sharding(HloSharding::Replicate());
  return update_cache(PartitionedHlo(result, base_shape_, state_));
}
// Replicates this value along `dims`, leaving other dimensions sharded as
// they are. Each requested dimension is handled by one of three strategies:
//  - broadcast, when every shard already holds the full dimension;
//  - dynamic-update-slice + all-reduce, when the full dimension is at most
//    half the partition count (an all-gather would mostly move padding);
//  - all-gather, otherwise.
HloInstruction* PartitionedHlo::ReplicatePartial(
    absl::Span<const int64_t> dims) const {
  CHECK(!sharding().IsTileMaximal());
  const Shape& shard_shape = hlo()->shape();
  // Shape with every requested dim restored to its full base size.
  Shape final_result_shape = shard_shape;
  // Shape produced by the all-gather step alone (only ag_dims restored).
  Shape ag_result_shape = shard_shape;
  std::vector<int64_t> broadcast_dims;
  std::vector<int64_t> dus_ar_dims;
  std::vector<int64_t> ag_dims;
  for (int64_t i : dims) {
    int64_t partitions = sharding().tile_assignment().dim(i);
    if (partitions == 1) {
      continue;
    }
    final_result_shape.set_dimensions(i, base_shape().dimensions(i));
    if (base_shape().dimensions(i) == shard_shape.dimensions(i)) {
      // Every shard already has the full data on this dim.
      broadcast_dims.push_back(i);
    } else if (base_shape().dimensions(i) <= partitions / 2) {
      // Mostly-padded dim: replicate via masked DUS + all-reduce.
      dus_ar_dims.push_back(i);
    } else {
      ag_result_shape.set_dimensions(i, base_shape().dimensions(i));
      ag_dims.push_back(i);
    }
  }
  HloInstruction* broadcast = hlo_;
  if (!broadcast_dims.empty()) {
    // Group devices on all non-broadcast dims so Broadcast() runs within
    // each group that shares identical data.
    std::vector<int64_t> other_dims;
    for (int64_t i = 0; i < sharding().tile_assignment().num_dimensions();
         ++i) {
      if (!absl::c_linear_search(broadcast_dims, i)) {
        other_dims.push_back(i);
      }
    }
    HloSharding original_sharding = sharding();
    auto grouped =
        hlo_sharding_util::GroupShardingOnDims(original_sharding, other_dims);
    std::vector<int64_t> dev_indices(
        grouped.sharding.tile_assignment().num_dimensions(), 0);
    // Temporarily mark the data as owned by one device of the group so that
    // Broadcast() can be reused; the sharding is restored right after.
    hlo_->set_sharding(HloSharding::AssignDevice(
        grouped.sharding.tile_assignment()(dev_indices)));
    auto per_group_partitioner_state = CreatePerGroupPartitioningState(
        state(), grouped.device_groups, state().b);
    auto partial_replicate_hlo =
        PartitionedHlo(hlo_, shard_shape, per_group_partitioner_state)
            .Broadcast();
    hlo_->set_sharding(original_sharding);
    partial_replicate_hlo.hlo()->clear_sharding();
    broadcast = partial_replicate_hlo.hlo();
  }
  if (ag_dims.empty() && dus_ar_dims.empty()) {
    return broadcast;
  }
  HloInstruction* result = nullptr;
  if (state_.collective_ops_creator.create_cross_partition_all_gather) {
    result = state_.partitioner->AllGatherShards(
        state_.b, broadcast, sharding(), state_.next_channel_id, ag_dims,
        state_.collective_ops_creator);
  }
  if (result == nullptr) {
    // No all-gather available (or it failed): fall back to DUS + all-reduce
    // for the remaining dims as well.
    dus_ar_dims.insert(dus_ar_dims.end(), ag_dims.begin(), ag_dims.end());
    result = broadcast;
  } else {
    if (!ShapeUtil::Compatible(result->shape(), ag_result_shape)) {
      // Strip padding that the all-gather result may carry.
      std::vector<int64_t> start_indices(ag_result_shape.rank(), 0);
      std::vector<int64_t> strides(ag_result_shape.rank(), 1);
      result = state_.b->AddInstruction(
          HloInstruction::CreateSlice(ag_result_shape, result, start_indices,
                                      ag_result_shape.dimensions(), strides));
    }
  }
  if (!dus_ar_dims.empty()) {
    auto zero = state_.b->AddInstruction(HloInstruction::CreateConstant(
        LiteralUtil::Zero(shard_shape.element_type())));
    // Dims where shard_size * partitions overshoots the base size carry
    // padding and must be zero-masked before the all-reduce sum.
    std::vector<int64_t> masking_dims;
    for (int64_t dim : dus_ar_dims) {
      if (shard_shape.dimensions(dim) * sharding().tile_assignment().dim(dim) !=
          base_shape().dimensions(dim)) {
        masking_dims.push_back(dim);
      }
    }
    if (!masking_dims.empty()) {
      std::vector<int64_t> skipped_dims;
      for (int64_t i = 0; i < base_shape().rank(); ++i) {
        if (!absl::c_linear_search(masking_dims, i)) {
          skipped_dims.push_back(i);
        }
      }
      result->copy_sharding(hlo_);
      result = PartitionedHlo(result, final_result_shape, state_)
                   .PadWithValue(zero,
                                 {},
                                 skipped_dims)
                   .hlo();
    }
    // Place each shard at its offset in a zero tensor, then all-reduce so
    // every partition receives the union of all shards.
    auto zero_bcast = state_.b->AddInstruction(
        HloInstruction::CreateBroadcast(final_result_shape, zero, {}));
    auto offsets = MakePartitionOffsets(
        final_result_shape,
        hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(
            sharding(), dus_ar_dims),
        state_.partition_id, state_.b, dus_ar_dims);
    auto dus =
        state_.b->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
            final_result_shape, zero_bcast, result, offsets));
    HloComputation* reduction =
        MakeBinaryAdd(shard_shape.element_type(), state_.module);
    result = state_.partitioner->AllReduceAlongShardingDims(
        state_.b, dus, sharding(), state_.next_channel_id, dus_ar_dims,
        state_.collective_ops_creator, reduction);
  }
  return result;
}
// Reshards to a partially replicated `target` by replicating the dimensions
// whose partition count shrinks. Returns nullopt when `target` is not
// partially replicated or no compatible intermediate sharding exists.
std::optional<PartitionedHlo>
PartitionedHlo::ReshardToPartialReplicateWithAllGather(
    const HloSharding& target) const {
  if (!target.ReplicateOnLastTileDim()) {
    return std::nullopt;
  }
  // Intermediate sharding reachable from `sharding()` whose per-dim partition
  // counts line up with `target`.
  auto compatible_sharding =
      PartialReplicateReshardCompatibleSharding(target, sharding());
  if (!compatible_sharding.has_value()) {
    return std::nullopt;
  }
  const auto& temp_sharding = compatible_sharding.value();
  auto partitioned_hlo = *this;
  if (CanReshardWithCollectivePermute(sharding(), temp_sharding)) {
    partitioned_hlo =
        partitioned_hlo.ReshardWithCollectivePermute(temp_sharding);
  }
  // Dims whose partition count drops must be (partially) replicated by the
  // ratio of the two partition counts.
  int64_t rank = hlo_->shape().rank();
  std::vector<int64_t> replicate_dims;
  std::vector<int64_t> replicate_factors;
  for (int64_t dim = 0; dim < rank; dim++) {
    int64_t replicate_factor = temp_sharding.tile_assignment().dim(dim) /
                               target.tile_assignment().dim(dim);
    if (replicate_factor > 1) {
      replicate_dims.emplace_back(dim);
      replicate_factors.emplace_back(replicate_factor);
    }
  }
  // Align shard contents to the target tiling before replicating.
  auto halo_exchange = TileToPartialReplicateHaloExchange(
      partitioned_hlo.hlo_, base_shape_, temp_sharding, target, replicate_dims,
      partitioned_hlo.state().collective_ops_creator,
      partitioned_hlo.state().next_channel_id,
      partitioned_hlo.state().partition_id, partitioned_hlo.state().b);
  if (!halo_exchange.has_value()) {
    return std::nullopt;
  }
  auto halo_exchange_hlo = halo_exchange.value();
  // Group devices so replication happens within each group of partitions
  // that should end up with identical data.
  auto sharding_grouped = hlo_sharding_util::GroupShardingOnDims(
      temp_sharding, replicate_dims, replicate_factors);
  auto per_group_partitioner_state = CreatePerGroupPartitioningState(
      partitioned_hlo.state(), sharding_grouped.device_groups,
      partitioned_hlo.state().b);
  auto base_shape = MakePartitionedShape(base_shape_, target);
  auto original_sharding = partitioned_hlo.sharding();
  halo_exchange_hlo->set_sharding(sharding_grouped.sharding);
  auto partial_replicate_hlo = PartitionedHlo(halo_exchange_hlo, base_shape,
                                              per_group_partitioner_state);
  HloInstruction* result =
      partial_replicate_hlo.ReplicatePartial(replicate_dims);
  // NOTE(review): restores the sharding in case the halo exchange returned
  // the input HLO unchanged (aliasing halo_exchange_hlo) — confirm.
  partitioned_hlo.hlo()->set_sharding(original_sharding);
  result->set_sharding(target);
  return PartitionedHlo(result, base_shape_, partitioned_hlo.state());
}
// Reshards from a partially replicated sharding to `target` by dynamic-
// slicing the data each partition already holds (the extra shards come out
// of the replicated subgroup). Returns nullopt when `this` is not partially
// replicated or no compatible intermediate sharding exists.
//
// Cleanup: the original version also built a `tiling_dim_factors` vector in
// the loop below, but never read it afterwards; that dead computation has
// been removed.
std::optional<PartitionedHlo>
PartitionedHlo::ReshardFromPartialReplicateWithDynamicSlice(
    const HloSharding& target) const {
  if (!sharding().ReplicateOnLastTileDim()) {
    return std::nullopt;
  }
  // Intermediate sharding compatible with `sharding()` whose device order
  // matches the source, so only a dynamic-slice (plus an optional collective
  // permute at the end) is needed.
  auto target_compatible_sharding =
      PartialReplicateReshardCompatibleSharding(sharding(), target);
  if (!target_compatible_sharding.has_value()) {
    return std::nullopt;
  }
  // Dims whose partition count grows under the intermediate sharding.
  std::vector<int64_t> expand_tile_dims;
  int64_t rank = hlo_->shape().rank();
  const auto& temp_target_sharding = target_compatible_sharding.value();
  for (int64_t dim = 0; dim < rank; dim++) {
    if (temp_target_sharding.tile_assignment().dim(dim) >
        sharding().tile_assignment().dim(dim)) {
      expand_tile_dims.push_back(dim);
    }
  }
  // Pad so every expanded dim is divisible by its new partition count.
  auto padded_hlo = PadFromPartialReplicateShape(
      hlo_, base_shape_, sharding(), temp_target_sharding, expand_tile_dims,
      state_.collective_ops_creator, state_.next_channel_id,
      state_.partition_id, state_.b);
  if (!padded_hlo.has_value()) {
    return std::nullopt;
  }
  auto shard_shape = MakePartitionedShape(base_shape_, temp_target_sharding);
  auto padded_base_shape = shard_shape;
  for (int64_t i = 0; i < padded_base_shape.rank(); ++i) {
    padded_base_shape.set_dimensions(
        i, padded_base_shape.dimensions(i) *
               temp_target_sharding.tile_assignment().dim(i));
  }
  // Slice offset = this partition's offset under the new sharding minus the
  // offset of the data it already holds under the old sharding.
  auto offsets = MakePartitionOffsets(padded_base_shape, temp_target_sharding,
                                      state_.partition_id, state_.b);
  auto old_offsets = MakePartitionOffsets(padded_base_shape, sharding(),
                                          state_.partition_id, state_.b);
  for (int64_t i = 0; i < offsets.size(); ++i) {
    offsets[i] = state_.b->AddInstruction(HloInstruction::CreateBinary(
        offsets[i]->shape(), HloOpcode::kSubtract, offsets[i], old_offsets[i]));
  }
  auto slice = state_.b->AddInstruction(HloInstruction::CreateDynamicSlice(
      shard_shape, padded_hlo.value(), offsets, shard_shape.dimensions()));
  slice->set_sharding(temp_target_sharding);
  auto result = PartitionedHlo(slice, base_shape_, state_);
  // Fix up device order with a collective permute when possible.
  if (CanReshardWithCollectivePermute(temp_target_sharding, target)) {
    return result.ReshardWithCollectivePermute(target);
  }
  return result;
}
// Replicates a value owned by a single device: every non-owner partition
// contributes zeros, and a cross-partition all-reduce sum leaves the owner's
// data on all partitions.
PartitionedHlo PartitionedHlo::Broadcast() const {
  const Shape& operand_shape = hlo_->shape();
  const HloSharding& src_sharding = hlo_->sharding();
  CHECK(src_sharding.HasUniqueDevice());
  CHECK(!operand_shape.IsTuple() && operand_shape.element_type() != TOKEN);
  // Scalar id of the partition that owns the data.
  HloInstruction* owner_id =
      state_.b->AddInstruction(HloInstruction::CreateConstant(
          LiteralUtil::CreateR0<uint32_t>(src_sharding.GetUniqueDevice())));
  // Predicate: is this partition the owner? Broadcast it to element shape.
  HloInstruction* is_owner_scalar =
      state_.b->AddInstruction(HloInstruction::CreateCompare(
          ShapeUtil::MakeShape(PRED, {}), state_.partition_id, owner_id,
          ComparisonDirection::kEq));
  Shape pred_shape = ShapeUtil::ChangeElementType(operand_shape, PRED);
  HloInstruction* is_owner = state_.b->AddInstruction(
      HloInstruction::CreateBroadcast(pred_shape, is_owner_scalar, {}));
  // Select the real data on the owner and zeros elsewhere.
  HloInstruction* zero_scalar =
      state_.b->AddInstruction(HloInstruction::CreateConstant(
          LiteralUtil::Zero(operand_shape.element_type())));
  HloInstruction* zeros = state_.b->AddInstruction(
      HloInstruction::CreateBroadcast(operand_shape, zero_scalar, {}));
  HloInstruction* masked =
      state_.b->AddInstruction(HloInstruction::CreateTernary(
          operand_shape, HloOpcode::kSelect, is_owner, hlo(), zeros));
  // Summing across all partitions yields the owner's value everywhere.
  HloComputation* add_computation =
      MakeBinaryAdd(operand_shape.element_type(), state_.module);
  HloInstruction* summed =
      state_.collective_ops_creator.create_cross_partition_all_reduce(
          state_.b, masked, add_computation, {}, NewChannel());
  summed->set_sharding(HloSharding::Replicate());
  return PartitionedHlo(summed, base_shape_, state_);
}
// Reshards to `target` using all-to-all collectives. Each entry of
// `source_target_dims` moves partitioning from its first dimension to its
// second; the first pair is handled here and the rest by recursion. When the
// list is empty, only a device-order fixup (collective permute) may remain.
PartitionedHlo PartitionedHlo::ReshardWithAllToAll(
    const HloSharding& target,
    absl::Span<const std::pair<int64_t, int64_t>> source_target_dims) const {
  if (source_target_dims.empty()) {
    if (target == sharding()) {
      return *this;
    }
    // Shardings have the same tiling but a different device order.
    return ReshardWithCollectivePermute(target);
  }
  VLOG(5) << "Source: " << sharding().ToString();
  VLOG(5) << "Target: " << target.ToString();
  int64_t source_dim = source_target_dims[0].first;
  int64_t target_dim = source_target_dims[0].second;
  // Number of partitions moved from source_dim to target_dim in this step.
  const int64_t group_size = sharding().tile_assignment().dim(source_dim) /
                             sharding().tile_assignment().dim(target_dim);
  VLOG(5) << "Group size: " << group_size;
  // Tile assignment after this step: split a group_size factor off
  // source_dim, transpose it next to target_dim, and flatten back so the two
  // dims exchange their partition counts.
  auto temp_target_tile = [&] {
    auto& original_tile_assignment = sharding().tile_assignment();
    std::vector<int64_t> reshape_tile_dims(
        original_tile_assignment.num_dimensions() + 2);
    int64_t i = 0;
    int64_t added_source_dim = -1;
    int64_t added_target_dim = -1;
    for (int64_t j = 0; j < original_tile_assignment.num_dimensions(); ++j) {
      if (source_dim == j) {
        reshape_tile_dims[i] = original_tile_assignment.dim(j) / group_size;
        reshape_tile_dims[++i] = group_size;
        added_source_dim = i;
      } else if (target_dim == j) {
        reshape_tile_dims[i] = original_tile_assignment.dim(j);
        reshape_tile_dims[++i] = 1;
        added_target_dim = i;
      } else {
        reshape_tile_dims[i] = original_tile_assignment.dim(j);
      }
      ++i;
    }
    VLOG(5) << "Added target: " << added_target_dim;
    VLOG(5) << "Added source: " << added_source_dim;
    std::vector<int64_t> xpose_dims(reshape_tile_dims.size());
    std::iota(xpose_dims.begin(), xpose_dims.end(), 0);
    xpose_dims[added_source_dim] = added_target_dim;
    xpose_dims[added_target_dim] = added_source_dim;
    auto temp_target_tile =
        hlo_sharding_util::TransposeSharding(
            HloSharding::Tile(
                original_tile_assignment.Reshape(reshape_tile_dims)),
            xpose_dims)
            .tile_assignment();
    VLOG(5) << "Transposed target: " << temp_target_tile.ToString();
    std::vector<int64_t> temp_target_tile_dims(
        sharding().tile_assignment().dimensions().begin(),
        sharding().tile_assignment().dimensions().end());
    temp_target_tile_dims[source_dim] =
        sharding().tile_assignment().dim(target_dim);
    temp_target_tile_dims[target_dim] =
        sharding().tile_assignment().dim(source_dim);
    return temp_target_tile.Reshape(temp_target_tile_dims);
  }();
  auto temp_target = target.ReplicateOnLastTileDim()
                         ? HloSharding::PartialTile(temp_target_tile)
                         : HloSharding::Tile(temp_target_tile);
  VLOG(5) << "Temp target sharding: " << temp_target.ToString();
  // NOTE(review): `padded_shape` appears unused below — confirm and remove.
  auto padded_shape = hlo_->shape();
  // Pad both dims so they are divisible by the intermediate tiling.
  auto padded_base_shape = base_shape_;
  auto current_base_padded_shape = base_shape_;
  padded_base_shape.set_dimensions(
      target_dim, RoundUpTo(base_shape_.dimensions(target_dim),
                            temp_target.tile_assignment().dim(target_dim)));
  current_base_padded_shape.set_dimensions(
      target_dim, hlo_->shape().dimensions(target_dim) *
                      sharding().tile_assignment().dim(target_dim));
  auto padded_source_base_shape = base_shape_;
  auto current_source_base_padded_shape = base_shape_;
  padded_source_base_shape.set_dimensions(
      source_dim, RoundUpTo(base_shape_.dimensions(source_dim),
                            temp_target.tile_assignment().dim(source_dim)));
  current_source_base_padded_shape.set_dimensions(
      source_dim, hlo_->shape().dimensions(source_dim) *
                      sharding().tile_assignment().dim(source_dim));
  VLOG(5) << "Target dim: " << target_dim;
  VLOG(5) << "Source dim: " << source_dim;
  VLOG(5) << "Original sharded shape: " << hlo_->shape();
  VLOG(5) << "Base shape: " << base_shape_.ToString();
  VLOG(5) << "Padded base shape: " << padded_base_shape.ToString();
  VLOG(5) << "Current padded shape: " << current_base_padded_shape.ToString();
  VLOG(5) << "Padded source base shape: "
          << padded_source_base_shape.ToString();
  VLOG(5) << "Current source padded shape: "
          << current_source_base_padded_shape.ToString();
  VLOG(5) << "Dimension padded target_dim: "
          << hlo_->shape().dimensions(target_dim) *
                 sharding().tile_assignment().dim(target_dim);
  // NOTE(review): GE vs LE asymmetry looks odd — all four shapes derive from
  // base_shape_, so the ranks should simply be equal; confirm.
  CHECK_GE(padded_base_shape.rank(), current_base_padded_shape.rank());
  CHECK_LE(padded_source_base_shape.rank(),
           current_source_base_padded_shape.rank());
  PaddingConfig pc;
  for (int64_t i = 0; i < hlo_->shape().rank(); ++i) {
    auto* pd = pc.add_dimensions();
    pd->set_edge_padding_low(0);
    pd->set_edge_padding_high(padded_base_shape.dimensions(i) -
                              current_base_padded_shape.dimensions(i));
    pd->set_interior_padding(0);
  }
  PartitionedHlo p_hlo = *this;
  VLOG(5) << "Before reshard: " << p_hlo.hlo_->ToString();
  HloInstruction* zero = CreateZero(
      ShapeUtil::MakeShape(hlo_->shape().element_type(), {}), state_.b);
  HloSharding sharding_copy = sharding();
  auto padded_phlo =
      ReshardDataForPad(zero, pc, p_hlo, sharding_copy, state_.b);
  CHECK(padded_phlo.has_value());
  VLOG(5) << "Resharded: " << padded_phlo->sharded_input->ToString();
  VLOG(5) << "Padded Window: " << padded_phlo->shard_window.DebugString();
  HloInstruction* padded_hlo =
      PadDataFromWindowReshard(*padded_phlo, zero, state_.b);
  VLOG(5) << "Padded data: " << padded_hlo->ToString();
  // Each all-to-all group contains the devices that differ only by the
  // group_size factor being moved on target_dim.
  std::vector<std::vector<int64_t>> groups(
      temp_target.tile_assignment().num_elements() / group_size);
  temp_target.tile_assignment().Each(
      [&](absl::Span<const int64_t> indices, int64_t device) {
        int64_t group_id = 0;
        for (int64_t dim = 0; dim < indices.size(); ++dim) {
          if (dim == target_dim) {
            group_id *= temp_target.tile_assignment().dim(dim) / group_size;
            group_id += indices[dim] / group_size;
          } else {
            group_id *= temp_target.tile_assignment().dim(dim);
            group_id += indices[dim];
          }
        }
        groups[group_id].push_back(device);
      });
  HloInstruction* result = nullptr;
  // Split target_dim into [group_size, shard] so the all-to-all exchanges
  // along the new leading factor.
  std::vector<int64_t> dimensions;
  const int64_t rank = base_shape_.rank();
  dimensions.reserve(rank + 1);
  for (int64_t i = 0; i < rank; ++i) {
    if (i == target_dim) {
      dimensions.push_back(group_size);
      dimensions.push_back(padded_hlo->shape().dimensions(i) / group_size);
    } else {
      dimensions.push_back(padded_hlo->shape().dimensions(i));
    }
  }
  VLOG(5) << "Target ata shape: "
          << ShapeUtil::MakeShape(base_shape_.element_type(), dimensions)
                 .ToString();
  auto reshape = state_.b->AddInstruction(HloInstruction::CreateReshape(
      ShapeUtil::MakeShape(base_shape_.element_type(), dimensions),
      padded_hlo));
  auto all_to_all =
      state_.collective_ops_creator.create_cross_partition_all_to_all(
          state_.b, {reshape}, groups, (*state_.next_channel_id)++, target_dim);
  // Move the exchanged factor next to source_dim and flatten it back in.
  int64_t new_source_dim =
      (target_dim < source_dim) ? source_dim + 1 : source_dim;
  std::vector<int64_t> permutation;
  for (int64_t i = 0; i < all_to_all->shape().rank(); ++i) {
    if (i == target_dim) {
      continue;
    }
    if (i == new_source_dim) {
      permutation.push_back(target_dim);
    }
    permutation.push_back(i);
  }
  auto transpose = state_.b->AddInstruction(HloInstruction::CreateTranspose(
      ShapeInference::InferTransposeShape(all_to_all->shape(), permutation)
          .value(),
      all_to_all, permutation));
  auto new_shape = ShapeInference::InferAllToAllShape(
                       padded_hlo->shape(), target_dim, source_dim, group_size)
                       .value();
  result = state_.b->AddInstruction(
      HloInstruction::CreateReshape(new_shape, transpose));
  result->set_sharding(temp_target);
  // Slice away the padding introduced on source_dim before recursing.
  std::vector<int64_t> strides(result->shape().rank(), 1);
  std::vector<int64_t> starts(result->shape().rank(), 0);
  std::vector<int64_t> limits(result->shape().rank());
  for (int64_t i = 0; i < result->shape().rank(); ++i) {
    limits[i] = padded_source_base_shape.dimensions(i);
  }
  auto sliced_phlo = ReshardDataForSlicing(
      strides, starts, limits,
      PartitionedHlo(result, current_source_base_padded_shape, state_),
      temp_target, state_.b);
  CHECK(sliced_phlo.has_value());
  result = SliceDataFromWindowReshard(*sliced_phlo, strides, base_shape_,
                                      temp_target, state_.b);
  result->set_sharding(temp_target);
  // Handle the remaining dimension pairs recursively.
  auto remaining_source_target_dims = source_target_dims;
  remaining_source_target_dims.remove_prefix(1);
  return PartitionedHlo(result, base_shape_, state_)
      .ReshardWithAllToAll(target, remaining_source_target_dims);
}
namespace {
// Pattern-matches a reshard between two tiled shardings that differ on a
// pair of dimensions (i, j) such that the reshard becomes simple after
// splitting dimension i of the data. On success returns:
//   {sharding for the split-reshaped source, target sharding in the split
//    space, the dimension that was split}.
// Returns nullopt if the shardings are not tiled, differ in rank or partial
// replication, or no dimension pair matches.
std::optional<std::tuple<HloSharding, HloSharding, int64_t>>
PatternMatchMergeOrSplitSharding(const Shape& shape, const Shape& base_shape,
                                 const HloSharding& source,
                                 const HloSharding& target) {
  if (!source.IsTiled() || !target.IsTiled()) {
    return std::nullopt;
  }
  if (source.TiledDataRank() != target.TiledDataRank()) {
    return std::nullopt;
  }
  // Both must agree on whether — and by how much — they partially replicate.
  if ((source.HasPartialReplication() ^ target.HasPartialReplication()) ||
      (source.HasPartialReplication() &&
       source.tile_assignment().dimensions()[source.TiledDataRank()] !=
           target.tile_assignment().dimensions()[target.TiledDataRank()])) {
    return std::nullopt;
  }
  // Collect dims with differing partition counts; at least two are needed to
  // form a merge/split pair.
  std::vector<int64_t> diff_index;
  for (int64_t i = 0; i < target.TiledDataRank(); ++i) {
    if (source.tile_assignment().dim(i) != target.tile_assignment().dim(i)) {
      diff_index.push_back(i);
    }
  }
  if (diff_index.size() < 2) {
    return std::nullopt;
  }
  for (int64_t diff_index_i = 0; diff_index_i < diff_index.size();
       ++diff_index_i) {
    for (int64_t diff_index_j = diff_index_i + 1;
         diff_index_j < diff_index.size(); ++diff_index_j) {
      int64_t i = diff_index[diff_index_i];
      int64_t j = diff_index[diff_index_j];
      const std::vector<bool> is_one = {source.tile_assignment().dim(i) == 1,
                                        source.tile_assignment().dim(j) == 1,
                                        target.tile_assignment().dim(i) == 1,
                                        target.tile_assignment().dim(j) == 1};
      int64_t new_dim_size;
      switch (std::count(is_one.begin(), is_one.end(), true)) {
        case 1: {
          // Exactly one of the four dims is unsharded: the pair matches when
          // the total partitions on (i, j) agree between source and target.
          if (source.tile_assignment().dim(i) *
                  source.tile_assignment().dim(j) !=
              target.tile_assignment().dim(i) *
                  target.tile_assignment().dim(j)) {
            continue;
          }
          // Normalize so that dim i is the sharded one on both sides.
          if (source.tile_assignment().dim(i) == 1 ||
              target.tile_assignment().dim(i) == 1) {
            std::swap(i, j);
          }
          if (target.tile_assignment().dim(j) == 1) {
            // Moving partitions from j to i: j's shards must evenly divide
            // the (shard) size of dim i.
            if (shape.dimensions(i) % source.tile_assignment().dim(j) != 0) {
              continue;
            }
            new_dim_size = source.tile_assignment().dim(i);
          } else {
            // Moving partitions from i to j: i's partitions must evenly
            // divide the full (base) size of dim i.
            if (base_shape.dimensions(i) % source.tile_assignment().dim(i) !=
                0) {
              continue;
            }
            new_dim_size = target.tile_assignment().dim(i);
          }
          break;
        }
        case 0: {
          // All four dims are sharded: match when the bigger source dim
          // carries exactly the product of the target's two dims.
          if (source.tile_assignment().dim(i) <
              target.tile_assignment().dim(i)) {
            std::swap(i, j);
          }
          if (source.tile_assignment().dim(i) !=
              target.tile_assignment().dim(i) *
                  target.tile_assignment().dim(j)) {
            continue;
          }
          if (base_shape.dimensions(i) % source.tile_assignment().dim(i) != 0) {
            continue;
          }
          new_dim_size = target.tile_assignment().dim(i);
          break;
        }
        default:
          continue;
      }
      // Split dim i of the source sharding, then build the target sharding
      // for the split shape by swapping the newly inserted dim with dim j
      // (index shifted by one when j comes after i).
      auto reshaped_sharding =
          hlo_sharding_util::SplitShardingDimension(source, i, new_dim_size);
      std::vector<int64_t> dimensions(
          reshaped_sharding.tile_assignment().dimensions().begin(),
          reshaped_sharding.tile_assignment().dimensions().end());
      std::swap(dimensions[i + 1], dimensions[j + (j > i ? 1 : 0)]);
      auto target_tile_assignment =
          target.tile_assignment().Reshape(dimensions);
      auto new_sharding =
          source.HasPartialReplication()
              ? HloSharding::PartialTile(target_tile_assignment,
                                         source.metadata())
              : HloSharding::Tile(target_tile_assignment, source.metadata());
      VLOG(10) << "Reshaped sharding before: " << reshaped_sharding.ToString();
      VLOG(10) << "Reshaped sharding: " << new_sharding.ToString();
      return std::make_tuple(std::move(reshaped_sharding),
                             std::move(new_sharding), i);
    }
  }
  return std::nullopt;
}
// Matches a reshard where one tiled dimension of `source` can account for the
// target's trailing replicated subgroup. Returns the intermediate sharding
// with that dimension partially replicated, or nullopt when no dimension's
// partition count (combined with the source's existing replication) equals
// the target's replication factor.
std::optional<HloSharding> PatternMatchPartiallyReplicateDim(
    const HloSharding& source, const HloSharding& target) {
  if (!target.ReplicateOnLastTileDim()) {
    return std::nullopt;
  }
  const int64_t target_replicated_dim = target.SubgroupReplicationDim();
  int64_t source_replicated_size = 1;
  if (source.HasPartialReplication()) {
    source_replicated_size =
        source.tile_assignment().dim(source.SubgroupReplicationDim());
  }
  CHECK_NE(target_replicated_dim, -1) << "Expected replicated dim";
  const int64_t target_replicated_size =
      target.tile_assignment().dim(target_replicated_dim);
  for (int dim = 0; dim < source.TiledDataRank(); ++dim) {
    const int64_t dim_partitions = source.tile_assignment().dim(dim);
    if (dim_partitions == 1) {
      continue;
    }
    if (dim_partitions * source_replicated_size != target_replicated_size) {
      continue;
    }
    // Replicating `dim` yields exactly the target's replication factor.
    return hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(source,
                                                                    {dim});
  }
  return std::nullopt;
}
// Splits `dim_to_split` of the local shard into [dim / dim_size, dim_size]
// with a reshape, mirrors the split on the base shape, and applies
// `target_sharding` to the reshaped instruction.
PartitionedHlo SplitReshapeHelper(const PartitionedHlo& to_reshape,
                                  int64_t dim_to_split, int64_t dim_size,
                                  const HloSharding& target_sharding) {
  const Shape& shard_shape = to_reshape.hlo()->shape();
  // Split the shard-local dimension.
  std::vector<int64_t> split_dims(shard_shape.dimensions().begin(),
                                  shard_shape.dimensions().end());
  split_dims.insert(split_dims.begin() + dim_to_split + 1, dim_size);
  split_dims[dim_to_split] /= dim_size;
  // Mirror the split on the (unpartitioned) base shape: the inserted dim
  // spans dim_size elements per shard times the partitions on that dim.
  const int64_t base_split_size =
      dim_size * target_sharding.tile_assignment().dim(dim_to_split + 1);
  std::vector<int64_t> split_base_dims(
      to_reshape.base_shape().dimensions().begin(),
      to_reshape.base_shape().dimensions().end());
  split_base_dims.insert(split_base_dims.begin() + dim_to_split + 1,
                         base_split_size);
  split_base_dims[dim_to_split] /= base_split_size;
  Shape reshaped_shard_shape =
      ShapeUtil::MakeShape(shard_shape.element_type(), split_dims);
  HloInstruction* reshape = to_reshape.state().b->AddInstruction(
      HloInstruction::CreateReshape(reshaped_shard_shape, to_reshape.hlo()));
  reshape->set_sharding(target_sharding);
  Shape reshaped_base_shape = ShapeUtil::MakeShape(
      to_reshape.base_shape().element_type(), split_base_dims);
  return PartitionedHlo{reshape, reshaped_base_shape, to_reshape.state()};
}
// Collapses `dim_to_merge` and `dim_to_merge + 1` of the local shard into a
// single dimension with a reshape, mirrors the merge on the base shape, and
// applies `target_sharding` to the reshaped instruction.
PartitionedHlo MergeReshapeHelper(const PartitionedHlo& to_reshape,
                                  int64_t dim_to_merge,
                                  const HloSharding& target_sharding) {
  const Shape& shard_shape = to_reshape.hlo()->shape();
  // Merge the shard-local dimensions.
  std::vector<int64_t> merged_dims(shard_shape.dimensions().begin(),
                                   shard_shape.dimensions().end());
  merged_dims[dim_to_merge] *= merged_dims[dim_to_merge + 1];
  merged_dims.erase(merged_dims.begin() + dim_to_merge + 1);
  // Mirror the merge on the (unpartitioned) base shape.
  std::vector<int64_t> merged_base_dims(
      to_reshape.base_shape().dimensions().begin(),
      to_reshape.base_shape().dimensions().end());
  merged_base_dims[dim_to_merge] *= merged_base_dims[dim_to_merge + 1];
  merged_base_dims.erase(merged_base_dims.begin() + dim_to_merge + 1);
  Shape merged_shard_shape =
      ShapeUtil::MakeShape(shard_shape.element_type(), merged_dims);
  HloInstruction* reshape = to_reshape.state().b->AddInstruction(
      HloInstruction::CreateReshape(merged_shard_shape, to_reshape.hlo()));
  reshape->set_sharding(target_sharding);
  // The base shape reuses the shard's element type, matching the original
  // behavior (they are identical for a reshape).
  Shape merged_base_shape =
      ShapeUtil::MakeShape(shard_shape.element_type(), merged_base_dims);
  return PartitionedHlo(reshape, merged_base_shape, to_reshape.state());
}
}
// Handles reshards that simpler mechanisms cannot do directly, by rewriting
// them through an intermediate reshape or an intermediate sharding. Returns
// nullopt when no pattern applies or a sub-reshard fails to land on the
// requested sharding.
std::optional<PartitionedHlo> PartitionedHlo::TryComplexReshardHandling(
    const HloSharding& target) const {
  VLOG(5) << "Trying to split complicated reshard: " << sharding().ToString()
          << " to " << target.ToString();
  const bool is_source_partially_replicated =
      sharding().ReplicateOnLastTileDim();
  const bool is_target_partially_replicated = target.ReplicateOnLastTileDim();
  // Pattern 1: the reshard is a merge/split of two tile dimensions. Split
  // the dimension, reshard in the split space, then merge back.
  if (auto reshape = PatternMatchMergeOrSplitSharding(
          this->hlo()->shape(), this->base_shape(), sharding(), target)) {
    auto& [before_sharding, new_reshaped_sharding, source_dim] = *reshape;
    VLOG(10) << "Matched \"pattern_match_reshape()\": "
             << std::get<0>(*reshape).ToString();
    VLOG(10) << "Original shape: " << hlo()->shape().ToString();
    // NOTE(review): this log streams std::get<1> (a sharding) under the
    // label "Dim to split" — std::get<2> (source_dim) was likely intended.
    VLOG(10) << "Dim to split: " << std::get<1>(*reshape) << " size "
             << sharding().tile_assignment().dim(source_dim);
    VLOG(10) << "Before sharding: " << before_sharding.ToString();
    PartitionedHlo reshaped = SplitReshapeHelper(
        *this, source_dim, this->hlo()->shape().dimensions(source_dim),
        before_sharding);
    auto reshard = reshaped.ReshardNoCache(new_reshaped_sharding,
                                           std::nullopt,
                                           false);
    if (reshard.sharding() != new_reshaped_sharding) {
      return std::nullopt;
    }
    auto reshaped_sharding = hlo_sharding_util::MergeShardingDimension(
        reshard.sharding(), source_dim);
    reshaped = MergeReshapeHelper(reshard, source_dim, reshaped_sharding);
    if (reshaped.sharding() != target) {
      // One more (cheap) reshard may be needed after merging back.
      reshaped = reshaped.ReshardNoCache(target, std::nullopt,
                                         false);
      if (reshaped.sharding() != target) {
        return std::nullopt;
      }
    }
    return reshaped;
  }
  // Pattern 2: one source dimension's partitions can become the target's
  // replicated subgroup; go via that partially replicated intermediate.
  if (auto intermediate_target =
          PatternMatchPartiallyReplicateDim(sharding(), target)) {
    VLOG(5) << "Matched \"pattern_match_partially_replicate_dim()\": "
            << intermediate_target->ToString();
    auto intermediate_reshard = Reshard(*intermediate_target);
    auto final_reshard = intermediate_reshard.ReshardNoCache(
        target, std::nullopt, false);
    if (final_reshard.sharding() != target) {
      return std::nullopt;
    }
    return final_reshard;
  }
  // Pattern 3: source is partially replicated, target is not. Transpose the
  // replicated subgroup onto the first unsharded dim where it divides the
  // target's partition count, then reshard to the target.
  if (is_source_partially_replicated && !is_target_partially_replicated) {
    const int64_t partial_repl_amount =
        sharding().tile_assignment().dimensions().back();
    int64_t first_different_dimension = -1;
    for (int64_t i = 0; i < target.tile_assignment().num_dimensions(); ++i) {
      if (target.tile_assignment().dim(i) !=
              sharding().tile_assignment().dim(i) &&
          sharding().tile_assignment().dim(i) == 1 &&
          target.tile_assignment().dim(i) % partial_repl_amount == 0) {
        first_different_dimension = i;
        break;
      }
    }
    if (first_different_dimension == -1) {
      return std::nullopt;
    }
    VLOG(5) << "Matched partially replicated to non partially replicated: "
            << sharding().ToString();
    std::vector<int64_t> transpose_dims(
        sharding().tile_assignment().num_dimensions(), 0);
    std::iota(transpose_dims.begin(), transpose_dims.end(), 0);
    std::swap(transpose_dims[first_different_dimension], transpose_dims.back());
    auto intermediate_sharding =
        hlo_sharding_util::TransposeSharding(sharding(), transpose_dims);
    auto intermediate_reshard = Reshard(intermediate_sharding);
    auto reshard = intermediate_reshard.ReshardNoCache(
        target, std::nullopt, false);
    if (reshard.sharding() != target) {
      return std::nullopt;
    }
    return reshard;
  }
  return std::nullopt;
}
// Reshards between a partially replicated sharding and a plainly tiled one
// (in either direction) using an all-to-all, when the two differ exactly by
// moving the replicated subgroup into/out of a single tile dimension.
std::optional<PartitionedHlo>
PartitionedHlo::ReshardPartialReplicateWithAllToAll(
    const HloSharding& target) const {
  bool source_is_partial_replicate = sharding().ReplicateOnLastTileDim();
  const auto& partial_replicate_sharding =
      source_is_partial_replicate ? sharding() : target;
  // One side must be partially replicated...
  if (!partial_replicate_sharding.ReplicateOnLastTileDim()) {
    return std::nullopt;
  }
  const auto& tile_sharding = source_is_partial_replicate ? target : sharding();
  // ...and the other side must be plainly tiled.
  if (tile_sharding.ReplicateOnLastTileDim() || tile_sharding.IsTileMaximal()) {
    return std::nullopt;
  }
  const int num_replicas =
      partial_replicate_sharding.tile_assignment().dimensions().back();
  // The partial sharding must have one extra dim and a leading dim of 1.
  if (((tile_sharding.tile_assignment().num_dimensions() + 1) !=
       partial_replicate_sharding.tile_assignment().num_dimensions()) ||
      (partial_replicate_sharding.tile_assignment().dim(0) != 1)) {
    return std::nullopt;
  }
  // Scan from the last dim: the highest dim partitioned more than once must
  // carry exactly num_replicas partitions, and every dim must match the
  // partial sharding's corresponding (shifted-by-one) dim.
  int to_replicate_dim = -1;
  for (int i = tile_sharding.tile_assignment().num_dimensions() - 1; i >= 0;
       --i) {
    if (tile_sharding.tile_assignment().dim(i) > 1 &&
        (to_replicate_dim == -1)) {
      if (tile_sharding.tile_assignment().dim(i) != num_replicas) {
        return std::nullopt;
      }
      to_replicate_dim = i;
    }
    if (tile_sharding.tile_assignment().dim(i) !=
        partial_replicate_sharding.tile_assignment().dim(i + 1)) {
      return std::nullopt;
    }
  }
  if (to_replicate_dim == -1) {
    return std::nullopt;
  }
  // Device order must also agree once the replicated dim is reshaped away.
  auto reshape_tile_assignment =
      partial_replicate_sharding.tile_assignment().Reshape(
          tile_sharding.tile_assignment().dimensions());
  if (reshape_tile_assignment != tile_sharding.tile_assignment()) {
    return std::nullopt;
  }
  // Intermediate sharding: tile_sharding with to_replicate_dim moved into a
  // trailing replicated subgroup.
  std::vector<int64_t> tmp_tile_assignment_dimensions(
      tile_sharding.tile_assignment().dimensions().begin(),
      tile_sharding.tile_assignment().dimensions().end());
  tmp_tile_assignment_dimensions[to_replicate_dim] = 1;
  tmp_tile_assignment_dimensions.push_back(num_replicas);
  auto tmp_tile_assignment =
      tile_sharding.tile_assignment().Reshape(tmp_tile_assignment_dimensions);
  auto tmp_partial_replicate_sharding =
      HloSharding::PartialTile(tmp_tile_assignment);
  if (source_is_partial_replicate) {
    // partial-replicate -> tiled: all-to-all first, then final reshard.
    if (auto src_tgt_dims = GetReshardAllToAllSourceTargetDims(
            sharding(), tmp_partial_replicate_sharding)) {
      auto partitioned_hlo =
          ReshardWithAllToAll(tmp_partial_replicate_sharding, *src_tgt_dims);
      return partitioned_hlo.Reshard(target);
    }
  } else {
    // tiled -> partial-replicate: reshard to the intermediate, then
    // all-to-all into the target.
    auto partitioned_hlo = Reshard(tmp_partial_replicate_sharding);
    if (auto src_tgt_dims = GetReshardAllToAllSourceTargetDims(
            partitioned_hlo.sharding(), target)) {
      return partitioned_hlo.ReshardWithAllToAll(target, *src_tgt_dims);
    }
  }
  return std::nullopt;
}
// Reshards to `target` (same tiling, different device order) by sending each
// shard from its source device to its destination device.
PartitionedHlo PartitionedHlo::ReshardWithCollectivePermute(
    const HloSharding& target) const {
  CHECK(CanReshardWithCollectivePermute(sharding(), target))
      << sharding().ToString() << " to " << target.ToString();
  // If this HLO is known to be a broadcast along some dimensions and the two
  // shardings only differ along those dimensions, every device already holds
  // the right data, so a local copy is enough.
  if (auto broadcast_dims = state_.b->BroadcastDimsForCreatedHlo(hlo())) {
    if (!(*broadcast_dims)->empty()) {
      std::vector<int64_t> bcast_dims;
      const int64_t rank = hlo()->shape().rank();
      for (int64_t dim = 0; dim < rank; ++dim) {
        if ((*broadcast_dims)->contains(dim)) {
          bcast_dims.push_back(dim);
        }
      }
      const HloSharding src_replicated =
          hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(sharding(),
                                                                   bcast_dims);
      const HloSharding tgt_replicated =
          hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(target,
                                                                   bcast_dims);
      if (src_replicated == tgt_replicated) {
        HloInstruction* copy = state_.b->AddInstruction(
            HloInstruction::CreateUnary(hlo()->shape(), HloOpcode::kCopy,
                                        hlo()));
        copy->set_sharding(target);
        return PartitionedHlo(copy, base_shape_, state_);
      }
    }
  }
  // Pair each source device with the device holding the same tile index in
  // the target assignment.
  std::vector<std::pair<int64_t, int64_t>> src_dst_pairs;
  sharding().tile_assignment().Each(
      [&](absl::Span<const int64_t> indices, int64_t src_device) {
        src_dst_pairs.emplace_back(src_device,
                                   target.tile_assignment()(indices));
      });
  HloInstruction* permute =
      state_.collective_ops_creator.create_cross_partition_collective_permute(
          state_.b, hlo(), src_dst_pairs, (*state_.next_channel_id)++);
  permute->set_sharding(target);
  return PartitionedHlo(permute, base_shape_, state_);
}
// Primary constructor: captures the computation's module and partitioning
// parameters, creates a fresh SPMD builder named "<computation>_spmd", and
// materializes the partition-id instruction in that builder up front (note
// `partition_id_` must be initialized after `b_`).
SpmdPartitioningVisitor::SpmdPartitioningVisitor(
    HloComputation* computation, int64_t num_partitions, int64_t num_replicas,
    const SPMDCollectiveOpsCreator& collective_ops_creator,
    int64_t* next_channel_id, SpmdLogger* logger,
    SpmdPartitionerOptions options, SpmdPartitioner* partitioner,
    const CallGraph& call_graph)
    : changed_(false),
      module_(computation->parent()),
      num_partitions_(num_partitions),
      num_replicas_(num_replicas),
      collective_ops_creator_(collective_ops_creator),
      next_channel_id_(next_channel_id),
      // Direct-initialize the builder (consistent with the copy constructor)
      // instead of copy-initializing it from a temporary SpmdBuilder.
      b_(absl::StrCat(computation->name(), "_spmd"), nullptr),
      partition_id_(collective_ops_creator_.create_partition_id(&b_)),
      logger_(logger),
      options_(std::move(options)),
      partitioner_(partitioner),
      call_graph_(call_graph) {}
// Copy constructor (used by Clone()). Scalar/configuration state is copied
// from `src`, but the SPMD builder `b_` is rebuilt from scratch — named after
// the module's entry computation — and `partition_id_` is re-created in the
// new builder rather than copied, so the clone emits into its own builder.
SpmdPartitioningVisitor::SpmdPartitioningVisitor(
    const SpmdPartitioningVisitor& src)
    : changed_(src.changed_),
      module_(src.module_),
      num_partitions_(src.num_partitions_),
      num_replicas_(src.num_replicas_),
      collective_ops_creator_(src.collective_ops_creator_),
      next_channel_id_(src.next_channel_id_),
      b_(absl::StrCat(module_->entry_computation()->name(), "_spmd"),
         nullptr),
      partition_id_(collective_ops_creator_.create_partition_id(&b_)),
      logger_(src.logger_),
      options_(src.options_),
      partitioner_(src.partitioner_),
      call_graph_(src.call_graph_) {}
// Returns a fresh visitor sharing this visitor's configuration. The copy
// constructor rebuilds the builder and partition-id rather than copying them.
std::unique_ptr<SpmdPartitioningVisitor> SpmdPartitioningVisitor::Clone()
    const {
  return std::make_unique<SpmdPartitioningVisitor>(*this);
}
// Builds the PartitioningState handed to PartitionedHlo helpers. When the
// visitor is inside a manual subgroup (device_groups_ non-empty), the saved
// ungrouped creator/partition-id are used and a per-group state is derived.
PartitionedHlo::PartitioningState
SpmdPartitioningVisitor::MakePartitioningState() {
  PartitionedHlo::PartitioningState result;
  result.b = &b_;
  result.module = module_;
  result.num_replicas = num_replicas_;
  result.next_channel_id = next_channel_id_;
  result.reshard_cache = &reshard_cache_;
  result.partitioner = partitioner_;
  if (device_groups_.empty()) {
    // Not inside a grouped (manual subgroup) region: use the visitor's
    // current collective-ops creator and partition id directly.
    result.collective_ops_creator = collective_ops_creator_;
    result.partition_id = partition_id_;
    return result;
  }
  // Grouped region: start from the saved ungrouped creator/partition-id and
  // narrow the state down to the current device groups.
  result.collective_ops_creator = *visiting_collective_ops_creator_;
  result.partition_id = *visiting_partition_id_;
  return CreatePerGroupPartitioningState(result, device_groups_, &b_);
}
std::vector<ReplicaGroup> SpmdPartitioningVisitor::CreateReplicaGroups(
std::vector<std::vector<int64_t>>& groups) {
std::vector<ReplicaGroup> device_groups;
device_groups.reserve(groups.size() * num_replicas_);
for (int64_t i = 0; i < num_replicas_; ++i) {
for (const auto& group : groups) {
device_groups.emplace_back();
for (int64_t id : group) {
device_groups.back().add_replica_ids(i * num_partitions_ + id);
}
}
}
return device_groups;
}
// Partitions a kCall: propagates each operand's sharding onto the callee's
// corresponding parameter, recursively partitions the called computation,
// then emits the call on the partitioned operand/result shapes, preserving
// the original backend config.
absl::Status SpmdPartitioningVisitor::HandleCall(HloInstruction* hlo) {
  std::vector<HloInstruction*> call_args;
  HloComputation* computation = hlo->called_computations()[0];
  for (int64_t i = 0; i < hlo->operand_count(); ++i) {
    // The callee must see the same parameter shardings as the call site's
    // operands before it is partitioned.
    computation->parameter_instruction(i)->set_sharding(
        hlo->operand(i)->sharding());
    call_args.push_back(GetPartitionedHlo(hlo->operand(i)).hlo());
  }
  TF_RETURN_IF_ERROR(partitioner_
                         ->PartitionComputation(computation, hlo->sharding(),
                                                next_channel_id_, logger_,
                                                call_graph_)
                         .status());
  SetPartitionedHlo(hlo, [&] {
    auto* call = b_.AddInstruction(HloInstruction::CreateCall(
        MakePartitionedShape(hlo->shape(), hlo->sharding()), call_args,
        hlo->called_computations()[0]));
    call->set_raw_backend_config_string(hlo->raw_backend_config_string());
    return call;
  });
  return absl::OkStatus();
}
// Fallback for ops without dedicated partitioning logic: reshard every
// operand to a replicated (or, if the op is pinned, unique-device) sharding,
// run the op unpartitioned, and reshard the result to the requested output
// sharding.
absl::Status SpmdPartitioningVisitor::DefaultAction(HloInstruction* hlo) {
  // Replicating a side-effecting op is not safe, so only unique-device
  // shardings are supported for such ops here.
  if (hlo->HasSideEffect() && !hlo->sharding().HasUniqueDevice()) {
    return Unimplemented("Side-effect ops cannot be replicated: %s",
                         hlo->ToString());
  }
  // Elementwise ops have a cheap generic handler; dispatch to it instead of
  // replicating.
  if (hlo->IsElementwise() && hlo->operand_count() > 0) {
    return HandleElementwise(hlo);
  }
  // Falling back on a tiled op means we lose its partitioning; log it.
  if (!hlo->sharding().IsTileMaximal()) {
    VLOG(1) << "Not partitioned in SPMD mode (DefaultAction):"
            << hlo->ToString();
    for (int64_t i = 0; i < hlo->operand_count(); ++i) {
      VLOG(1) << "  operand " << i
              << " sharding:" << hlo->operand(i)->sharding().ToString();
    }
  }
  // Replicate, or keep everything on the op's unique device if it has one.
  const HloSharding base_sharding = [&]() {
    if (hlo->sharding().HasUniqueDevice()) {
      return HloSharding::AssignDevice(hlo->sharding().GetUniqueDevice());
    }
    return HloSharding::Replicate();
  }();
  std::vector<HloInstruction*> new_operands;
  for (HloInstruction* operand : hlo->operands()) {
    HloSharding operand_sharding =
        base_sharding.NormalizeTupleSharding(operand->shape());
    new_operands.push_back(
        GetPartitionedHlo(operand).Reshard(operand_sharding).hlo());
  }
  auto clone =
      b_.AddInstruction(hlo->CloneWithNewOperands(hlo->shape(), new_operands));
  clone->set_sharding(base_sharding.NormalizeTupleSharding(clone->shape()));
  SetPartitionedHlo(hlo,
                    PartitionedHlo(clone, hlo->shape(), MakePartitioningState())
                        .Reshard(hlo->sharding()));
  return absl::OkStatus();
}
// Runs before each instruction is visited. Records the instruction on the
// builder and temporarily rewrites manual shardings so the visitor can
// process the instruction:
//  - fully manual shardings are replaced by a single-device assignment
//    (device 0), with the originals saved for Postprocess to restore;
//  - manual-subgroup shardings cause the visitor to switch into a grouped
//    mode: device groups are extracted, num_partitions_ / partition_id_ /
//    collective_ops_creator_ are swapped for per-group versions, and operand
//    shardings and partitioning states are rewritten to their in-group form.
absl::Status SpmdPartitioningVisitor::Preprocess(HloInstruction* hlo) {
  visiting_hlo_ = hlo;
  b_.set_visiting_hlo(hlo);
  // Maps a manual (sub)sharding to AssignDevice(0). Custom-calls keep their
  // manual sharding; for non-tuple shardings, kPartitionId is also exempt.
  auto manual_to_onedevice = [&](HloOpcode opcode, const Shape& shape,
                                 const HloSharding& sharding) {
    if (sharding.IsTuple()) {
      std::vector<HloSharding> subshardings = sharding.tuple_elements();
      for (HloSharding& subsharding : subshardings) {
        if (subsharding.IsManual() && opcode != HloOpcode::kCustomCall) {
          subsharding = HloSharding::AssignDevice(0);
        }
      }
      return HloSharding::Tuple(shape, subshardings);
    }
    if (sharding.IsManual() && opcode != HloOpcode::kCustomCall &&
        opcode != HloOpcode::kPartitionId) {
      return HloSharding::AssignDevice(0);
    }
    return sharding;
  };
  // These opcodes are excluded from the manual-sharding rewrites below.
  if (hlo->opcode() != HloOpcode::kConditional &&
      hlo->opcode() != HloOpcode::kTuple &&
      hlo->opcode() != HloOpcode::kParameter &&
      hlo->opcode() != HloOpcode::kWhile && hlo->opcode() != HloOpcode::kRng &&
      hlo->opcode() != HloOpcode::kOutfeed &&
      hlo->opcode() != HloOpcode::kAllReduce &&
      hlo->opcode() != HloOpcode::kCall) {
    const bool has_manual_sharding =
        hlo->sharding().IsManual() ||
        (hlo->sharding().IsTuple() &&
         absl::c_any_of(
             hlo->sharding().tuple_elements(),
             [](const HloSharding& sharding) { return sharding.IsManual(); }));
    if (has_manual_sharding && !hlo->IsCustomCall("SPMDFullToShardShape")) {
      // Fully manual: stash the original sharding and rewrite this
      // instruction's and its operands' shardings to device 0.
      visiting_hlo_sharding_ = hlo->sharding();
      // Outfeed's sharding applies to its operands, so use the operand tuple
      // shape when rewriting tuple shardings for it.
      auto get_sharding_shape = [](const HloInstruction* hlo) {
        if (hlo->opcode() != HloOpcode::kOutfeed) {
          return hlo->shape();
        }
        std::vector<Shape> operand_shapes(hlo->operand_count());
        for (int i = 0; i < hlo->operand_count(); ++i) {
          operand_shapes[i] = hlo->operand(i)->shape();
        }
        return ShapeUtil::MakeTupleShape(operand_shapes);
      };
      hlo->set_sharding(manual_to_onedevice(
          hlo->opcode(), get_sharding_shape(hlo), *visiting_hlo_sharding_));
      visiting_hlo_operand_shardings_.reserve(hlo->operand_count());
      for (HloInstruction* operand : hlo->unique_operands()) {
        visiting_hlo_operand_shardings_.push_back(operand->sharding());
        operand->set_sharding(manual_to_onedevice(
            hlo->opcode(), get_sharding_shape(operand), operand->sharding()));
        GetPartitionedHlo(operand).hlo()->copy_sharding(operand);
      }
    } else {
      const bool has_manual_subgroup =
          hlo->sharding().IsManualSubgroup() ||
          (hlo->sharding().IsTuple() &&
           absl::c_any_of(hlo->sharding().tuple_elements(),
                          [](const HloSharding& sharding) {
                            return sharding.IsManualSubgroup();
                          }));
      if (has_manual_subgroup && !hlo->IsCustomCall("SPMDFullToShardShape") &&
          !hlo->IsCustomCall("SPMDShardToFullShape") &&
          hlo->opcode() != HloOpcode::kGetTupleElement) {
        // Extracts the manual-subgroup grouping from `sharding`. When `ref`
        // is given, the groups are aligned with it (hard error if they are
        // incompatible). Tuple shardings require all elements to group
        // compatibly with the first element.
        auto get_grouped_sharding =
            [&](const HloSharding& sharding, const Shape& shape,
                const GroupedSharding* ref =
                    nullptr) -> absl::StatusOr<GroupedSharding> {
          if (!sharding.IsTuple()) {
            GroupedSharding grouped =
                hlo_sharding_util::GetManualSubgroupSharding(sharding);
            if (ref != nullptr) {
              auto aligned =
                  AlignGroupsWithIfCompatible(std::move(grouped), *ref);
              TF_RET_CHECK(aligned.has_value())
                  << "Incompatible manual sharding at " << hlo->ToString();
              return *aligned;
            }
            return grouped;
          }
          std::vector<HloSharding> elements;
          elements.reserve(sharding.tuple_elements().size());
          CHECK(!sharding.tuple_elements().empty());
          GroupedSharding grouped0 =
              hlo_sharding_util::GetManualSubgroupSharding(
                  sharding.tuple_elements()[0]);
          if (ref != nullptr) {
            auto aligned =
                AlignGroupsWithIfCompatible(std::move(grouped0), *ref);
            TF_RET_CHECK(aligned.has_value())
                << "Incompatible manual sharding at " << hlo->ToString();
            grouped0 = std::move(*aligned);
          }
          elements.push_back(std::move(grouped0.sharding));
          for (int64_t i = 1; i < sharding.tuple_elements().size(); ++i) {
            auto grouped_i = AlignGroupsWithIfCompatible(
                hlo_sharding_util::GetManualSubgroupSharding(
                    sharding.tuple_elements()[i]),
                grouped0);
            TF_RET_CHECK(grouped_i.has_value())
                << "Incompatible manual sharding between tuple elements: "
                << hlo->ToString();
            elements.push_back(std::move(grouped_i->sharding));
          }
          grouped0.sharding = HloSharding::Tuple(shape, elements);
          return grouped0;
        };
        TF_ASSIGN_OR_RETURN(
            auto group_sharding,
            get_grouped_sharding(hlo->sharding(), hlo->shape()));
        // Enter grouped mode: save the current sharding / partition count /
        // partition id / collective-ops creator so Postprocess can restore
        // them, and install the per-group equivalents.
        visiting_hlo_sharding_ = hlo->sharding();
        hlo->set_sharding(group_sharding.sharding);
        device_groups_ = group_sharding.device_groups;
        visiting_num_partitions_ = num_partitions_;
        num_partitions_ = num_partitions_ / group_sharding.device_groups.size();
        visiting_partition_id_ = partition_id_;
        visiting_collective_ops_creator_ = std::move(collective_ops_creator_);
        auto grouped_state = MakePartitioningState();
        collective_ops_creator_ =
            std::move(grouped_state.collective_ops_creator);
        partition_id_ = grouped_state.partition_id;
        visiting_hlo_operand_shardings_.reserve(hlo->operand_count());
        visiting_state_.reserve(hlo->operand_count());
        for (HloInstruction* operand : hlo->unique_operands()) {
          visiting_hlo_operand_shardings_.push_back(operand->sharding());
          auto old_state = GetPartitionedHlo(operand).state();
          visiting_state_.push_back(old_state);
          // Scalar array constants that are not manual-subgroup sharded can
          // be used as-is in any group; keep their sharding/state untouched.
          if (operand->shape().IsArray() && operand->IsConstant() &&
              operand->shape().rank() == 0 &&
              !operand->sharding().IsManualSubgroup()) {
            continue;
          }
          TF_ASSIGN_OR_RETURN(
              auto op_group_sharding,
              get_grouped_sharding(operand->sharding(), operand->shape(),
                                   &group_sharding));
          operand->set_sharding(op_group_sharding.sharding);
          GetPartitionedHlo(operand).hlo()->copy_sharding(operand);
          auto group_state = CreatePerGroupPartitioningState(
              old_state, op_group_sharding.device_groups, &b_);
          GetPartitionedHlo(operand).set_state(group_state);
        }
      }
    }
  }
  return absl::OkStatus();
}
// Runs after each instruction is visited: logs the instructions generated
// for it and undoes the temporary rewrites made by Preprocess — restoring
// the original (manual) shardings, leaving grouped mode (device groups,
// partition count, partition id, collective-ops creator), and restoring the
// operands' saved partitioning states.
absl::Status SpmdPartitioningVisitor::Postprocess(HloInstruction* hlo) {
  logger_->RegisterLogEntry(hlo, b_.derived_instructions(hlo));
  visiting_hlo_ = nullptr;
  b_.set_visiting_hlo(nullptr);
  // Restore shardings saved by the manual/manual-subgroup rewrites.
  if (visiting_hlo_sharding_) {
    hlo->set_sharding(*visiting_hlo_sharding_);
    GetPartitionedHlo(hlo).hlo()->set_sharding(*visiting_hlo_sharding_);
    int64_t i = 0;
    // Iterates unique_operands() in the same order Preprocess saved them.
    for (HloInstruction* operand : hlo->unique_operands()) {
      operand->set_sharding(visiting_hlo_operand_shardings_[i++]);
      GetPartitionedHlo(operand).hlo()->copy_sharding(operand);
    }
    visiting_hlo_sharding_.reset();
    visiting_hlo_operand_shardings_.clear();
  }
  // Leave grouped (manual subgroup) mode, if Preprocess entered it.
  if (!device_groups_.empty()) {
    device_groups_.clear();
    num_partitions_ = *visiting_num_partitions_;
    visiting_num_partitions_.reset();
    collective_ops_creator_ = *visiting_collective_ops_creator_;
    visiting_collective_ops_creator_.reset();
    partition_id_ = *visiting_partition_id_;
    visiting_partition_id_.reset();
    GetPartitionedHlo(hlo).set_state(MakePartitioningState());
  }
  // Restore the operands' pre-grouping partitioning states.
  if (!visiting_state_.empty()) {
    int64_t i = 0;
    for (const HloInstruction* operand : hlo->unique_operands()) {
      GetPartitionedHlo(operand).set_state(std::move(visiting_state_[i++]));
    }
    visiting_state_.clear();
  }
  return absl::OkStatus();
}
// Partitions an elementwise op: reshard every operand to the output
// sharding, then clone the op on the per-partition (shard) shapes.
absl::Status SpmdPartitioningVisitor::HandleElementwise(HloInstruction* hlo) {
  const HloSharding& out_sharding = hlo->sharding();
  std::vector<HloInstruction*> sharded_operands;
  sharded_operands.reserve(hlo->operand_count());
  for (HloInstruction* operand : hlo->operands()) {
    sharded_operands.push_back(
        GetPartitionedHlo(operand).Reshard(out_sharding).hlo());
  }
  SetPartitionedHlo(hlo, [&] {
    const Shape shard_shape = MakePartitionedShape(hlo->shape(), out_sharding);
    return b_.AddInstruction(
        hlo->CloneWithNewOperands(shard_shape, sharded_operands));
  });
  return absl::OkStatus();
}
// Partitions a concatenate. If the concat dimension is unsharded, each
// partition just concatenates its local shards. Otherwise, every partition
// scatters its (resharded, zero-padded) operand shards into a zero-filled
// buffer spanning the whole concat dimension, the buffers are combined with
// a sum all-reduce across the devices that differ only along the concat
// dimension, and each partition slices out its own shard of the result.
absl::Status SpmdPartitioningVisitor::HandleConcatenate(HloInstruction* hlo) {
  const HloSharding& sharding = hlo->sharding();
  if (sharding.IsTileMaximal()) {
    return DefaultAction(hlo);
  }
  const Shape shard_shape = MakePartitionedShape(hlo->shape(), hlo->sharding());
  const int64_t dimension = hlo->concatenate_dimension();
  if (sharding.tile_assignment().dim(dimension) == 1) {
    // Concat dimension is not sharded: reshard the operands and concatenate
    // locally on every partition.
    std::vector<HloInstruction*> new_operands;
    for (HloInstruction* operand : hlo->operands()) {
      new_operands.push_back(
          GetPartitionedHlo(operand).Reshard(sharding).hlo());
    }
    SetPartitionedHlo(hlo, [&] {
      return b_.AddInstruction(
          hlo->CloneWithNewOperands(shard_shape, new_operands));
    });
    return absl::OkStatus();
  }
  // Size the temp buffer as (per-shard size) * (shards along the concat
  // dim), extended if the last operand's padded shards would write past the
  // padded output end.
  auto temp_output_shape = MakePartitionedShape(hlo->shape(), sharding);
  auto last_operand_padded_shape =
      MakePartitionedShape(hlo->operands().back()->shape(), sharding);
  // Padding (in elements) added to the last operand vs. to the full output
  // when rounding each up to a multiple of the shard count.
  int last_operand_padding =
      last_operand_padded_shape.dimensions(dimension) *
          sharding.tile_assignment().dim(dimension) -
      hlo->operands().back()->shape().dimensions(dimension);
  int temp_output_padding = temp_output_shape.dimensions(dimension) *
                                sharding.tile_assignment().dim(dimension) -
                            hlo->shape().dimensions(dimension);
  int padding_for_last_operand =
      last_operand_padding < temp_output_padding
          ? 0
          : last_operand_padding - temp_output_padding;
  temp_output_shape.set_dimensions(
      dimension, temp_output_shape.dimensions(dimension) *
                         sharding.tile_assignment().dim(dimension) +
                     padding_for_last_operand);
  auto temp_output = CreateZero(temp_output_shape, &b_);
  // Each partition writes its shard of every operand into the zero buffer at
  // offset = (its tile ordinal along the concat dim) * shard size + the
  // running offset of that operand within the concatenation.
  int64_t offset = 0;
  auto state = MakePartitioningState();
  for (HloInstruction* operand : hlo->operands()) {
    auto spmd_operand =
        GetPartitionedHlo(operand).Reshard(sharding).PadWithZero().hlo();
    std::vector<HloInstruction*> start_indices(
        hlo->shape().rank(), b_.AddInstruction(HloInstruction::CreateConstant(
                                 LiteralUtil::Zero(S32))));
    start_indices[dimension] =
        MultiplyAddDivideOffsetCalculation(
            spmd_operand->shape().dimensions(dimension), offset, 1)
            .Calculate(MakeTiledPartitionOrdinals(sharding, state.partition_id,
                                                  &b_)[dimension],
                       &b_);
    temp_output = b_.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
        temp_output_shape, temp_output, spmd_operand, start_indices));
    offset += operand->shape().dimensions(dimension);
  }
  // Combine across devices that differ only along the concat dimension:
  // group the sharding on all other dims and all-reduce within each group.
  std::vector<int64_t> non_concat_dims;
  non_concat_dims.reserve(hlo->shape().rank() - 1);
  for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
    if (i != dimension) {
      non_concat_dims.push_back(i);
    }
  }
  auto grouped =
      hlo_sharding_util::GroupShardingOnDims(sharding, non_concat_dims);
  auto per_group_partitioner_state =
      CreatePerGroupPartitioningState(state, grouped.device_groups, &b_);
  auto all_reduce = per_group_partitioner_state.collective_ops_creator
                        .create_cross_partition_all_reduce(
                            &b_, temp_output,
                            MakeBinaryAdd(hlo->shape().element_type(), module_),
                            {}, NewChannel());
  SetPartitionedHlo(hlo, [&] {
    // Slice this partition's shard back out of the assembled buffer.
    auto start_indices = MakeTiledPartitionOrdinals(
        grouped.sharding, per_group_partitioner_state.partition_id, &b_);
    start_indices[dimension] = MultiplyAddDivideOffsetCalculation(
                                   shard_shape.dimensions(dimension), 0, 1)
                                   .Calculate(start_indices[dimension], &b_);
    return b_.AddInstruction(HloInstruction::CreateDynamicSlice(
        shard_shape, all_reduce, start_indices, shard_shape.dimensions()));
  });
  return absl::OkStatus();
}
// Partitions a slice by resharding the operand as a windowed input matching
// the slice pattern (so each partition holds the data its output shard
// needs), then slicing locally. Falls back to DefaultAction when the
// windowed reshard is not supported for this slice.
absl::Status SpmdPartitioningVisitor::HandleSlice(HloInstruction* hlo) {
  const HloSharding& sharding = hlo->sharding();
  if (sharding.IsTileMaximal()) {
    return DefaultAction(hlo);
  }
  auto operand = GetPartitionedHlo(hlo->operand(0)).Reshard(sharding);
  auto reshard_operand =
      ReshardDataForSlicing(hlo->slice_strides(), hlo->slice_starts(),
                            hlo->slice_limits(), operand, sharding, &b_);
  if (!reshard_operand.has_value()) {
    return DefaultAction(hlo);
  }
  TF_RET_CHECK(!reshard_operand->dynamic_slice_index_on_output.has_value());
  HloInstruction* final_operand = SliceDataFromWindowReshard(
      *reshard_operand, hlo->slice_strides(), hlo->shape(), sharding, &b_);
  SetPartitionedHlo(hlo, [&] {
    if (final_operand != reshard_operand->sharded_input) {
      return final_operand;
    }
    // Slicing was a no-op on the shard; emit a copy so the result is a
    // distinct instruction that can carry its own sharding.
    return b_.AddInstruction(HloInstruction::CreateUnary(
        final_operand->shape(), HloOpcode::kCopy, final_operand));
  });
  return absl::OkStatus();
}
// Partitions a sort. Cases, in order:
//  1) sharding pinned to a unique device: move operands there and sort;
//  2) TopK-like sort with the sort dimension partitioned: sort each shard,
//     keep the first k per shard, replicate the k * partition_count
//     candidates, and run a final replicated sort;
//  3) sort dimension sharded (rank > 1, uniform tuple subshardings): move
//     the sharding off the sort dimension — either by swapping it into a
//     free dimension via a tile-assignment transpose, or by partially
//     replicating on the sort dimension — and sort locally;
//  4) otherwise: require the sort dims to be unsharded and sort each shard.
absl::Status SpmdPartitioningVisitor::HandleSort(HloInstruction* hlo) {
  HloSharding sharding = hlo->sharding();
  int64_t input_count = 1;
  if (hlo->shape().IsTuple()) {
    input_count = hlo->shape().tuple_shapes_size();
    CHECK_GT(input_count, 0);
  }
  // Case 1: everything on one device.
  if (sharding.HasUniqueDevice()) {
    std::vector<HloInstruction*> new_operands(input_count, nullptr);
    for (int64_t i = 0; i != input_count; ++i) {
      HloSharding subsharding =
          hlo->sharding().IsTuple()
              ? hlo->sharding().GetSubSharding(hlo->shape(), {i})
              : hlo->sharding();
      CHECK(!subsharding.IsTuple() && subsharding.HasUniqueDevice());
      new_operands[i] =
          GetPartitionedHlo(hlo->operand(i)).Reshard(subsharding).hlo();
    }
    auto clone = b_.AddInstruction(
        hlo->CloneWithNewOperands(hlo->shape(), new_operands));
    clone->set_sharding(sharding);
    SetPartitionedHlo(
        hlo, PartitionedHlo(clone, hlo->shape(), MakePartitioningState()));
    return absl::OkStatus();
  }
  // Case 2: TopK pattern — k is the number of elements kept per shard.
  auto k = GetKValueInTopKWhenPartitionSortDim(hlo);
  if (k.has_value()) {
    HloSortInstruction* sort = DynCast<HloSortInstruction>(hlo);
    const int64_t sort_dim = sort->sort_dimension();
    auto input = hlo->operand(0);
    auto index = hlo->operand(1);
    const HloSharding& input_sharding = input->sharding();
    const int64_t partition_count =
        input_sharding.tile_assignment().dim(sort_dim);
    const int64_t input_size = input->shape().dimensions(sort_dim);
    const auto element_type = input->shape().element_type();
    const auto index_type = index->shape().element_type();
    // Pad values with the type's "first" (smallest for the usual compare)
    // value and indices with the "last" value so padding loses the TopK.
    auto partitioned_input = GetPartitionedHlo(input).PadWithValue(
        CreateFirstWithType(element_type, &b_));
    auto partitioned_index =
        GetPartitionedHlo(index)
            .Reshard(input_sharding)
            .PadWithValue(CreateLastWithType(index_type, &b_));
    // Per-shard sort on the padded (rounded-up) shapes.
    std::vector<int64_t> replicated_dimensions(
        input->shape().dimensions().begin(), input->shape().dimensions().end());
    replicated_dimensions[sort_dim] = RoundUpTo(input_size, partition_count);
    const Shape replicated_shape = ShapeUtil::MakeTupleShape(
        {ShapeUtil::MakeShape(element_type, replicated_dimensions),
         ShapeUtil::MakeShape(index_type, replicated_dimensions)});
    auto topk_sharding =
        input_sharding.GetTupleSharding(replicated_shape).value();
    auto shard_shape = MakePartitionedShape(replicated_shape, topk_sharding);
    auto topk = b_.AddInstruction(hlo->CloneWithNewOperands(
        shard_shape, {partitioned_input.hlo(), partitioned_index.hlo()}));
    HloInstruction* value_gte =
        b_.AddInstruction(HloInstruction::CreateGetTupleElement(
            topk->shape().tuple_shapes(0), topk, 0));
    HloInstruction* index_gte =
        b_.AddInstruction(HloInstruction::CreateGetTupleElement(
            topk->shape().tuple_shapes(1), topk, 1));
    // Keep the first k per shard and replicate the k * partition_count
    // candidates across all partitions.
    replicated_dimensions[sort_dim] = k.value() * partition_count;
    auto slice_input = SliceFirstK(value_gte, &b_, sort_dim, k.value());
    slice_input->set_sharding(input_sharding);
    PartitionedHlo partitioned_slice_input(
        slice_input, ShapeUtil::MakeShape(element_type, replicated_dimensions),
        MakePartitioningState());
    auto replicated_slice_input =
        partitioned_slice_input.Reshard(HloSharding::Replicate()).hlo();
    auto slice_index = SliceFirstK(index_gte, &b_, sort_dim, k.value());
    slice_index->set_sharding(input_sharding);
    PartitionedHlo partitioned_slice_index(
        slice_index, ShapeUtil::MakeShape(index_type, replicated_dimensions),
        MakePartitioningState());
    auto replicated_slice_index =
        partitioned_slice_index.Reshard(HloSharding::Replicate()).hlo();
    // Final replicated sort over the gathered candidates.
    const Shape final_topk_shape = ShapeUtil::MakeTupleShape(
        {ShapeUtil::MakeShape(element_type, replicated_dimensions),
         ShapeUtil::MakeShape(index_type, replicated_dimensions)});
    HloInstruction* final_sort = b_.AddInstruction(HloInstruction::CreateSort(
        final_topk_shape, sort_dim,
        {replicated_slice_input, replicated_slice_index}, sort->to_apply(),
        sort->is_stable()));
    final_sort->set_sharding(
        HloSharding::Replicate().GetTupleSharding(final_sort->shape()).value());
    PartitionedHlo replicated_sort(final_sort, final_sort->shape(),
                                   MakePartitioningState());
    SetPartitionedHlo(hlo, replicated_sort.Reshard(hlo->sharding()));
    return absl::OkStatus();
  }
  auto sort = DynCast<HloSortInstruction>(hlo);
  auto sort_dim = sort->sort_dimension();
  VLOG(2) << "sort dim: " << sort_dim;
  // For tuple sorts, the rewrite below requires all elements to share one
  // sub-sharding.
  auto cur_sharding = sharding;
  bool same_subsharding = true;
  if (sharding.IsTuple()) {
    cur_sharding = sharding.GetSubSharding(hlo->shape(), {0});
    for (int64_t i = 1; i != input_count; ++i) {
      if (cur_sharding != hlo->sharding().GetSubSharding(hlo->shape(), {i})) {
        same_subsharding = false;
        break;
      }
    }
  }
  auto subshape = hlo->operand(0)->shape();
  // Case 3: the sort dimension itself is sharded — move the sharding away.
  if (subshape.rank() > 1 && same_subsharding && cur_sharding.IsTiled() &&
      !cur_sharding.IsTileMaximal() &&
      cur_sharding.tile_assignment().dim(sort_dim) != 1) {
    std::vector<int64_t> tile_assignment_dims(
        cur_sharding.tile_assignment().dimensions().begin(),
        cur_sharding.tile_assignment().dimensions().end());
    // Prefer an unsharded, non-degenerate dim divisible by the shard count;
    // otherwise fall back to the first unsharded non-sort dim.
    int64_t picked_dim = -1;
    int64_t first_nonsort_nonsharded_dim = -1;
    auto nshards = tile_assignment_dims[sort_dim];
    for (int64_t dim = 0; dim < subshape.rank(); ++dim) {
      if (dim == sort_dim || tile_assignment_dims[dim] != 1 ||
          subshape.dimensions(dim) == 1) {
        continue;
      }
      if (first_nonsort_nonsharded_dim == -1) {
        first_nonsort_nonsharded_dim = dim;
      }
      if (subshape.dimensions(dim) % nshards != 0) {
        continue;
      }
      picked_dim = dim;
      break;
    }
    if (picked_dim == -1) {
      picked_dim = first_nonsort_nonsharded_dim;
    }
    std::vector<HloInstruction*> new_operands;
    std::vector<HloSharding> new_shardings;
    std::optional<HloSharding> new_output_sharding;
    if (picked_dim != -1) {
      VLOG(2) << "Sort partitioning - picked target dimension to move the "
                 "sharding: "
              << picked_dim;
      CHECK_NE(picked_dim, -1)
          << "Sort partitioning - sharding cannot exist in the sort dimension "
             "if "
             "there are no free dimensions to move it into";
      // Swap the sort dim's shards into the picked dim via a transposed
      // sharding. NOTE: `permutation`'s initial values (copied from the tile
      // dimensions) only set its size — c_iota overwrites them immediately.
      std::vector<int64_t> permutation(
          cur_sharding.tile_assignment().dimensions().begin(),
          cur_sharding.tile_assignment().dimensions().end());
      absl::c_iota(permutation, 0);
      std::swap(permutation[sort_dim], permutation[picked_dim]);
      auto new_sharding =
          hlo_sharding_util::TransposeSharding(cur_sharding, permutation);
      VLOG(2) << "Sort partitioning - new sharding: "
              << new_sharding.ToString();
      for (auto& operand : hlo->operands()) {
        new_operands.push_back(
            GetPartitionedHlo(operand).Reshard(new_sharding).hlo());
        new_shardings.push_back(new_sharding);
      }
      new_output_sharding = new_sharding;
      if (sharding.IsTuple()) {
        new_output_sharding = HloSharding::Tuple(sort->shape(), new_shardings);
      }
    } else {
      // No free dimension: drop the sort-dim sharding by partially
      // replicating on it.
      auto new_sharding =
          hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(cur_sharding,
                                                                   {sort_dim});
      for (auto& operand : hlo->operands()) {
        new_operands.push_back(
            GetPartitionedHlo(operand).Reshard(new_sharding).hlo());
        new_shardings.push_back(new_sharding);
      }
      new_output_sharding = new_sharding;
      if (sharding.IsTuple()) {
        new_output_sharding = HloSharding::Tuple(sort->shape(), new_shardings);
      }
    }
    auto final_sort = b_.AddInstruction(hlo->CloneWithNewOperands(
        MakePartitionedShape(sort->shape(), *new_output_sharding),
        new_operands));
    final_sort->set_sharding(*new_output_sharding);
    PartitionedHlo psort(final_sort, sort->shape(), MakePartitioningState());
    SetPartitionedHlo(sort, psort.Reshard(sort->sharding()));
    return absl::OkStatus();
  }
  // Case 4: generic path — requires uniform tuple subshardings and no
  // sharding on any sort dimension; otherwise fall back.
  if (hlo->shape().IsTuple()) {
    if (hlo->shape().tuple_shapes_size() == 0) {
      return DefaultAction(hlo);
    }
    sharding = hlo->sharding().GetSubSharding(hlo->shape(), {0});
    for (int64_t i = 1; i < hlo->operand_count(); ++i) {
      if (sharding != hlo->sharding().GetSubSharding(hlo->shape(), {i})) {
        return DefaultAction(hlo);
      }
    }
  }
  if (sharding.IsTileMaximal()) {
    return DefaultAction(hlo);
  }
  for (int64_t dim : hlo->dimensions()) {
    if (sharding.tile_assignment().dim(dim) > 1) {
      return DefaultAction(hlo);
    }
  }
  std::vector<HloInstruction*> new_operands;
  for (HloInstruction* operand : hlo->operands()) {
    new_operands.push_back(GetPartitionedHlo(operand).Reshard(sharding).hlo());
  }
  SetPartitionedHlo(hlo, [&] {
    return b_.AddInstruction(hlo->CloneWithNewOperands(
        MakePartitionedShape(hlo->shape(), hlo->sharding()), new_operands));
  });
  return absl::OkStatus();
}
// Partitions a transpose: since a transpose only permutes dimensions, the
// operand is resharded to the output sharding transposed by the inverse
// permutation, and each local shard is transposed in place.
absl::Status SpmdPartitioningVisitor::HandleTranspose(HloInstruction* hlo) {
  const HloSharding& out_sharding = hlo->sharding();
  if (out_sharding.IsTileMaximal()) {
    return DefaultAction(hlo);
  }
  // Invert the transpose permutation: entry d is the output position of
  // input dimension d.
  const int64_t rank = hlo->shape().rank();
  std::vector<int64_t> inverse_permutation(rank);
  for (int64_t out_dim = 0; out_dim < rank; ++out_dim) {
    inverse_permutation[hlo->dimensions(out_dim)] = out_dim;
  }
  const HloSharding operand_sharding =
      hlo_sharding_util::TransposeSharding(out_sharding, inverse_permutation);
  HloInstruction* sharded_operand = GetPartitionedHlo(hlo->operand(0))
                                        .Reshard(operand_sharding)
                                        .hlo();
  SetPartitionedHlo(hlo, [&] {
    return b_.AddInstruction(hlo->CloneWithNewOperands(
        MakePartitionedShape(hlo->shape(), out_sharding), {sharded_operand}));
  });
  return absl::OkStatus();
}
absl::Status SpmdPartitioningVisitor::HandleReshape(HloInstruction* hlo) {
const HloSharding& sharding = hlo->sharding();
if (sharding.IsTileMaximal()) {
return DefaultAction(hlo);
}
auto operand = GetPartitionedHlo(hlo->operand(0));
auto desired_operand = [&](const HloSharding& output_sharding)
-> std::optional<HloInstruction*> {
std::optional<HloSharding> desired_operand_sharding =
hlo_sharding_util::ReshapeSharding(
hlo->shape(), hlo->operand(0)->shape(), output_sharding);
if (desired_operand_sharding.has_value() &&
output_sharding.NumTiles() == desired_operand_sharding->NumTiles()) {
return b_.AddInstruction(hlo->CloneWithNewOperands(
MakePartitionedShape(hlo->shape(), output_sharding),
{operand.Reshard(*desired_operand_sharding).hlo()}));
}
return std::nullopt;
};
if (auto operand_hlo = desired_operand(hlo->sharding())) {
SetPartitionedHlo(hlo, [&] { return *operand_hlo; });
return absl::OkStatus();
}
std::optional<HloSharding> desired_output_sharding =
hlo_sharding_util::ReshapeSharding(hlo->operand(0)->shape(), hlo->shape(),
operand.sharding());
if (desired_output_sharding.has_value()) {
if (auto operand_hlo = desired_operand(*desired_output_sharding)) {
(*operand_hlo)->set_sharding(*desired_output_sharding);
SetPartitionedHlo(hlo, [&] {
return PartitionedHlo(*operand_hlo, hlo->shape(),
MakePartitioningState())
.Reshard(hlo->sharding())
.hlo();
});
return absl::OkStatus();
}
}
auto shard_reshape =
[](PartitionedHlo& operand, const HloSharding& sharding,
const Shape& base_shape) -> absl::StatusOr<HloInstruction*> {
auto replicate = [&] {
HloInstruction* rep = operand.Replicate().hlo();
HloInstruction* reshape = operand.state().b->AddInstruction(
HloInstruction::CreateReshape(base_shape, rep));
reshape->set_sharding(HloSharding::Replicate());
return PartitionedHlo(reshape, base_shape, operand.state())
.Reshard(sharding)
.hlo();
};
if (operand.sharding().NumTiles() != sharding.NumTiles()) {
return replicate();
}
auto maybe_input_sharded_dim = UniqueTiledDim(operand.sharding());
auto maybe_output_sharded_dim = UniqueTiledDim(sharding);
if (!maybe_input_sharded_dim || !maybe_output_sharded_dim) {
return replicate();
}
int64_t input_sharded_dim = *maybe_input_sharded_dim;
int64_t output_sharded_dim = *maybe_output_sharded_dim;
int64_t input_major_dims_size = 1;
for (int64_t i = 0; i < input_sharded_dim; ++i) {
input_major_dims_size *= operand.base_shape().dimensions(i);
}
int64_t output_major_dims_size = 1;
for (int64_t i = 0; i < output_sharded_dim; ++i) {
output_major_dims_size *= base_shape.dimensions(i);
}
if (input_major_dims_size != output_major_dims_size) {
return replicate();
}
auto new_input_tile_assignment = sharding.tile_assignment().Reshape(
operand.sharding().tile_assignment().dimensions());
auto aligned_sharding =
sharding.ReplicateOnLastTileDim()
? HloSharding::PartialTile(new_input_tile_assignment)
: HloSharding::Tile(new_input_tile_assignment);
operand = operand.Reshard(aligned_sharding);
auto replication_count =
sharding.ReplicateOnLastTileDim()
? sharding.tile_assignment().dimensions().back()
: 1;
int64_t input_dim_size = operand.base_shape().dimensions(input_sharded_dim);
int64_t output_dim_size = base_shape.dimensions(output_sharded_dim);
auto input_shard_shape =
MakePartitionedShape(operand.base_shape(), operand.sharding());
auto output_shard_shape = MakePartitionedShape(base_shape, sharding);
if (input_dim_size % output_dim_size == 0) {
int64_t split_factor = input_dim_size / output_dim_size;
int64_t output_shard_size =
output_shard_shape.dimensions(output_sharded_dim);
Window window;
for (int64_t i = 0; i < base_shape.rank(); ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_size(1);
dim->set_stride(1);
dim->set_window_dilation(1);
dim->set_window_reversal(false);
dim->set_base_dilation(1);
dim->set_padding_low(0);
if (i == input_sharded_dim) {
dim->set_padding_high(output_shard_size * split_factor *
sharding.tile_assignment().num_elements() /
replication_count -
input_dim_size);
} else {
dim->set_padding_high(0);
}
}
auto reshard_operand = operand.ReshardAsWindowedInput(
window, operand.sharding(),
CreateZero(ShapeUtil::MakeShape(base_shape.element_type(), {}),
operand.state().b),
false);
if (!reshard_operand.has_value()) {
return replicate();
}
TF_RET_CHECK(!reshard_operand->dynamic_slice_index_on_output.has_value());
CHECK_EQ(
reshard_operand->sharded_input->shape().dimensions(input_sharded_dim),
output_shard_size * split_factor);
return operand.state().b->AddInstruction(HloInstruction::CreateReshape(
output_shard_shape, reshard_operand->sharded_input));
} else if (output_dim_size % input_dim_size == 0) {
int64_t merge_factor = output_dim_size / input_dim_size;
auto tmp_shard_shape = output_shard_shape;
tmp_shard_shape.set_dimensions(
output_sharded_dim,
input_shard_shape.dimensions(input_sharded_dim) * merge_factor);
auto tmp_reshape = operand.state().b->AddInstruction(
HloInstruction::CreateReshape(tmp_shard_shape, operand.hlo()));
tmp_reshape->set_sharding(sharding);
auto tmp_full_shape = tmp_shard_shape;
tmp_full_shape.set_dimensions(
output_sharded_dim, tmp_shard_shape.dimensions(output_sharded_dim) *
sharding.tile_assignment().num_elements() /
replication_count);
auto tmp_output =
PartitionedHlo(tmp_reshape, tmp_full_shape, operand.state());
Window window;
for (int64_t i = 0; i < tmp_shard_shape.rank(); ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_size(1);
dim->set_stride(1);
dim->set_window_dilation(1);
dim->set_window_reversal(false);
dim->set_base_dilation(1);
dim->set_padding_low(0);
if (i == output_sharded_dim) {
dim->set_padding_high(output_dim_size -
tmp_shard_shape.dimensions(output_sharded_dim) *
sharding.tile_assignment().num_elements() /
replication_count);
} else {
dim->set_padding_high(0);
}
}
auto reshard_output = tmp_output.ReshardAsWindowedInput(
window, sharding,
CreateZero(ShapeUtil::MakeShape(base_shape.element_type(), {}),
operand.state().b),
false);
if (!reshard_output.has_value()) {
return replicate();
}
TF_RET_CHECK(!reshard_output->dynamic_slice_index_on_output.has_value());
CHECK_EQ(
reshard_output->sharded_input->shape().dimensions(output_sharded_dim),
output_shard_shape.dimensions(output_sharded_dim));
return reshard_output->sharded_input;
}
return replicate();
};
std::function<absl::StatusOr<HloInstruction*>(
PartitionedHlo&, const HloSharding&, const Shape&)>
recursive_shard =
[&](PartitionedHlo& operand, const HloSharding& sharding,
const Shape& base_shape) -> absl::StatusOr<HloInstruction*> {
const Shape& operand_base_shape = operand.base_shape();
HloSharding propagated = hlo_sharding_util::PropagateShardingThroughReshape(
operand_base_shape, base_shape, operand.sharding());
if (propagated.IsTiled()) {
auto operand_propagated_back = hlo_sharding_util::ReshapeSharding(
base_shape, operand_base_shape, propagated);
std::vector<int64_t> operand_group_dims;
if (!operand_propagated_back.has_value()) {
return shard_reshape(operand, sharding, base_shape);
}
CHECK(operand_propagated_back->IsTiled());
Shape inner_operand_base_shape = operand_base_shape;
for (int64_t i = 0; i < operand_base_shape.rank(); ++i) {
if (operand_propagated_back->tile_assignment().dim(i) > 1) {
operand_group_dims.push_back(i);
inner_operand_base_shape.set_dimensions(
i, operand.hlo()->shape().dimensions(i));
}
}
Shape inner_base_shape = base_shape;
bool use_original_output_sharding =
sharding.NumTiles() > propagated.NumTiles();
std::vector<int64_t> output_group_dims;
for (int64_t i = 0; i < inner_base_shape.rank(); ++i) {
int64_t num_shards = propagated.tile_assignment().dim(i);
if (num_shards > 1) {
inner_base_shape.set_dimensions(
i, CeilOfRatio(base_shape.dimensions(i), num_shards));
output_group_dims.push_back(i);
if (num_shards != sharding.tile_assignment().dim(i)) {
use_original_output_sharding = false;
}
}
}
auto operand_group = hlo_sharding_util::GroupShardingOnDims(
operand.sharding(), operand_group_dims);
auto output_group = hlo_sharding_util::GroupShardingOnDims(
use_original_output_sharding ? sharding : propagated,
output_group_dims);
if (use_original_output_sharding) {
output_group = AlignGroupsWith(std::move(output_group), operand_group);
}
auto inner_state = CreatePerGroupPartitioningState(
operand.state(), operand_group.device_groups, operand.state().b);
HloInstruction* inner_operand_hlo =
b_.AddInstruction(HloInstruction::CreateUnary(
operand.hlo()->shape(), HloOpcode::kCopy, operand.hlo()));
inner_operand_hlo->set_sharding(operand_group.sharding);
auto inner_operand = PartitionedHlo(
inner_operand_hlo, inner_operand_base_shape, inner_state);
TF_ASSIGN_OR_RETURN(HloInstruction * reshape,
recursive_shard(inner_operand, output_group.sharding,
inner_base_shape));
reshape->set_sharding(hlo_sharding_util::UngroupSharding(output_group));
return PartitionedHlo(reshape, base_shape, operand.state())
.Reshard(sharding)
.hlo();
}
return shard_reshape(operand, sharding, base_shape);
};
TF_ASSIGN_OR_RETURN(HloInstruction * partitioned,
recursive_shard(operand, sharding, hlo->shape()));
SetPartitionedHlo(hlo, [&] { return partitioned; });
return absl::OkStatus();
}
absl::Status SpmdPartitioningVisitor::HandleIota(HloInstruction* hlo) {
  const HloSharding& sharding = hlo->sharding();
  // Tile-maximal iotas go through the default path.
  if (sharding.IsTileMaximal()) {
    return DefaultAction(hlo);
  }
  SetPartitionedHlo(hlo, [&] {
    const int64_t iota_dim = Cast<HloIotaInstruction>(hlo)->iota_dimension();
    // Emit an iota over the local shard shape first.
    HloInstruction* local_iota = b_.AddInstruction(HloInstruction::CreateIota(
        MakePartitionedShape(hlo->shape(), sharding), iota_dim));
    if (sharding.tile_assignment().dim(iota_dim) <= 1) {
      // The iota dimension itself is not sharded, so each partition's local
      // iota already holds the right values.
      return local_iota;
    }
    // Shift each partition's values by its ordinal along the iota dimension
    // times the per-shard size of that dimension.
    auto ordinals = MakeTiledPartitionOrdinals(
        sharding, MakePartitioningState().partition_id, &b_);
    HloInstruction* shard_size =
        b_.AddInstruction(HloInstruction::CreateConstant(
            LiteralUtil::CreateR0<int32_t>(
                local_iota->shape().dimensions(iota_dim))));
    HloInstruction* shift = b_.AddInstruction(HloInstruction::CreateBinary(
        ShapeUtil::MakeShape(S32, {}), HloOpcode::kMultiply,
        ordinals[iota_dim], shard_size));
    if (local_iota->shape().element_type() != S32) {
      // Match the iota's element type before adding the offset.
      shift = b_.AddInstruction(HloInstruction::CreateConvert(
          ShapeUtil::MakeShape(local_iota->shape().element_type(), {}),
          shift));
    }
    HloInstruction* shift_bcast = b_.AddInstruction(
        HloInstruction::CreateBroadcast(local_iota->shape(), shift, {}));
    return b_.AddInstruction(HloInstruction::CreateBinary(
        local_iota->shape(), HloOpcode::kAdd, local_iota, shift_bcast));
  });
  return absl::OkStatus();
}
// Lowers an instruction whose sharding names a single device: all operands
// are resharded onto that device, and the op is wrapped in a conditional so
// that only the owning partition executes it; every other partition produces
// zeros of the result shape.
absl::Status SpmdPartitioningVisitor::HandleSingleDevice(
    const HloInstruction* hlo) {
  TF_RET_CHECK(hlo->sharding().HasUniqueDevice());
  int64_t device = hlo->sharding().GetUniqueDevice();
  const HloSharding sharding = HloSharding::AssignDevice(device);
  std::vector<HloInstruction*> operands;
  std::vector<const Shape*> operand_shapes;
  const auto& old_operands = hlo->operands();
  const auto old_operands_size = old_operands.size();
  operands.reserve(old_operands_size);
  operand_shapes.reserve(old_operands_size);
  for (const HloInstruction* operand : old_operands) {
    operands.push_back(GetPartitionedHlo(operand).Reshard(sharding).hlo());
    operand_shapes.push_back(&operand->shape());
  }
  // Pack all operands into a single tuple so both conditional branches can
  // take one parameter.
  auto operand = b_.AddInstruction(HloInstruction::CreateTuple(operands));
  auto operand_shape = ShapeUtil::MakeTupleShapeWithPtrs(operand_shapes);
  // Predicate: is this partition the one assigned to `device`?
  auto on_device = b_.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<uint32_t>(device)));
  auto pred = b_.AddInstruction(HloInstruction::CreateCompare(
      ShapeUtil::MakeShape(PRED, {}), MakePartitioningState().partition_id,
      on_device, ComparisonDirection::kEq));
  // True branch: unpack the tuple and run a clone of the original op.
  SpmdBuilder true_b("true_computation", visiting_hlo_);
  HloComputation* true_computation;
  {
    auto param = true_b.AddInstruction(HloInstruction::CreateParameter(
        0, operand_shape, "true_branch_param"));
    std::vector<HloInstruction*> new_operands;
    new_operands.reserve(operands.size());
    for (int64_t i = 0; i < operands.size(); ++i) {
      new_operands.push_back(true_b.AddInstruction(
          HloInstruction::CreateGetTupleElement(*operand_shapes[i], param, i)));
    }
    auto root = true_b.AddInstruction(
        hlo->CloneWithNewOperands(hlo->shape(), new_operands));
    true_computation = module_->AddEmbeddedComputation(true_b.Build(root));
  }
  // False branch: ignore the parameter and return zeros of the result shape.
  SpmdBuilder false_b("false_computation", visiting_hlo_);
  HloComputation* false_computation;
  {
    false_b.AddInstruction(HloInstruction::CreateParameter(
        0, operand_shape, "false_branch_param"));
    auto root = CreateZero(hlo->shape(), &false_b);
    false_computation = module_->AddEmbeddedComputation(false_b.Build(root));
  }
  SetPartitionedHlo(hlo, [&]() {
    return b_.AddInstruction(HloInstruction::CreateConditional(
        hlo->shape(), pred, operand, true_computation, operand,
        false_computation));
  });
  return absl::OkStatus();
}
// Handles all-reduce. Cross-replica single-operand all-reduces are treated
// like elementwise ops. Channel (cross-partition) all-reduces are only
// accepted in manual / replicated / manual-subgroup modes; for manual
// subgroups, the replica groups are validated to never span more than one
// subgroup.
absl::Status SpmdPartitioningVisitor::HandleAllReduce(HloInstruction* hlo) {
  if (hlo->IsCrossReplicaAllReduce() && hlo->operand_count() == 1) {
    return HandleElementwise(hlo);
  }
  if (hlo->channel_id()) {
    TF_RET_CHECK(hlo->operand_count() == 1)
        << "SPMD partitioner supports only single-operand allreduce in manual "
           "partitioning mode.";
    if (hlo->sharding().IsManual() || hlo->sharding().IsReplicated()) {
      return HandleElementwise(hlo);
    }
    TF_RET_CHECK(hlo->sharding().IsManualSubgroup())
        << "Cross-partition allreduce must be in (partial) manual partitioning "
           "mode.";
    auto* ar = Cast<HloAllReduceInstruction>(hlo);
    TF_RET_CHECK(ar->use_global_device_ids())
        << "Cross-partition allreduce in partial manual partitioning mode must "
           "use global device IDs.";
    // Map each partition to its manual-subgroup ID: a mixed-radix index over
    // every tile dimension except the manual one.
    std::vector<int64_t> partition_to_group_id(
        hlo->sharding().tile_assignment().num_elements());
    hlo->sharding().tile_assignment().Each(
        [&](absl::Span<const int64_t> indices, int64_t partition) {
          int64_t group_id = 0;
          for (int64_t i = 0; i < indices.size(); ++i) {
            if (i == hlo->sharding().SubgroupManualDim()) {
              // The manual dimension does not contribute to the group ID.
              continue;
            }
            group_id *= hlo->sharding().tile_assignment().dim(i);
            group_id += indices[i];
          }
          partition_to_group_id[partition] = group_id;
        });
    // Reject replica groups that mix partitions from different subgroups.
    for (const auto& group : ar->replica_groups()) {
      int64_t first_partition = group.replica_ids(0) % num_partitions_;
      for (int64_t device : group.replica_ids()) {
        int64_t partition = device % num_partitions_;
        if (partition_to_group_id[partition] !=
            partition_to_group_id[first_partition]) {
          return InvalidArgumentStrCat(
              "Manual all-reduce across devices that belong to different "
              "manual subgroups: ",
              ar->ToString());
        }
      }
    }
    return HandleElementwise(hlo);
  }
  return DefaultAction(hlo);
}
absl::Status SpmdPartitioningVisitor::HandleBroadcast(HloInstruction* hlo) {
  // Tile-maximal output: nothing to partition.
  if (hlo->sharding().IsTileMaximal()) {
    return DefaultAction(hlo);
  }
  auto& partitioned_operand = GetPartitionedHlo(hlo->operand(0));
  // Output dimensions introduced by the broadcast (i.e. dimensions that do
  // not map back to an operand dimension).
  std::vector<int64_t> added_dims;
  const int64_t output_rank = hlo->shape().rank();
  for (int64_t dim = 0; dim < output_rank; ++dim) {
    if (!absl::c_linear_search(hlo->dimensions(), dim)) {
      added_dims.push_back(dim);
    }
  }
  // The operand sharding is the output sharding with the broadcast dims
  // replicated and then removed.
  const HloSharding operand_sharding = hlo_sharding_util::RemoveShapeDimensions(
      hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(hlo->sharding(),
                                                               added_dims),
      added_dims);
  HloInstruction* resharded_input =
      partitioned_operand.Reshard(operand_sharding).hlo();
  const Shape shard_shape = MakePartitionedShape(hlo->shape(), hlo->sharding());
  SetPartitionedHlo(hlo, [&] {
    return b_.AddInstruction(
        hlo->CloneWithNewOperands(shard_shape, {resharded_input}));
  });
  return absl::OkStatus();
}
absl::Status SpmdPartitioningVisitor::HandleConstant(HloInstruction* hlo) {
  const Literal& literal = hlo->literal();
  // Tuple constants are not handled here.
  if (literal.shape().IsTuple()) {
    return DefaultAction(hlo);
  }
  // A tiled constant is only handled when it partitions evenly and every
  // element equals the first, so all shards can use the same slice.
  if (!hlo->sharding().IsTileMaximal() &&
      (!EvenlyPartitions(hlo->shape(), hlo->sharding()) ||
       !literal.IsAllFirst())) {
    return DefaultAction(hlo);
  }
  SetPartitionedHlo(hlo, [&]() {
    const Shape shard_shape =
        MakePartitionedShape(hlo->shape(), hlo->sharding());
    // One shard's worth of data, sliced from the origin; identical on every
    // partition by the checks above.
    const std::vector<int64_t> zero_starts(hlo->shape().rank(), 0);
    HloInstruction* shard_constant =
        b_.AddInstruction(HloInstruction::CreateConstant(
            literal.Slice(zero_starts, shard_shape.dimensions())));
    // Force the constant's shape to the exact shard shape.
    *shard_constant->mutable_shape() = shard_shape;
    return shard_constant;
  });
  return absl::OkStatus();
}
absl::Status SpmdPartitioningVisitor::HandleDynamicSlice(HloInstruction* hlo) {
  if (hlo->sharding().IsTileMaximal()) {
    return DefaultAction(hlo);
  }
  const int64_t rank = hlo->shape().rank();
  // Only supported when every sharded dimension passes the operand through
  // whole (slice size equals operand size); otherwise fall back.
  for (int64_t dim = 0; dim < rank; ++dim) {
    const bool dim_is_sharded =
        hlo->sharding().tile_assignment().dim(dim) != 1;
    const bool dim_is_full = hlo->dynamic_slice_sizes()[dim] ==
                             hlo->operand(0)->shape().dimensions(dim);
    if (dim_is_sharded && !dim_is_full) {
      return DefaultAction(hlo);
    }
  }
  auto sharded_input =
      GetPartitionedHlo(hlo->operand(0)).Reshard(hlo->sharding()).hlo();
  std::vector<HloInstruction*> slice_starts(rank);
  for (int64_t dim = 0; dim < rank; ++dim) {
    if (hlo->dynamic_slice_sizes()[dim] ==
        hlo->operand(0)->shape().dimensions(dim)) {
      // Whole dimension: use a zero start index.
      slice_starts[dim] = CreateZero(hlo->operand(dim + 1)->shape(), &b_);
    } else {
      // Sliced dimension (unsharded by the check above): replicate the
      // dynamic start index so all partitions agree.
      slice_starts[dim] = GetPartitionedHlo(hlo->operand(dim + 1))
                              .Reshard(HloSharding::Replicate())
                              .hlo();
    }
  }
  SetPartitionedHlo(hlo, [&]() {
    const Shape shard_shape =
        MakePartitionedShape(hlo->shape(), hlo->sharding());
    return b_.AddInstruction(HloInstruction::CreateDynamicSlice(
        shard_shape, sharded_input, slice_starts, shard_shape.dimensions()));
  });
  return absl::OkStatus();
}
// Handles dynamic-update-slice. Strategy depends on how sharded dimensions
// relate to the update:
//   - dimensions the update covers entirely are handled locally;
//   - sharded sliced dims with constant (or size-1) start indices are handled
//     locally by rebasing indices per partition, as long as the update never
//     straddles a shard boundary;
//   - otherwise the sliced dims are replicated (keeping sharded non-slice
//     dims, if any) and the update is applied on the replicated data.
absl::Status SpmdPartitioningVisitor::HandleDynamicUpdateSlice(
    HloInstruction* hlo) {
  if (hlo->sharding().IsTileMaximal()) {
    return DefaultAction(hlo);
  }
  std::vector<int64_t> partitioned_slice_dims;
  std::vector<int64_t> slice_dims;
  std::vector<int64_t> partitioned_non_slice_dims;
  std::vector<int64_t> partitioned_slice_offsets;
  bool any_non_constant_sliced_dim = false;
  // Classify every output dimension.
  for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
    if (hlo->operand(1)->shape().dimensions(i) != hlo->shape().dimensions(i)) {
      // The update is narrower than the operand along this dimension.
      slice_dims.push_back(i);
      int64_t slice_size = hlo->operand(1)->shape().dimensions(i);
      if (hlo->sharding().tile_assignment().dim(i) != 1) {
        if (!hlo->operand(i + 2)->IsConstant() && slice_size != 1) {
          any_non_constant_sliced_dim = true;
          continue;
        }
        partitioned_slice_dims.push_back(i);
        // -1 marks a size-1 update (offset checked at runtime); otherwise
        // record the constant start offset.
        if (slice_size == 1) {
          partitioned_slice_offsets.push_back(-1);
        } else {
          partitioned_slice_offsets.push_back(
              hlo->operand(i + 2)->literal().Get<int>({}));
        }
      }
    } else if (hlo->sharding().tile_assignment().dim(i) != 1) {
      partitioned_non_slice_dims.push_back(i);
    }
  }
  // Fallback path: replicate all sliced dimensions (keeping sharded
  // non-slice dims), apply the update there, then reshard to the requested
  // output sharding.
  auto handle_with_replicate_slice_dims = [&]() {
    HloSharding replicated_sharding =
        hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(
            hlo->operand(0)->sharding(), partitioned_non_slice_dims);
    auto base = GetPartitionedHlo(hlo->operand(0)).Reshard(replicated_sharding);
    auto operand =
        GetPartitionedHlo(hlo->operand(1)).Reshard(replicated_sharding);
    std::vector<HloInstruction*> new_indices(hlo->shape().rank());
    for (int64_t i = 0; i < new_indices.size(); ++i) {
      new_indices[i] = GetPartitionedHlo(hlo->operand(i + 2))
                           .Reshard(HloSharding::Replicate())
                           .hlo();
    }
    auto dus = b_.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
        base.hlo()->shape(), base.hlo(), operand.hlo(), new_indices));
    dus->set_sharding(replicated_sharding);
    SetPartitionedHlo(hlo, PartitionedHlo(dus, base.base_shape(), base.state())
                               .Reshard(hlo->sharding()));
  };
  if (any_non_constant_sliced_dim) {
    if (partitioned_non_slice_dims.empty()) {
      return DefaultAction(hlo);
    }
    handle_with_replicate_slice_dims();
    return absl::OkStatus();
  }
  if (!partitioned_slice_dims.empty()) {
    auto add_hlo = [&](std::unique_ptr<HloInstruction> to_add) {
      return b_.AddInstruction(std::move(to_add));
    };
    // Per-dimension start indices: zero where the update spans the whole
    // dimension, replicated dynamic index otherwise.
    std::vector<HloInstruction*> new_indices(hlo->shape().rank());
    for (int64_t i = 0; i < new_indices.size(); ++i) {
      if (hlo->operand(1)->shape().dimensions(i) ==
          hlo->shape().dimensions(i)) {
        new_indices[i] = CreateZero(hlo->operand(i + 2)->shape(), &b_);
        continue;
      }
      new_indices[i] = GetPartitionedHlo(hlo->operand(i + 2))
                           .Reshard(HloSharding::Replicate())
                           .hlo();
    }
    const auto& dus_sharding = hlo->sharding();
    const auto& partitioned_input =
        GetPartitionedHlo(hlo->operand(0)).Reshard(dus_sharding).hlo();
    auto update_sharding = HloSharding::Replicate();
    if (!partitioned_non_slice_dims.empty()) {
      // Only the slice dims of the update need replication; non-slice dims
      // keep the output sharding.
      update_sharding =
          hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(dus_sharding,
                                                                   slice_dims);
    }
    HloInstruction* replicate_update =
        GetPartitionedHlo(hlo->operand(1)).Reshard(update_sharding).hlo();
    const auto& update_shape = replicate_update->shape();
    const auto& partitioned_shape = partitioned_input->shape();
    auto partition_ordinals = MakeTiledPartitionOrdinals(
        hlo->sharding(), MakePartitioningState().partition_id, &b_);
    HloInstruction* all_dims_within_partition = add_hlo(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
    for (int i = 0; i < partitioned_slice_dims.size(); ++i) {
      int dim = partitioned_slice_dims[i];
      const int64_t per_partition_size = partitioned_shape.dimensions(dim);
      // A constant-offset update crossing a shard boundary cannot be applied
      // locally; use the replicated fallback instead.
      if ((partitioned_slice_offsets[i] != -1) &&
          (partitioned_slice_offsets[i] / per_partition_size) !=
              ((partitioned_slice_offsets[i] + update_shape.dimensions(dim) -
                1) /
               per_partition_size)) {
        handle_with_replicate_slice_dims();
        return absl::OkStatus();
      }
      // Build a runtime predicate for "the update lands inside this
      // partition's shard along dim", and rebase the index to shard-local
      // coordinates.
      const Shape& compare_shape =
          ShapeUtil::ChangeElementType(partition_id_->shape(), PRED);
      auto per_partition_size_hlo = add_hlo(HloInstruction::CreateConstant(
          LiteralUtil::CreateR0<int>(per_partition_size)));
      const Shape& offset_shape = per_partition_size_hlo->shape();
      auto partition_offset = add_hlo(HloInstruction::CreateBinary(
          offset_shape, HloOpcode::kMultiply, partition_ordinals[dim],
          per_partition_size_hlo));
      // index >= start of this partition's shard.
      auto offset_ge = add_hlo(HloInstruction::CreateCompare(
          compare_shape, new_indices[dim], partition_offset,
          ComparisonDirection::kGe));
      // index < start of the next partition's shard.
      auto offset_lt = add_hlo(HloInstruction::CreateCompare(
          compare_shape, new_indices[dim],
          add_hlo(HloInstruction::CreateBinary(
              offset_shape, HloOpcode::kMultiply,
              add_hlo(HloInstruction::CreateBinary(
                  offset_shape, HloOpcode::kAdd, partition_ordinals[dim],
                  add_hlo(HloInstruction::CreateConstant(
                      LiteralUtil::CreateR0<int>(1))))),
              per_partition_size_hlo)),
          ComparisonDirection::kLt));
      auto update_within_partition = add_hlo(HloInstruction::CreateBinary(
          compare_shape, HloOpcode::kAnd, offset_ge, offset_lt));
      all_dims_within_partition = add_hlo(HloInstruction::CreateBinary(
          compare_shape, HloOpcode::kAnd, all_dims_within_partition,
          update_within_partition));
      // Shard-local index when the update lands here; otherwise 0 (the final
      // select below discards the result in that case).
      new_indices[dim] = add_hlo(HloInstruction::CreateTernary(
          new_indices[dim]->shape(), HloOpcode::kSelect,
          update_within_partition,
          add_hlo(HloInstruction::CreateBinary(
              new_indices[dim]->shape(), HloOpcode::kSubtract, new_indices[dim],
              partition_offset)),
          add_hlo(
              HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(0)))));
    }
    auto dus = add_hlo(HloInstruction::CreateDynamicUpdateSlice(
        partitioned_shape, partitioned_input, replicate_update, new_indices));
    SetPartitionedHlo(hlo, [&]() {
      // Only partitions whose shard actually contains the update keep the
      // updated data; the others keep their original input.
      return add_hlo(HloInstruction::CreateTernary(
          dus->shape(), HloOpcode::kSelect,
          add_hlo(HloInstruction::CreateBroadcast(
              ShapeUtil::ChangeElementType(dus->shape(), PRED),
              all_dims_within_partition, {})),
          dus, partitioned_input));
    });
    return absl::OkStatus();
  }
  // No sharded slice dimension: the update applies locally on each shard.
  std::vector<HloInstruction*> new_indices(hlo->shape().rank());
  auto new_input =
      GetPartitionedHlo(hlo->operand(0)).Reshard(hlo->sharding()).hlo();
  auto new_update =
      GetPartitionedHlo(hlo->operand(1)).Reshard(hlo->sharding()).hlo();
  for (int64_t i = 0; i < new_indices.size(); ++i) {
    if (hlo->operand(1)->shape().dimensions(i) == hlo->shape().dimensions(i)) {
      new_indices[i] = CreateZero(hlo->operand(i + 2)->shape(), &b_);
      continue;
    }
    new_indices[i] = GetPartitionedHlo(hlo->operand(i + 2))
                         .Reshard(HloSharding::Replicate())
                         .hlo();
  }
  SetPartitionedHlo(hlo, [&]() {
    auto partitioned_shape =
        MakePartitionedShape(hlo->shape(), hlo->sharding());
    return b_.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
        partitioned_shape, new_input, new_update, new_indices));
  });
  return absl::OkStatus();
}
absl::Status SpmdPartitioningVisitor::HandleGetTupleElement(
    HloInstruction* hlo) {
  if (hlo->sharding().IsManual()) {
    return DefaultAction(hlo);
  }
  const int64_t index = hlo->tuple_index();
  const auto& tuple = GetPartitionedHlo(hlo->operand(0));
  // Extract the element from the partitioned tuple; it carries the matching
  // sub-sharding of the tuple.
  HloInstruction* element =
      b_.AddInstruction(HloInstruction::CreateGetTupleElement(
          ShapeUtil::GetTupleElementShape(tuple.hlo()->shape(), index),
          tuple.hlo(), index));
  element->set_sharding(
      tuple.sharding().GetSubSharding(tuple.base_shape(), {index}));
  // Reshard the extracted element to the sharding requested on this
  // instruction before recording it.
  PartitionedHlo partitioned_element(
      element, tuple.base_shape().tuple_shapes(index), MakePartitioningState());
  SetPartitionedHlo(hlo, partitioned_element.Reshard(hlo->sharding()));
  return absl::OkStatus();
}
// Handles infeed. Evenly partitionable data infeeds the shard shape
// directly. Uneven partitioning needs per-partition shapes, so the distinct
// shard shapes are deduplicated into conditional branches selected by the
// partition ID, with smaller shards padded up to the common shard shape.
absl::Status SpmdPartitioningVisitor::HandleInfeed(HloInstruction* hlo) {
  const Shape& shape = ShapeUtil::GetTupleElementShape(hlo->shape(), 0);
  auto token = GetPartitionedHlo(hlo->operand(0)).hlo();
  if (ShapeUtil::GetLeafCount(shape) == 0) {
    // No data leaves (e.g. an empty tuple): emit the infeed unchanged.
    SetPartitionedHlo(hlo, [&]() {
      return b_.AddInstruction(
          HloInstruction::CreateInfeed(shape, token, hlo->infeed_config()));
    });
    return absl::OkStatus();
  }
  auto sharding = hlo->sharding().GetSubSharding(hlo->shape(), {0});
  auto shard_shape = MakePartitionedShape(shape, sharding);
  if (EvenlyPartitions(shape, sharding)) {
    // Every partition receives an identically-shaped shard, so a plain
    // infeed of the shard shape suffices.
    SetPartitionedHlo(hlo, [&]() {
      return b_.AddInstruction(HloInstruction::CreateInfeed(
          shard_shape, token, hlo->infeed_config()));
    });
    return absl::OkStatus();
  }
  if (hlo->sharding().HasUniqueDevice()) {
    return HandleSingleDevice(hlo);
  }
  // Collect the distinct per-partition shapes and the branch index each
  // partition should take.
  std::vector<Shape> per_branch_partitioned_shapes;
  std::vector<int32_t> conditional_branch_indices(num_partitions_);
  for (int64_t i = 0; i < num_partitions_; ++i) {
    auto partitioned_shape =
        MakeNonPaddedShapeForGivenPartition(shape, sharding, i);
    int64_t matching_existing_index = 0;
    for (; matching_existing_index < per_branch_partitioned_shapes.size();
         ++matching_existing_index) {
      if (ShapeUtil::Compatible(
              partitioned_shape,
              per_branch_partitioned_shapes[matching_existing_index])) {
        break;
      }
    }
    if (matching_existing_index < per_branch_partitioned_shapes.size()) {
      conditional_branch_indices[i] = matching_existing_index;
    } else {
      conditional_branch_indices[i] = per_branch_partitioned_shapes.size();
      per_branch_partitioned_shapes.push_back(std::move(partitioned_shape));
    }
  }
  // Derive the branch index for this partition: directly from the partition
  // ID when every partition has its own branch, otherwise through a constant
  // lookup table indexed by partition ID.
  HloInstruction* branch_index;
  auto state = MakePartitioningState();
  if (per_branch_partitioned_shapes.size() == num_partitions_) {
    branch_index = state.partition_id;
    if (branch_index->shape().element_type() != S32) {
      branch_index = b_.AddInstruction(HloInstruction::CreateConvert(
          ShapeUtil::ChangeElementType(branch_index->shape(), S32),
          branch_index));
    }
  } else {
    auto branch_index_table = b_.AddInstruction(HloInstruction::CreateConstant(
        LiteralUtil::CreateR1<int32_t>(conditional_branch_indices)));
    branch_index = b_.AddInstruction(HloInstruction::CreateDynamicSlice(
        ShapeUtil::MakeShape(S32, {1}), branch_index_table,
        {state.partition_id}, {1}));
    branch_index = b_.AddInstruction(HloInstruction::CreateReshape(
        ShapeUtil::MakeShape(S32, {}), branch_index));
  }
  // Build one branch per distinct shard shape; pad smaller shards up to the
  // common shard shape so all branches return compatible results.
  std::vector<HloComputation*> branches(per_branch_partitioned_shapes.size());
  for (int64_t i = 0; i < branches.size(); ++i) {
    SpmdBuilder branch_b(absl::StrCat("infeed_branch_", i), visiting_hlo_);
    auto param = branch_b.AddInstruction(HloInstruction::CreateParameter(
        0, token->shape(), "infeed_token_param"));
    auto infeed = branch_b.AddInstruction(HloInstruction::CreateInfeed(
        per_branch_partitioned_shapes[i], param, hlo->infeed_config()));
    if (!ShapeUtil::Compatible(per_branch_partitioned_shapes[i], shard_shape)) {
      // Recursively pad every array leaf of the infeed result to the
      // corresponding subshape of shard_shape; the token (shape index {1})
      // is passed through untouched.
      std::function<HloInstruction*(const ShapeIndex&, HloInstruction*)>
          pad_infeed = [&](const ShapeIndex& index,
                           HloInstruction* infeed_element) -> HloInstruction* {
        if (index == ShapeIndex({1})) {
          return infeed_element;
        }
        const Shape& element_shape =
            ShapeUtil::GetSubshape(infeed->shape(), index);
        if (element_shape.IsTuple() && element_shape.tuple_shapes_size() > 0) {
          std::vector<HloInstruction*> padded_elements(
              element_shape.tuple_shapes_size());
          for (int64_t i = 0; i < padded_elements.size(); ++i) {
            auto sub_index = index;
            sub_index.push_back(i);
            padded_elements[i] = pad_infeed(
                sub_index,
                branch_b.AddInstruction(HloInstruction::CreateGetTupleElement(
                    ShapeUtil::GetSubshape(element_shape, {i}), infeed_element,
                    i)));
          }
          return branch_b.AddInstruction(
              HloInstruction::CreateTuple(padded_elements));
        }
        // Drop the leading {0}/{1} data-vs-token component when indexing the
        // target shard shape.
        const Shape& pad_shape = ShapeUtil::GetSubshape(
            shard_shape, ShapeIndexView(index).subspan(1));
        if (ShapeUtil::Compatible(element_shape, pad_shape)) {
          return infeed_element;
        }
        if (element_shape.IsArray()) {
          CHECK(pad_shape.IsArray());
          return PadToShape(infeed_element, pad_shape, &branch_b);
        }
        CHECK(element_shape.IsTuple());
        CHECK(element_shape.tuple_shapes().empty());
        return CreateZero(pad_shape, &branch_b);
      };
      pad_infeed({}, infeed);
    }
    branches[i] = module_->AddEmbeddedComputation(branch_b.Build());
  }
  SetPartitionedHlo(hlo, [&]() {
    return b_.AddInstruction(HloInstruction::CreateConditional(
        ShapeUtil::MakeTupleShape({shard_shape, token->shape()}), branch_index,
        branches, std::vector<HloInstruction*>(branches.size(), token)));
  });
  return absl::OkStatus();
}
absl::Status SpmdPartitioningVisitor::HandlePad(HloInstruction* hlo) {
  if (hlo->sharding().IsTileMaximal()) {
    return DefaultAction(hlo);
  }
  auto data = GetPartitionedHlo(hlo->operand(0));
  // The padding value must be identical on every partition.
  HloInstruction* pad_value = GetPartitionedHlo(hlo->operand(1))
                                  .Reshard(HloSharding::Replicate())
                                  .hlo();
  auto windowed = ReshardDataForPad(pad_value, hlo->padding_config(), data,
                                    hlo->sharding(), &b_);
  if (!windowed.has_value()) {
    // No windowed resharding was found; fall back to default handling.
    return DefaultAction(hlo);
  }
  auto* sharded_pad = PadDataFromWindowReshard(*windowed, pad_value, &b_);
  SetPartitionedHlo(hlo, [&]() {
    if (!windowed->dynamic_slice_index_on_output) {
      return sharded_pad;
    }
    // The padded result exceeds the shard; dynamically slice out this
    // partition's piece.
    const Shape shard_shape =
        MakePartitionedShape(hlo->shape(), hlo->sharding());
    return b_.AddInstruction(HloInstruction::CreateDynamicSlice(
        shard_shape, sharded_pad, *windowed->dynamic_slice_index_on_output,
        shard_shape.dimensions()));
  });
  return absl::OkStatus();
}
absl::Status SpmdPartitioningVisitor::HandleParameter(HloInstruction* hlo) {
  // Parameters are partitioned by re-declaring them with the per-partition
  // shard shape; leaf-buffer replication metadata is forwarded verbatim.
  SetPartitionedHlo(hlo, [&]() {
    const Shape partitioned_shape =
        MakePartitionedShape(hlo->shape(), hlo->sharding());
    HloInstruction* sharded_param =
        b_.AddInstruction(HloInstruction::CreateParameter(
            hlo->parameter_number(), partitioned_shape, "param"));
    if (hlo->parameter_replicated_at_leaf_buffers()) {
      sharded_param->set_parameter_replicated_at_leaf_buffers(
          *hlo->parameter_replicated_at_leaf_buffers());
    }
    return sharded_param;
  });
  return absl::OkStatus();
}
// Handles reduce (single-input and variadic). Unique-device reductions run
// on that device. Otherwise all inputs are aligned to the first input's
// sharding and uneven shards are padded with the init value; a local reduce
// is then followed, when a reduced dimension is sharded, by a cross-partition
// combine: all-reduce for array results, or all-gather plus a second local
// reduce for tuple (variadic) results.
absl::Status SpmdPartitioningVisitor::HandleReduce(HloInstruction* hlo) {
  int64_t input_count = 1;
  if (hlo->shape().IsTuple()) {
    input_count = hlo->shape().tuple_shapes_size();
    CHECK_GT(input_count, 0);
  }
  if (hlo->sharding().HasUniqueDevice()) {
    // Reshard each input/init pair onto the (per-element) unique-device
    // sharding and run the original reduce there.
    std::vector<HloInstruction*> new_operands(input_count * 2, nullptr);
    for (auto i = 0; i != input_count; ++i) {
      HloSharding subsharding =
          hlo->sharding().IsTuple()
              ? hlo->sharding().GetSubSharding(hlo->shape(), {i})
              : hlo->sharding();
      CHECK(!subsharding.IsTuple() && subsharding.HasUniqueDevice());
      new_operands[i] =
          GetPartitionedHlo(hlo->operand(i)).Reshard(subsharding).hlo();
      new_operands[input_count + i] =
          GetPartitionedHlo(hlo->operand(input_count + i))
              .Reshard(subsharding)
              .hlo();
    }
    auto clone = b_.AddInstruction(
        hlo->CloneWithNewOperands(hlo->shape(), new_operands));
    clone->copy_sharding(hlo);
    SetPartitionedHlo(
        hlo, PartitionedHlo(clone, hlo->shape(), MakePartitioningState())
                 .Reshard(hlo->sharding()));
    return absl::OkStatus();
  }
  std::vector<PartitionedHlo> inputs;
  std::vector<HloInstruction*> inits;
  // Input dimensions that survive the reduction.
  std::vector<int64_t> preserved_dims;
  for (int64_t i = 0; i < hlo->operand(0)->shape().rank(); ++i) {
    if (!absl::c_linear_search(hlo->dimensions(), i)) {
      preserved_dims.push_back(i);
    }
  }
  for (int64_t operand_id = 0; operand_id < input_count; ++operand_id) {
    // Init values must be available on every partition, so replicate them.
    inits.push_back(GetPartitionedHlo(hlo->operand(operand_id + input_count))
                        .Reshard(HloSharding::Replicate())
                        .hlo());
    inputs.push_back(GetPartitionedHlo(hlo->operand(operand_id)));
    if (operand_id > 0) {
      // Align all inputs to the first input's sharding.
      inputs.back() = inputs.back().Reshard(inputs[0].sharding());
    }
    if (!inputs[0].sharding().IsTileMaximal()) {
      // Pad uneven shards with the init value (preserved dims excluded).
      inputs.back() =
          inputs.back().PadWithValue(inits[operand_id], {},
                                     preserved_dims);
    }
  }
  std::vector<const Shape*> new_operand_shapes(input_count * 2);
  for (int64_t i = 0; i < input_count; ++i) {
    new_operand_shapes[i] = &inputs[i].hlo()->shape();
    new_operand_shapes[i + input_count] = &inits[i]->shape();
  }
  TF_ASSIGN_OR_RETURN(
      auto reduce_shape,
      ShapeInference::InferReduceShape(new_operand_shapes, hlo->dimensions(),
                                       hlo->to_apply()->ComputeProgramShape()));
  std::vector<HloInstruction*> input_hlos(input_count);
  for (int64_t i = 0; i < input_count; ++i) {
    input_hlos[i] = inputs[i].hlo();
  }
  // Reduce each shard locally first.
  auto local_reduce = b_.AddInstruction(HloInstruction::CreateReduce(
      reduce_shape, input_hlos, inits, hlo->dimensions(), hlo->to_apply()));
  SetPartitionedHlo(hlo, [&]() {
    HloInstruction* reduce = local_reduce;
    const bool reduce_sharded_dimension =
        !inputs[0].sharding().IsTileMaximal() &&
        absl::c_any_of(hlo->dimensions(), [&](int64_t i) {
          return inputs[0].sharding().tile_assignment().dim(i) > 1;
        });
    if (reduce_sharded_dimension) {
      // A reduced dimension is sharded; partial results must be combined
      // across partitions.
      if (inputs[0].sharding().ReplicateOnLastTileDim()) {
        preserved_dims.push_back(inputs[0].base_shape().rank());
      }
      if (local_reduce->shape().IsArray()) {
        // Array result: cross-partition all-reduce along the sharded
        // reduction dimensions using the reducer computation.
        reduce = partitioner_->AllReduceAlongShardingDims(
            &b_, local_reduce, inputs[0].sharding(), next_channel_id_,
            hlo->dimensions(), collective_ops_creator_, hlo->to_apply());
      } else {
        // Tuple result (variadic reduce): all-gather the partial results
        // along the reduced dimensions, then reduce them again locally.
        auto grouped = hlo_sharding_util::GroupShardingOnDims(
            inputs[0].sharding(), preserved_dims);
        auto grouped_state = CreatePerGroupPartitioningState(
            inputs[0].state(), grouped.device_groups, &b_);
        std::vector<HloInstruction*> all_gathered_partial_results(input_count);
        for (int64_t i = 0; i < input_count; ++i) {
          auto gte = b_.AddInstruction(HloInstruction::CreateGetTupleElement(
              ShapeUtil::GetTupleElementShape(reduce_shape, i), local_reduce,
              i));
          // Re-expand the reduced dims to size 1, then replicate so each
          // partition sees all partial results along them.
          auto expanded_shape = input_hlos[i]->shape();
          auto all_gather_shape = input_hlos[i]->shape();
          for (int64_t dim : hlo->dimensions()) {
            expanded_shape.set_dimensions(dim, 1);
            all_gather_shape.set_dimensions(
                dim, inputs[0].sharding().tile_assignment().dim(dim));
          }
          auto reshape = b_.AddInstruction(
              HloInstruction::CreateReshape(expanded_shape, gte));
          reshape->set_sharding(grouped.sharding);
          all_gathered_partial_results[i] =
              PartitionedHlo(reshape, all_gather_shape, grouped_state)
                  .Replicate()
                  .hlo();
        }
        reduce = b_.AddInstruction(HloInstruction::CreateReduce(
            reduce_shape, all_gathered_partial_results, inits,
            hlo->dimensions(), hlo->to_apply()));
      }
    }
    // Result sharding: input sharding with the reduced dims replicated and
    // then removed.
    auto sharding = hlo_sharding_util::RemoveShapeDimensions(
        hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(
            inputs[0].sharding(), hlo->dimensions()),
        hlo->dimensions());
    if (local_reduce->shape().IsArray()) {
      reduce->set_sharding(sharding);
    } else {
      reduce->set_sharding(HloSharding::Tuple(
          reduce->shape(), std::vector<HloSharding>(input_count, sharding)));
    }
    return PartitionedHlo(reduce, hlo->shape(), MakePartitioningState())
        .Reshard(hlo->sharding())
        .hlo();
  });
  return absl::OkStatus();
}
absl::Status SpmdPartitioningVisitor::HandleReverse(HloInstruction* hlo) {
  auto* reverse = Cast<HloReverseInstruction>(hlo);
  if (reverse->sharding().IsTileMaximal()) {
    return DefaultAction(hlo);
  }
  // Reshard the operand so its sharding mirrors the output sharding along
  // the reversed dimensions.
  auto resharded = GetPartitionedHlo(reverse->operand(0))
                       .Reshard(hlo_sharding_util::ReverseSharding(
                           reverse->sharding(), reverse->dimensions()));
  // Halo-exchange so any shard padding sits on the left along the reversed
  // dimensions; if that is not possible, fall back.
  HloInstruction* padded_on_left =
      HaloExchangeToPadOnLeft(resharded, reverse->dimensions());
  if (!padded_on_left) {
    return DefaultAction(hlo);
  }
  SetPartitionedHlo(hlo, [&] {
    return b_.AddInstruction(hlo->CloneWithNewOperands(padded_on_left->shape(),
                                                       {padded_on_left}));
  });
  return absl::OkStatus();
}
absl::Status SpmdPartitioningVisitor::HandleWhile(HloInstruction* hlo) {
  const HloSharding& sharding = hlo->sharding();
  // The loop state keeps the while's sharding throughout: both the condition
  // and the body see it on their parameter.
  hlo->while_condition()->parameter_instruction(0)->set_sharding(sharding);
  hlo->while_body()->parameter_instruction(0)->set_sharding(sharding);
  // Replicate all data dimensions of the condition root so every partition
  // computes the same loop predicate.
  HloInstruction* cond_root = hlo->while_condition()->root_instruction();
  const HloSharding cond_root_sharding =
      hlo_sharding_util::ReplicateAllDataDims(cond_root->sharding());
  cond_root->set_sharding(cond_root_sharding);
  // Partition both sub-computations before building the new while.
  TF_RETURN_IF_ERROR(
      partitioner_
          ->PartitionComputation(hlo->while_condition(), cond_root_sharding,
                                 next_channel_id_, logger_, call_graph_)
          .status());
  TF_RETURN_IF_ERROR(
      partitioner_
          ->PartitionComputation(hlo->while_body(), sharding, next_channel_id_,
                                 logger_, call_graph_)
          .status());
  HloInstruction* partitioned_while =
      b_.AddInstruction(HloInstruction::CreateWhile(
          MakePartitionedShape(hlo->shape(), sharding), hlo->while_condition(),
          hlo->while_body(),
          GetPartitionedHlo(hlo->operand(0)).Reshard(sharding).hlo()));
  hlo->SetupDerivedInstruction(partitioned_while);
  SetPartitionedHlo(hlo, [&] { return partitioned_while; });
  return absl::OkStatus();
}
absl::Status SpmdPartitioningVisitor::HandleConditional(HloInstruction* hlo) {
  // First pass: propagate each branch operand's sharding onto the branch
  // parameter and collect the already-partitioned branch arguments.
  std::vector<HloInstruction*> branch_args;
  branch_args.reserve(hlo->branch_count());
  for (int64_t branch = 0; branch < hlo->branch_count(); ++branch) {
    hlo->branch_computation(branch)->parameter_instruction(0)->set_sharding(
        hlo->operand(branch + 1)->sharding());
    branch_args.push_back(GetPartitionedHlo(hlo->operand(branch + 1)).hlo());
  }
  // Second pass: partition every branch computation towards the
  // conditional's output sharding.
  for (int64_t branch = 0; branch < hlo->branch_count(); ++branch) {
    TF_RETURN_IF_ERROR(
        partitioner_
            ->PartitionComputation(hlo->branch_computation(branch),
                                   hlo->sharding(), next_channel_id_, logger_,
                                   call_graph_)
            .status());
  }
  SetPartitionedHlo(hlo, [&] {
    HloInstruction* pred = GetPartitionedHlo(hlo->operand(0)).hlo();
    if (!hlo->operand(0)->sharding().IsManual()) {
      if (hlo->operand(0)->sharding().IsManualSubgroup()) {
        // Keep the manual subgroup structure but replicate the predicate's
        // data dimensions.
        auto grouped = hlo_sharding_util::GetManualSubgroupSharding(
            hlo->operand(0)->sharding());
        grouped.sharding = HloSharding::Replicate();
        pred = GetPartitionedHlo(hlo->operand(0))
                   .Reshard(hlo_sharding_util::UngroupSharding(grouped))
                   .hlo();
      } else {
        // The branch selector must agree across partitions.
        pred = GetPartitionedHlo(hlo->operand(0))
                   .Reshard(HloSharding::Replicate())
                   .hlo();
      }
    }
    return b_.AddInstruction(HloInstruction::CreateConditional(
        MakePartitionedShape(hlo->shape(), hlo->sharding()), pred,
        hlo->called_computations(), branch_args));
  });
  return absl::OkStatus();
}
// Optimization barriers are partitioned exactly like elementwise ops.
absl::Status SpmdPartitioningVisitor::HandleOptimizationBarrier(
    HloInstruction* hlo) {
  return HandleElementwise(hlo);
}
// Partitions a kOutfeed. Unique-device sharding goes through the
// single-device path; manual sharding clones the op on resharded operands.
// Otherwise the data operand is resharded to the outfeed's sharding and, if
// the shape does not partition evenly, a conditional selects a per-partition
// branch that slices off the padding before outfeeding.
absl::Status SpmdPartitioningVisitor::HandleOutfeed(HloInstruction* hlo) {
  if (hlo->sharding().HasUniqueDevice()) {
    return HandleSingleDevice(hlo);
  }
  if (hlo->sharding().IsManual()) {
    // Manual mode: rebuild the outfeed on operands resharded to the same
    // manual sharding; the data itself is not partitioned further.
    auto clone_from_original = [&](const HloSharding& shared_sharding) {
      std::vector<HloInstruction*> new_operands;
      new_operands.reserve(hlo->operand_count());
      for (int64_t i = 0; i < hlo->operand_count(); ++i) {
        new_operands.push_back(
            GetPartitionedHlo(hlo->operand(i)).Reshard(shared_sharding).hlo());
      }
      auto clone = b_.AddInstruction(
          hlo->CloneWithNewOperands(hlo->shape(), new_operands));
      clone->set_sharding(shared_sharding);
      return clone;
    };
    SetPartitionedHlo(hlo,
                      [&] { return clone_from_original(hlo->sharding()); });
    return absl::OkStatus();
  }
  // An outfeed's tuple sharding may carry one extra trailing element (for the
  // token operand); strip it so `sharding` matches the data operand's shape.
  HloSharding sharding = hlo->sharding();
  const Shape& shape = hlo->operand(0)->shape();
  const int64_t required_leaves = HloSharding::RequiredLeaves(shape);
  if (sharding.IsTuple() &&
      sharding.tuple_elements().size() == required_leaves + 1) {
    if (shape.IsTuple()) {
      sharding = HloSharding::Tuple(
          shape,
          absl::MakeSpan(sharding.tuple_elements().data(), required_leaves));
    } else {
      sharding = sharding.tuple_elements().front();
    }
  }
  auto partitioned_operand =
      GetPartitionedHlo(hlo->operand(0)).Reshard(sharding);
  const auto& shard_shape = partitioned_operand.hlo()->shape();
  const auto& operand = partitioned_operand.hlo();
  auto token = GetPartitionedHlo(hlo->operand(1)).hlo();
  if (EvenlyPartitions(shape, sharding)) {
    // Fast path: every partition holds a full shard with no padding, so each
    // partition can outfeed its shard directly.
    Shape outfeed_shape = operand->shape();
    TF_RETURN_IF_ERROR(LayoutUtil::CopyLayoutBetweenShapes(hlo->outfeed_shape(),
                                                           &outfeed_shape));
    SetPartitionedHlo(hlo, [&]() {
      return b_.AddInstruction(HloInstruction::CreateOutfeed(
          outfeed_shape, operand, token, hlo->outfeed_config()));
    });
    return absl::OkStatus();
  }
  // Uneven partitioning: compute the valid (non-padded) shape each partition
  // holds, dedupe compatible shapes into branches, and record which branch
  // each partition should take.
  std::vector<Shape> per_branch_partitioned_shapes;
  std::vector<int32_t> conditional_branch_indices(num_partitions_);
  for (int64_t i = 0; i < num_partitions_; ++i) {
    auto partitioned_shape =
        MakeNonPaddedShapeForGivenPartition(shape, sharding, i);
    int64_t matching_existing_index = 0;
    for (; matching_existing_index < per_branch_partitioned_shapes.size();
         ++matching_existing_index) {
      if (ShapeUtil::Compatible(
              partitioned_shape,
              per_branch_partitioned_shapes[matching_existing_index])) {
        break;
      }
    }
    if (matching_existing_index < per_branch_partitioned_shapes.size()) {
      conditional_branch_indices[i] = matching_existing_index;
    } else {
      conditional_branch_indices[i] = per_branch_partitioned_shapes.size();
      per_branch_partitioned_shapes.push_back(std::move(partitioned_shape));
    }
  }
  // Pick the branch index at runtime: the partition id itself when every
  // partition has a distinct shape, otherwise a constant-table lookup.
  HloInstruction* branch_index;
  auto state = MakePartitioningState();
  if (per_branch_partitioned_shapes.size() == num_partitions_) {
    branch_index = state.partition_id;
    if (branch_index->shape().element_type() != S32) {
      branch_index = b_.AddInstruction(HloInstruction::CreateConvert(
          ShapeUtil::ChangeElementType(branch_index->shape(), S32),
          branch_index));
    }
  } else {
    auto branch_index_table = b_.AddInstruction(HloInstruction::CreateConstant(
        LiteralUtil::CreateR1<int32_t>(conditional_branch_indices)));
    branch_index = b_.AddInstruction(HloInstruction::CreateDynamicSlice(
        ShapeUtil::MakeShape(S32, {1}), branch_index_table, {partition_id_},
        {1}));
    branch_index = b_.AddInstruction(HloInstruction::CreateReshape(
        ShapeUtil::MakeShape(S32, {}), branch_index));
  }
  // Build one branch computation per distinct shard shape; each branch slices
  // its shard down to the valid region (recursively for tuples) and outfeeds.
  std::vector<HloComputation*> branches(per_branch_partitioned_shapes.size());
  for (int64_t i = 0; i < branches.size(); ++i) {
    SpmdBuilder branch_b(absl::StrCat("outfeed_branch_", i), visiting_hlo_);
    auto param = branch_b.AddInstruction(HloInstruction::CreateParameter(
        0,
        ShapeUtil::MakeTupleShape({operand->shape(), token->shape()}),
        "outfeed_token_param"));
    auto outfeed_data = branch_b.AddInstruction(
        HloInstruction::CreateGetTupleElement(operand->shape(), param, 0));
    auto outfeed_token = branch_b.AddInstruction(
        HloInstruction::CreateGetTupleElement(token->shape(), param, 1));
    if (!ShapeUtil::Compatible(per_branch_partitioned_shapes[i], shard_shape)) {
      // Recursively slices each array leaf of the shard down to the valid
      // extent for this branch. NOTE: the inner tuple loop below reuses the
      // name `i`, shadowing the branch index; after that loop closes, `i`
      // again refers to the branch index captured from the enclosing loop.
      std::function<HloInstruction*(const ShapeIndex&, HloInstruction*)>
          slice_outfeed =
              [&](const ShapeIndex& index,
                  HloInstruction* outfeed_operand) -> HloInstruction* {
        const Shape& element_shape =
            ShapeUtil::GetSubshape(outfeed_data->shape(), index);
        if (element_shape.IsTuple() && element_shape.tuple_shapes_size() > 0) {
          std::vector<HloInstruction*> slice_elements(
              element_shape.tuple_shapes_size());
          for (int64_t i = 0; i < slice_elements.size(); ++i) {
            auto sub_index = index;
            sub_index.push_back(i);
            slice_elements[i] = slice_outfeed(
                sub_index,
                branch_b.AddInstruction(HloInstruction::CreateGetTupleElement(
                    ShapeUtil::GetSubshape(element_shape, {i}), outfeed_operand,
                    i)));
          }
          return branch_b.AddInstruction(
              HloInstruction::CreateTuple(slice_elements));
        }
        const Shape& slice_shape = ShapeUtil::GetSubshape(
            per_branch_partitioned_shapes[i], ShapeIndexView(index));
        if (ShapeUtil::Compatible(element_shape, slice_shape)) {
          return outfeed_operand;
        }
        if (element_shape.IsArray()) {
          CHECK(slice_shape.IsArray());
          // Slice from offset 0: padding lives at the high end of each dim.
          std::vector<int64_t> start_indices(slice_shape.rank(), 0);
          std::vector<int64_t> slice_strides(slice_shape.rank(), 1);
          return branch_b.AddInstruction(HloInstruction::CreateSlice(
              slice_shape, outfeed_operand, start_indices,
              slice_shape.dimensions(), slice_strides));
        }
        // Only an empty tuple can reach here; pass it through unchanged.
        CHECK(element_shape.IsTuple());
        CHECK(element_shape.tuple_shapes().empty());
        return outfeed_operand;
      };
      outfeed_data = slice_outfeed({}, outfeed_data);
    }
    TF_RETURN_IF_ERROR(LayoutUtil::CopyLayoutBetweenShapes(
        hlo->outfeed_shape(), &per_branch_partitioned_shapes[i]));
    branch_b.AddInstruction(HloInstruction::CreateOutfeed(
        per_branch_partitioned_shapes[i], outfeed_data, outfeed_token,
        hlo->outfeed_config()));
    branches[i] = module_->AddEmbeddedComputation(branch_b.Build());
  }
  // Every branch receives the same (data, token) tuple; only the slicing
  // inside the selected branch differs per partition.
  SetPartitionedHlo(hlo, [&]() {
    return b_.AddInstruction(HloInstruction::CreateConditional(
        token->shape(), branch_index, branches,
        std::vector<HloInstruction*>(
            branches.size(),
            b_.AddInstruction(HloInstruction::CreateTuple({operand, token})))));
  });
  return absl::OkStatus();
}
// Partitions a kRng. Cases, in order: unique device -> single-device path;
// manual -> clone on resharded operands; replicated -> run on device 0 and
// broadcast the result (so all partitions see identical random values);
// tiled -> each partition generates its own shard from replicated operands,
// with an extra per-group replication step for partial (last-tile-dim)
// replication.
absl::Status SpmdPartitioningVisitor::HandleRng(HloInstruction* hlo) {
  if (hlo->sharding().HasUniqueDevice()) {
    return HandleSingleDevice(hlo);
  }
  // Rebuilds the rng on operands resharded to `shared_sharding`.
  auto clone_from_original = [&](const HloSharding& shared_sharding) {
    std::vector<HloInstruction*> new_operands;
    new_operands.reserve(hlo->operand_count());
    for (int64_t i = 0; i < hlo->operand_count(); ++i) {
      new_operands.push_back(
          GetPartitionedHlo(hlo->operand(i)).Reshard(shared_sharding).hlo());
    }
    auto clone = b_.AddInstruction(
        hlo->CloneWithNewOperands(hlo->shape(), new_operands));
    clone->set_sharding(shared_sharding);
    return clone;
  };
  if (hlo->sharding().IsManual()) {
    SetPartitionedHlo(hlo,
                      [&] { return clone_from_original(hlo->sharding()); });
    return absl::OkStatus();
  }
  if (hlo->sharding().IsReplicated()) {
    SetPartitionedHlo(hlo, [&] {
      // Run the rng on device 0 only, then broadcast, so every partition
      // observes the same random values.
      auto clone = clone_from_original(HloSharding::AssignDevice(0));
      return PartitionedHlo(clone, hlo->shape(), MakePartitioningState())
          .Reshard(HloSharding::Replicate())
          .hlo();
    });
    return absl::OkStatus();
  }
  TF_RET_CHECK(!hlo->sharding().IsTileMaximal());
  // Tiled: rng parameters (e.g. bounds) are replicated on every partition.
  std::vector<HloInstruction*> new_operands;
  new_operands.reserve(hlo->operand_count());
  for (int64_t i = 0; i < hlo->operand_count(); ++i) {
    new_operands.push_back(GetPartitionedHlo(hlo->operand(i))
                               .Reshard(HloSharding::Replicate())
                               .hlo());
  }
  if (!hlo->sharding().ReplicateOnLastTileDim()) {
    SetPartitionedHlo(hlo, [&] {
      return b_.AddInstruction(HloInstruction::CreateRng(
          MakePartitionedShape(hlo->shape(), hlo->sharding()),
          hlo->random_distribution(), new_operands));
    });
  } else {
    // Partially replicated: group the tile dimensions, generate one shard per
    // group (on the group's first device), then replicate within each group.
    std::vector<int64_t> group_dims(
        hlo->sharding().tile_assignment().num_dimensions() - 1);
    std::iota(group_dims.begin(), group_dims.end(), 0);
    auto sharding_grouped =
        hlo_sharding_util::GroupShardingOnDims(hlo->sharding(), group_dims);
    auto per_group_state = CreatePerGroupPartitioningState(
        MakePartitioningState(), sharding_grouped.device_groups, &b_);
    auto rng = b_.AddInstruction(HloInstruction::CreateRng(
        MakePartitionedShape(hlo->shape(), hlo->sharding()),
        hlo->random_distribution(), new_operands));
    rng->set_sharding(HloSharding::AssignDevice(0));
    SetPartitionedHlo(hlo, [&]() {
      return PartitionedHlo(rng, rng->shape(), per_group_state)
          .Replicate()
          .hlo();
    });
  }
  return absl::OkStatus();
}
// Partitions a (possibly variadic) kReduceWindow. Each input is resharded as
// a windowed input (which may add halo exchange and padding with the
// replicated init value); if any input cannot be windowed-resharded, the
// whole op falls back to the default action. A trailing dynamic-slice strips
// output padding when needed.
absl::Status SpmdPartitioningVisitor::HandleReduceWindow(HloInstruction* hlo) {
  if (hlo->sharding().IsTileMaximal()) {
    return DefaultAction(hlo);
  }
  auto* reduce_window = Cast<HloReduceWindowInstruction>(hlo);
  absl::Span<HloInstruction* const> input_arrays = reduce_window->inputs();
  absl::Span<HloInstruction* const> init_values = reduce_window->init_values();
  absl::InlinedVector<PartitionedHlo::WindowedInputShardReturnValue, 2>
      sharded_results;
  absl::InlinedVector<const Shape*, 2> sharded_input_shapes,
      replicated_init_shapes;
  absl::InlinedVector<HloInstruction*, 2> sharded_inputs, replicated_inits;
  int64_t input_idx = 0;
  for (const HloInstruction* input_array : input_arrays) {
    PartitionedHlo& operand = GetPartitionedHlo(input_array);
    // Init values are scalars; replicate them so every partition can pad.
    PartitionedHlo replicated_init = GetPartitionedHlo(init_values[input_idx])
                                         .Reshard(HloSharding::Replicate());
    // For a variadic reduce-window the output sharding is a tuple; use the
    // element matching this input.
    const HloSharding& sharding =
        hlo->sharding().IsTuple() ? hlo->sharding().tuple_elements()[input_idx]
                                  : hlo->sharding();
    auto resharded_operand_and_window = operand.ReshardAsWindowedInput(
        hlo->window(), sharding, replicated_init.hlo());
    if (!resharded_operand_and_window.has_value()) {
      return DefaultAction(hlo);
    }
    sharded_results.push_back(resharded_operand_and_window.value());
    sharded_inputs.push_back(resharded_operand_and_window->sharded_input);
    sharded_input_shapes.push_back(&sharded_inputs.back()->shape());
    replicated_inits.push_back(replicated_init.hlo());
    replicated_init_shapes.push_back(&replicated_inits.back()->shape());
    input_idx++;
  }
  // Infer the shard-level output shape from the shard-level inputs and the
  // (possibly adjusted) shard window. All inputs share the same window, so
  // index 0 is representative.
  TF_ASSIGN_OR_RETURN(Shape sharded_rw_shape,
                      ShapeInference::InferReduceWindowShape(
                          sharded_input_shapes, replicated_init_shapes,
                          sharded_results[0].shard_window,
                          hlo->to_apply()->ComputeProgramShape()));
  Shape shard_shape = MakePartitionedShape(hlo->shape(), hlo->sharding());
  if (shard_shape.has_layout()) {
    *sharded_rw_shape.mutable_layout() = shard_shape.layout();
  }
  SetPartitionedHlo(hlo, [&]() {
    HloInstruction* sharded_rw =
        b_.AddInstruction(HloInstruction::CreateReduceWindow(
            sharded_rw_shape, sharded_inputs, replicated_inits,
            sharded_results[0].shard_window, hlo->to_apply()));
    if (!sharded_results[0].dynamic_slice_index_on_output.has_value()) {
      CHECK(ShapeUtil::Compatible(shard_shape, sharded_rw->shape()))
          << shard_shape << " vs " << sharded_rw->shape() << "\n";
      return sharded_rw;
    }
    // The shard output carries extra elements; slice out the valid region.
    return b_.AddInstruction(HloInstruction::CreateDynamicSlice(
        shard_shape, sharded_rw,
        *sharded_results[0].dynamic_slice_index_on_output,
        shard_shape.dimensions()));
  });
  return absl::OkStatus();
}
// Partitions a kSelectAndScatter. Supported only for F32/BF16 with a select
// computation that is a simple two-parameter comparison (so a safe pad value
// of +/-infinity can be chosen); everything else falls back to the default
// action. The operand and source are halo-exchanged per tiled dimension so
// each partition sees whole windows, then a shard-level select-and-scatter is
// built and the valid region is dynamic-sliced out.
absl::Status SpmdPartitioningVisitor::HandleSelectAndScatter(
    HloInstruction* hlo) {
  if (hlo->sharding().IsTileMaximal()) {
    return DefaultAction(hlo);
  }
  auto operand = GetPartitionedHlo(hlo->operand(0));
  auto source = GetPartitionedHlo(hlo->mutable_operand(1));
  if (hlo->sharding() != operand.sharding()) {
    operand = operand.Reshard(hlo->sharding());
  }
  if (hlo->sharding() != source.sharding()) {
    source = source.Reshard(hlo->sharding());
  }
  if (hlo->shape().element_type() != F32 &&
      hlo->shape().element_type() != BF16) {
    return DefaultAction(hlo);
  }
  // Pattern-match the select computation: it must be compare(param0, param1)
  // (in either parameter order) so we can pick a padding value that never
  // wins the comparison.
  auto select = hlo->called_computations()[0];
  auto select_root = select->root_instruction();
  if (select_root->opcode() != HloOpcode::kCompare ||
      select_root->operand(0)->opcode() != HloOpcode::kParameter ||
      select_root->operand(1)->opcode() != HloOpcode::kParameter ||
      select_root->operand(0)->parameter_number() +
              select_root->operand(1)->parameter_number() !=
          1) {
    return DefaultAction(hlo);
  }
  float float_pad_value;
  if (select_root->comparison_direction() == ComparisonDirection::kGe ||
      select_root->comparison_direction() == ComparisonDirection::kGt) {
    if (select_root->operand(0)->parameter_number() == 0) {
      float_pad_value = -std::numeric_limits<float>::infinity();
    } else {
      float_pad_value = std::numeric_limits<float>::infinity();
    }
  } else if (select_root->comparison_direction() == ComparisonDirection::kLe ||
             select_root->comparison_direction() == ComparisonDirection::kLt) {
    if (select_root->operand(0)->parameter_number() == 0) {
      float_pad_value = std::numeric_limits<float>::infinity();
    } else {
      float_pad_value = -std::numeric_limits<float>::infinity();
    }
  } else {
    return DefaultAction(hlo);
  }
  auto pad_value = b_.AddInstruction(HloInstruction::CreateConstant(
      hlo->shape().element_type() == BF16
          ? LiteralUtil::CreateR0<bfloat16>(
                static_cast<bfloat16>(float_pad_value))
          : LiteralUtil::CreateR0<float>(float_pad_value)));
  auto replicated_init = GetPartitionedHlo(hlo->mutable_operand(2))
                             .Reshard(HloSharding::Replicate());
  auto state = MakePartitioningState();
  auto partition_ordinals =
      MakeTiledPartitionOrdinals(hlo->sharding(), state.partition_id, &b_);
  // Per-dimension window bookkeeping: first/limit window index per shard, and
  // left/right halo sizes for both the data operand and the source.
  std::vector<MultiplyAddDivideOffsetCalculation> first_window(
      hlo->shape().rank());
  std::vector<MultiplyAddDivideOffsetCalculation> limit_window(
      hlo->shape().rank());
  std::vector<OffsetCalculation> data_left_halo_sizes(hlo->shape().rank());
  std::vector<OffsetCalculation> data_right_halo_sizes(hlo->shape().rank());
  std::vector<OffsetCalculation> source_left_halo_sizes(hlo->shape().rank());
  std::vector<OffsetCalculation> source_right_halo_sizes(hlo->shape().rank());
  auto unpadded_data_shard_shape =
      MakePartitionedShape(hlo->shape(), hlo->sharding());
  auto unpadded_source_shard_shape =
      MakePartitionedShape(hlo->operand(1)->shape(), hlo->sharding());
  auto source_shard_hlo = source.hlo();
  auto data_shard_hlo = operand.hlo();
  for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
    int64_t shard_count = hlo->sharding().tile_assignment().dim(i);
    if (shard_count == 1) {
      continue;
    }
    auto wd = hlo->window().dimensions(i);
    if (wd.stride() > wd.size()) {
      // Windows never overlap when stride > size; widen the window so the
      // offset math below can assume size >= stride.
      wd.set_size(wd.stride());
    }
    first_window[i] = MultiplyAddDivideOffsetCalculation(
        unpadded_data_shard_shape.dimensions(i),
        wd.padding_low() - wd.size() + wd.stride(), wd.stride());
    limit_window[i] = MultiplyAddDivideOffsetCalculation(
        unpadded_data_shard_shape.dimensions(i),
        unpadded_data_shard_shape.dimensions(i) + wd.padding_low() +
            wd.stride() - 1,
        wd.stride());
    source_left_halo_sizes[i] =
        MultiplyAddDivideOffsetCalculation(
            unpadded_source_shard_shape.dimensions(i), 0, 1) -
        first_window[i];
    source_right_halo_sizes[i] =
        limit_window[i] - MultiplyAddDivideOffsetCalculation(
                              unpadded_source_shard_shape.dimensions(i),
                              unpadded_source_shard_shape.dimensions(i), 1);
    data_left_halo_sizes[i] =
        OffsetCalculation(MultiplyAddDivideOffsetCalculation(
            unpadded_data_shard_shape.dimensions(i), wd.padding_low(), 1)) -
        OffsetCalculation(
            HloOpcode::kMultiply, first_window[i],
            MultiplyAddDivideOffsetCalculation(0, wd.stride(), 1));
    data_right_halo_sizes[i] =
        OffsetCalculation(
            HloOpcode::kMultiply, limit_window[i],
            MultiplyAddDivideOffsetCalculation(0, wd.stride(), 1)) -
        OffsetCalculation(MultiplyAddDivideOffsetCalculation(
            unpadded_data_shard_shape.dimensions(i),
            unpadded_data_shard_shape.dimensions(i) + wd.stride() +
                wd.padding_low() - wd.size(),
            1));
    int64_t max_windows =
        (limit_window[i] - first_window[i]).MaxInRange(0, shard_count);
    auto first_window_hlo =
        first_window[i].Calculate(partition_ordinals[i], &b_);
    // Exchange halos on the source; pad invalid regions with the init value.
    auto resharded_source = ExchangeHaloAndGetValidData(
        source_shard_hlo, source.base_shape(), source_left_halo_sizes[i],
        source_right_halo_sizes[i], 0,
        limit_window[i].Calculate(shard_count - 1), max_windows, i,
        hlo->sharding(), first_window_hlo, replicated_init.hlo(),
        partition_ordinals[i], collective_ops_creator_, next_channel_id_, &b_);
    if (!resharded_source) {
      return DefaultAction(hlo);
    }
    source_shard_hlo = *resharded_source;
    auto offset_start_in_data =
        MultiplyAddDivideOffsetCalculation(wd.stride(), 0, 1)
            .Calculate(first_window_hlo, &b_);
    int64_t padded_data_size =
        (limit_window[i].Calculate(shard_count - 1) - 1) * wd.stride() +
        wd.size();
    int64_t data_shard_size = (max_windows - 1) * wd.stride() + wd.size();
    // Exchange halos on the data operand; pad with the comparison-neutral
    // pad value chosen above.
    auto resharded_data = ExchangeHaloAndGetValidData(
        data_shard_hlo, operand.base_shape(), data_left_halo_sizes[i],
        data_right_halo_sizes[i], wd.padding_low(), padded_data_size,
        data_shard_size, i, hlo->sharding(), offset_start_in_data, pad_value,
        partition_ordinals[i], collective_ops_creator_, next_channel_id_, &b_);
    if (!resharded_data) {
      return DefaultAction(hlo);
    }
    data_shard_hlo = *resharded_data;
  }
  // Halo exchange already accounted for padding on tiled dims; drop it from
  // the shard-level window.
  Window window_on_shard = hlo->window();
  for (int64_t i = 0; i < window_on_shard.dimensions_size(); ++i) {
    int64_t shard_count = hlo->sharding().tile_assignment().dim(i);
    if (shard_count == 1) {
      continue;
    }
    auto reshard_wd = window_on_shard.mutable_dimensions(i);
    reshard_wd->set_padding_low(0);
    reshard_wd->set_padding_high(0);
  }
  auto sharded_select_and_scatter =
      b_.AddInstruction(HloInstruction::CreateSelectAndScatter(
          data_shard_hlo->shape(), data_shard_hlo, select, window_on_shard,
          source_shard_hlo, replicated_init.hlo(),
          hlo->called_computations()[1]));
  SetPartitionedHlo(hlo, [&]() {
    auto shard_shape = MakePartitionedShape(hlo->shape(), hlo->sharding());
    if (ShapeUtil::Compatible(sharded_select_and_scatter->shape(),
                              shard_shape)) {
      return sharded_select_and_scatter;
    }
    // The shard output includes halo regions; slice out this partition's
    // valid region. Shard 0 starts at padding_low instead of its halo size.
    auto zero = b_.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));
    std::vector<HloInstruction*> slice_offsets(shard_shape.rank(), zero);
    for (int64_t i = 0; i < window_on_shard.dimensions_size(); ++i) {
      if (hlo->sharding().tile_assignment().dim(i) == 1) {
        continue;
      }
      int64_t pad_low = hlo->window().dimensions(i).padding_low();
      auto left_halo_size =
          data_left_halo_sizes[i].Calculate(partition_ordinals[i], &b_);
      if (data_left_halo_sizes[i].Calculate(0) == pad_low) {
        slice_offsets[i] = left_halo_size;
      } else {
        auto is_shard0 = b_.AddInstruction(HloInstruction::CreateCompare(
            ShapeUtil::MakeShape(PRED, {}), zero, partition_ordinals[i],
            ComparisonDirection::kEq));
        auto pad_low_hlo = b_.AddInstruction(HloInstruction::CreateConstant(
            LiteralUtil::CreateR0<int32_t>(pad_low)));
        slice_offsets[i] = b_.AddInstruction(HloInstruction::CreateTernary(
            zero->shape(), HloOpcode::kSelect, is_shard0, pad_low_hlo,
            left_halo_size));
      }
    }
    return b_.AddInstruction(HloInstruction::CreateDynamicSlice(
        shard_shape, sharded_select_and_scatter, slice_offsets,
        shard_shape.dimensions()));
  });
  return absl::OkStatus();
}
// Partitions a kTuple: each element is resharded to the matching
// sub-sharding of the tuple's sharding, and the tuple is rebuilt from the
// per-element shards.
absl::Status SpmdPartitioningVisitor::HandleTuple(HloInstruction* hlo) {
  const int64_t element_count = hlo->operand_count();
  std::vector<HloInstruction*> sharded_elements;
  sharded_elements.reserve(element_count);
  for (int64_t index = 0; index < element_count; ++index) {
    // Element `index` must match the tuple sharding's sub-sharding at {index}.
    const HloSharding element_sharding =
        hlo->sharding().GetSubSharding(hlo->shape(), {index});
    sharded_elements.push_back(GetPartitionedHlo(hlo->operand(index))
                                   .Reshard(element_sharding)
                                   .hlo());
  }
  SetPartitionedHlo(hlo, [&]() {
    return b_.AddInstruction(HloInstruction::CreateTuple(sharded_elements));
  });
  return absl::OkStatus();
}
// Runs this visitor over `computation`, builds the partitioned computation
// (root resharded to `root_sharding`), applies code motion for windowed
// dot-general loops, and swaps the new computation in for the original one
// throughout the module. Returns whether anything changed.
absl::StatusOr<bool> SpmdPartitioningVisitor::DoPartition(
    HloComputation* computation, const HloSharding& root_sharding,
    const SpmdPartitionerOptions& options) {
  VLOG(2) << "Partitioning computation " << computation->name() << " for "
          << num_replicas_ << " replicas and " << num_partitions_
          << " partitions";
  TF_RETURN_IF_ERROR(computation->Accept(this));
  HloModule* module = computation->parent();
  // The caller dictates the sharding of the computation's result.
  auto new_root =
      GetPartitionedHlo(computation->root_instruction()).Reshard(root_sharding);
  auto new_computation =
      module->AddEmbeddedComputation(b_.Build(new_root.hlo()));
  TF_RETURN_IF_ERROR(
      DoCodeMotionForWindowedDotGeneralLoops(new_computation, options));
  // Redirect all references from the original computation to the partitioned
  // replacement.
  absl::flat_hash_map<HloComputation*, HloComputation*> replacement;
  replacement[computation] = new_computation;
  module->ReplaceComputations(replacement);
  return changed_;
}
// PartitionId is ambiguous under SPMD partitioning (is the instruction
// replicated, or is its data?), so it is only accepted under manual
// sharding, where it is pinned to device 0 and handled by the default
// action; anything else is rejected as unimplemented.
absl::Status SpmdPartitioningVisitor::HandlePartitionId(HloInstruction* hlo) {
  const bool is_manual = hlo->has_sharding() && hlo->sharding().IsManual();
  if (!is_manual) {
    return Unimplemented(
        "PartitionId instruction is not supported for SPMD partitioning since "
        "the meaning is ambiguous -- whether the instruction is replicated or "
        "the data is replicated, and if the latter which data is replicated.");
  }
  hlo->set_sharding(HloSharding::AssignDevice(0));
  return DefaultAction(hlo);
}
// Builds the default set of collective-op factories used by the SPMD
// partitioner. Partition subgroups are expressed in partition ids; each
// lambda expands them into flattened-id replica groups covering every
// replica (flattened id = replica_id * num_partitions + partition_id).
SPMDCollectiveOpsCreator GetDefaultCollectiveOpsCreator(int64_t num_partitions,
                                                        int64_t num_replicas) {
  return {
      // Creates the partition-id instruction.
      [](SpmdBuilder* b) {
        return b->AddInstruction(HloInstruction::CreatePartitionId());
      },
      // Cross-partition all-reduce over explicit partition subgroups. An
      // empty/singleton subgroup list means "all partitions in one group".
      [num_replicas, num_partitions](
          SpmdBuilder* b, HloInstruction* operand, HloComputation* reduction,
          const std::vector<std::vector<int64_t>>& partition_subgroups,
          int64_t channel_id) {
        std::vector<ReplicaGroup> device_groups;
        if (partition_subgroups.size() <= 1) {
          device_groups.reserve(num_replicas);
          for (int64_t rid = 0; rid < num_replicas; ++rid) {
            device_groups.emplace_back();
            for (int64_t pid = 0; pid < num_partitions; ++pid) {
              device_groups.back().add_replica_ids(rid * num_partitions + pid);
            }
          }
        } else {
          device_groups.reserve(partition_subgroups.size() * num_replicas);
          for (int64_t rid = 0; rid < num_replicas; ++rid) {
            for (const auto& pgroup : partition_subgroups) {
              device_groups.emplace_back();
              for (int64_t pid : pgroup) {
                device_groups.back().add_replica_ids(rid * num_partitions +
                                                     pid);
              }
            }
          }
        }
        // Clone the reduction so the collective owns its own computation.
        HloComputation* reduction_clone =
            reduction->parent()->AddComputationAndUnifyNamesAndIds(
                reduction->Clone(), false);
        HloInstruction* all_reduce =
            b->AddInstruction(HloInstruction::CreateAllReduce(
                operand->shape(), {operand}, reduction_clone,
                CollectiveDeviceList(device_groups),
                false, channel_id,
                true));
        reduction_clone->SetCollectiveCallInstruction(all_reduce);
        return all_reduce;
      },
      // Cross-partition all-reduce taking an iota (compact) device list.
      [num_replicas, num_partitions](
          SpmdBuilder* b, HloInstruction* operand, HloComputation* reduction,
          const IotaReplicaGroupList& partition_group_list,
          int64_t channel_id) {
        HloComputation* reduction_clone =
            reduction->parent()->AddComputationAndUnifyNamesAndIds(
                reduction->Clone(), false);
        HloInstruction* all_reduce =
            b->AddInstruction(HloInstruction::CreateAllReduce(
                operand->shape(), {operand}, reduction_clone,
                ExpandPartitionGroupListAcrossReplicas(
                    partition_group_list, num_replicas, num_partitions),
                false, channel_id,
                true));
        reduction_clone->SetCollectiveCallInstruction(all_reduce);
        return all_reduce;
      },
      // Cross-partition collective-permute. No pairs -> zeros; identity
      // permutation over all partitions -> no-op; otherwise a real permute.
      [num_partitions](SpmdBuilder* b, HloInstruction* operand,
                       std::vector<std::pair<int64_t, int64_t>>& src_dst_pairs,
                       int64_t channel_id) {
        if (src_dst_pairs.empty()) {
          return CreateZero(operand->shape(), b);
        } else {
          bool is_copy =
              src_dst_pairs.size() == num_partitions &&
              absl::c_all_of(src_dst_pairs,
                             [](const std::pair<int64_t, int64_t>& pair) {
                               return pair.first == pair.second;
                             });
          if (is_copy) {
            return operand;
          } else {
            return b->AddInstruction(HloInstruction::CreateCollectivePermute(
                operand->shape(), operand, src_dst_pairs, channel_id));
          }
        }
      },
      // Cross-partition all-to-all. NOTE(review): all operands are assumed to
      // share operands[0]'s shape -- confirm against callers.
      [](SpmdBuilder* b, absl::Span<HloInstruction* const> operands,
         const std::vector<std::vector<int64_t>>& partition_subgroups,
         int64_t channel_id, std::optional<int64_t> split_dimension) {
        std::vector<Shape> shapes(operands.size(), operands[0]->shape());
        const Shape output_shape = (shapes.size() == 1)
                                       ? shapes[0]
                                       : ShapeUtil::MakeTupleShape(shapes);
        std::vector<ReplicaGroup> groups(partition_subgroups.size());
        for (int64_t i = 0; i < groups.size(); ++i) {
          for (int64_t id : partition_subgroups[i]) {
            groups[i].add_replica_ids(id);
          }
        }
        return b->AddInstruction(HloInstruction::CreateAllToAll(
            output_shape, operands, CollectiveDeviceList(groups),
            false, channel_id, split_dimension));
      },
      // Cross-partition all-gather over explicit partition subgroups.
      [num_replicas, num_partitions](
          SpmdBuilder* b, HloInstruction* operand, const Shape& ag_shape,
          const std::vector<std::vector<int64_t>>& partition_subgroups,
          int64_t channel_id, int64_t all_gather_dimension) {
        std::vector<ReplicaGroup> device_groups;
        device_groups.reserve(partition_subgroups.size() * num_replicas);
        for (int64_t i = 0; i < num_replicas; ++i) {
          for (const auto& pgroup : partition_subgroups) {
            device_groups.emplace_back();
            for (int64_t pid : pgroup) {
              device_groups.back().add_replica_ids(i * num_partitions + pid);
            }
          }
        }
        return b->AddInstruction(HloInstruction::CreateAllGather(
            ag_shape, {operand}, all_gather_dimension,
            CollectiveDeviceList(device_groups),
            false, channel_id,
            true));
      },
      // Cross-partition all-gather taking an iota (compact) device list.
      [num_replicas, num_partitions](
          SpmdBuilder* b, HloInstruction* operand, const Shape& ag_shape,
          const IotaReplicaGroupList& partition_group_list, int64_t channel_id,
          int64_t all_gather_dimension) {
        return b->AddInstruction(HloInstruction::CreateAllGather(
            ag_shape, {operand}, all_gather_dimension,
            ExpandPartitionGroupListAcrossReplicas(
                partition_group_list, num_replicas, num_partitions),
            false, channel_id,
            true));
      }};
}
// Convenience constructor: delegates to the full constructor using the
// default collective-ops creator for the given partition/replica counts.
SpmdPartitioner::SpmdPartitioner(int64_t num_partitions, int64_t num_replicas,
                                 SpmdPartitionerOptions options)
    : SpmdPartitioner(
          num_partitions, num_replicas, std::move(options),
          GetDefaultCollectiveOpsCreator(num_partitions, num_replicas)) {}
// All-gathers the shards of `operand` along `selected_dims` of `sharding`,
// using one all-gather per selected dimension, and returns the gathered
// result.
HloInstruction* SpmdPartitioner::AllGatherShards(
    SpmdBuilder* b, HloInstruction* operand, const HloSharding& sharding,
    int64_t* next_channel_id, absl::Span<const int64_t> selected_dims,
    const SPMDCollectiveOpsCreator& collectives_creator) {
  const auto result_and_ag = AllGatherShardsInternal(
      b, operand, sharding, next_channel_id, selected_dims,
      collectives_creator, /*per_dim_ag=*/true);
  return result_and_ag.first;
}
// All-gathers the shards of `operand` along `selected_dims`. Returns
// {final result, raw all-gather instruction} (the latter is nullptr when no
// all-gather was issued). With `per_dim_ag`, one all-gather is emitted per
// selected dimension directly in place; otherwise a single combined
// all-gather is emitted on a leading scratch dimension, then
// reshaped/transposed back into the gathered shape.
std::pair<HloInstruction*, HloInstruction*>
SpmdPartitioner::AllGatherShardsInternal(
    SpmdBuilder* b, HloInstruction* operand, const HloSharding& sharding,
    int64_t* next_channel_id, absl::Span<const int64_t> selected_dims,
    const SPMDCollectiveOpsCreator& collectives_creator, bool per_dim_ag) {
  if (selected_dims.empty()) {
    return std::make_pair(operand, nullptr);
  }
  CHECK(!sharding.IsTileMaximal());
  if (per_dim_ag || selected_dims.size() == 1) {
    // Per-dimension path: grow each selected dimension in place, iterating
    // from the last selected dim to the first.
    HloInstruction* result = operand;
    Shape result_shape = operand->shape();
    for (auto it = selected_dims.rbegin(); it != selected_dims.rend(); ++it) {
      if (sharding.tile_assignment().dim(*it) == 1) {
        continue;
      }
      // Prefer the compact iota device-list form when available.
      auto partition_group_list = GetIotaPartitionGroupsForReplication(
          sharding, {*it}, num_partitions_);
      if (partition_group_list.has_value() &&
          collectives_creator
              .create_cross_partition_all_gather_with_iota_device_list) {
        result_shape.set_dimensions(
            *it, result_shape.dimensions(*it) *
                     partition_group_list.value().num_devices_per_group());
        result = collectives_creator
                     .create_cross_partition_all_gather_with_iota_device_list(
                         b, result, result_shape, partition_group_list.value(),
                         (*next_channel_id)++,
                         *it);
      } else {
        auto partition_subgroups =
            GetPartitionGroupsForReplication(sharding, {*it});
        result_shape.set_dimensions(
            *it, result_shape.dimensions(*it) * partition_subgroups[0].size());
        result = collectives_creator.create_cross_partition_all_gather(
            b, result, result_shape, partition_subgroups, (*next_channel_id)++,
            *it);
      }
    }
    return std::make_pair(result, result);
  }
  // Combined path: add a leading size-1 dimension, all-gather on it once, and
  // then reshape/transpose the gathered data into the expanded dimensions.
  std::vector<int64_t> shape;
  shape.push_back(1);
  for (int64_t dim : operand->shape().dimensions()) {
    shape.push_back(dim);
  }
  auto reshape = b->AddInstruction(HloInstruction::CreateReshape(
      ShapeUtil::MakeShape(operand->shape().element_type(), shape), operand));
  HloInstruction* ag = nullptr;
  HloInstruction* result = reshape;
  auto partition_group_list = GetIotaPartitionGroupsForReplication(
      sharding, selected_dims, num_partitions_);
  if (partition_group_list.has_value() &&
      collectives_creator
          .create_cross_partition_all_gather_with_iota_device_list) {
    shape[0] *= partition_group_list.value().num_devices_per_group();
    result =
        collectives_creator
            .create_cross_partition_all_gather_with_iota_device_list(
                b, result,
                ShapeUtil::MakeShape(operand->shape().element_type(), shape),
                partition_group_list.value(), (*next_channel_id)++,
                0);
  } else {
    auto partition_subgroups =
        GetPartitionGroupsForReplication(sharding, selected_dims);
    shape[0] *= partition_subgroups[0].size();
    result = collectives_creator.create_cross_partition_all_gather(
        b, result, ShapeUtil::MakeShape(operand->shape().element_type(), shape),
        partition_subgroups, (*next_channel_id)++,
        0);
  }
  ag = result;
  // Dimensions that actually grow (tiled and selected).
  std::vector<int64_t> tiled_dims;
  for (int64_t i = 0; i < sharding.tile_assignment().num_dimensions(); ++i) {
    if (sharding.tile_assignment().dim(i) > 1 &&
        absl::c_linear_search(selected_dims, i)) {
      tiled_dims.push_back(i);
    }
  }
  if (tiled_dims.size() > 1) {
    // Split the combined leading dimension into one dimension per tiled dim.
    std::vector<int64_t> split_dim_shape;
    split_dim_shape.reserve(tiled_dims.size() + operand->shape().rank());
    for (int64_t i : tiled_dims) {
      split_dim_shape.push_back(sharding.tile_assignment().dim(i));
    }
    for (int64_t dim : operand->shape().dimensions()) {
      split_dim_shape.push_back(dim);
    }
    result = b->AddInstruction(HloInstruction::CreateReshape(
        ShapeUtil::MakeShape(operand->shape().element_type(), split_dim_shape),
        result));
  }
  // Build the permutation that moves each leading split dimension next to its
  // corresponding data dimension so the final reshape can merge them.
  std::vector<int64_t> xpose_permutation(result->shape().rank());
  int64_t split_dims_added = 0;
  for (int64_t i = 0; i < xpose_permutation.size(); ++i) {
    if (sharding.tile_assignment().dim(i - split_dims_added) == 1 ||
        !absl::c_linear_search(selected_dims, i - split_dims_added)) {
      xpose_permutation[i] = i + tiled_dims.size() - split_dims_added;
    } else {
      xpose_permutation[i] = split_dims_added;
      xpose_permutation[i + 1] = i + tiled_dims.size() - split_dims_added;
      split_dims_added++;
      i++;
    }
  }
  result = b->AddInstruction(HloInstruction::CreateTranspose(
      ShapeInference::InferTransposeShape(result->shape(), xpose_permutation)
          .value(),
      result, xpose_permutation));
  // Merge each (split, data) dimension pair back into one gathered dimension.
  auto ag_shape = operand->shape();
  for (int64_t i : tiled_dims) {
    ag_shape.set_dimensions(
        i, ag_shape.dimensions(i) * sharding.tile_assignment().dim(i));
  }
  result = b->AddInstruction(HloInstruction::CreateReshape(ag_shape, result));
  return std::make_pair(result, ag);
}
// All-reduces `operand` across the device groups formed by `selected_dims`
// of `sharding`, issuing one all-reduce per selected dimension.
HloInstruction* SpmdPartitioner::AllReduceAlongShardingDims(
    SpmdBuilder* b, HloInstruction* operand, const HloSharding& sharding,
    int64_t* next_channel_id, absl::Span<const int64_t> selected_dims,
    const SPMDCollectiveOpsCreator& collectives_creator,
    HloComputation* reduction) {
  return AllReduceAlongShardingDimsInternal(
      b, operand, sharding, next_channel_id, selected_dims, collectives_creator,
      reduction, /*per_dim_ar=*/true);
}
// Implementation of AllReduceAlongShardingDims. With `per_dim_ar` false, a
// single all-reduce covers all selected dimensions at once; otherwise one
// all-reduce is emitted per selected dimension (last to first). Both paths
// prefer the compact iota device-list form when the creator supports it.
HloInstruction* SpmdPartitioner::AllReduceAlongShardingDimsInternal(
    SpmdBuilder* b, HloInstruction* operand, const HloSharding& sharding,
    int64_t* next_channel_id, absl::Span<const int64_t> selected_dims,
    const SPMDCollectiveOpsCreator& collectives_creator,
    HloComputation* reduction, bool per_dim_ar) {
  if (!per_dim_ar) {
    auto partition_group_list = GetIotaPartitionGroupsForReplication(
        sharding, selected_dims, num_partitions_);
    if (partition_group_list.has_value() &&
        collectives_creator
            .create_cross_partition_all_reduce_with_iota_device_list) {
      return collectives_creator
          .create_cross_partition_all_reduce_with_iota_device_list(
              b, operand, reduction, partition_group_list.value(),
              (*next_channel_id)++);
    } else {
      auto partition_subgroups =
          GetPartitionGroupsForReplication(sharding, selected_dims);
      return collectives_creator.create_cross_partition_all_reduce(
          b, operand, reduction, partition_subgroups, (*next_channel_id)++);
    }
  }
  auto result = operand;
  for (auto it = selected_dims.rbegin(); it != selected_dims.rend(); ++it) {
    // A dimension with a single shard needs no reduction.
    if (sharding.tile_assignment().dim(*it) == 1) {
      continue;
    }
    auto partition_group_list =
        GetIotaPartitionGroupsForReplication(sharding, {*it}, num_partitions_);
    if (partition_group_list.has_value() &&
        collectives_creator
            .create_cross_partition_all_reduce_with_iota_device_list) {
      result = collectives_creator
                   .create_cross_partition_all_reduce_with_iota_device_list(
                       b, result, reduction, partition_group_list.value(),
                       (*next_channel_id)++);
    } else {
      auto partition_subgroups =
          GetPartitionGroupsForReplication(sharding, {*it});
      result = collectives_creator.create_cross_partition_all_reduce(
          b, result, reduction, partition_subgroups, (*next_channel_id)++);
    }
  }
  return result;
}
// Partitions `computation` in place: constructs a fresh visitor and runs its
// DoPartition, which replaces the computation in the module with the
// partitioned version. Returns whether anything changed.
absl::StatusOr<bool> SpmdPartitioner::PartitionComputation(
    HloComputation* computation, const HloSharding& root_sharding,
    int64_t* next_channel_id, SpmdLogger* logger, const CallGraph& call_graph) {
  return CreateVisitor(computation, num_partitions_, num_replicas_,
                       collective_ops_creator_, next_channel_id, logger,
                       options_, call_graph)
      ->DoPartition(computation, root_sharding, options_);
}
// Builds the per-computation visitor used by PartitionComputation.
// NOTE(review): presumably virtual so subclasses can inject a customized
// visitor -- confirm against the header.
std::unique_ptr<SpmdPartitioningVisitor> SpmdPartitioner::CreateVisitor(
    HloComputation* computation, int64_t num_partitions, int64_t num_replicas,
    const SPMDCollectiveOpsCreator& collective_ops_creator,
    int64_t* next_channel_id, SpmdLogger* logger,
    SpmdPartitionerOptions options, const CallGraph& call_graph) {
  return std::make_unique<SpmdPartitioningVisitor>(
      computation, num_partitions, num_replicas, collective_ops_creator,
      next_channel_id, logger, std::move(options), this, call_graph);
}
// Estimates the memory footprint of `hlo` in bytes: the sum of its operand
// shape sizes, plus its own output shape size except for ops whose result
// typically reuses/aliases operand buffers.
int64_t SpmdPartitioner::MemoryCostInBytes(HloInstruction* hlo) {
  // Sum of the byte sizes of all operand shapes.
  int64_t operand_bytes = 0;
  for (const HloInstruction* operand : hlo->operands()) {
    operand_bytes += ShapeSizeInBytes(operand->shape());
  }
  // These opcodes are charged for their operands only (their output is
  // generally backed by the same buffers as the inputs).
  bool count_operands_only;
  switch (hlo->opcode()) {
    case HloOpcode::kAllReduce:
    case HloOpcode::kDynamicUpdateSlice:
    case HloOpcode::kScatter:
    case HloOpcode::kWhile:
    case HloOpcode::kTuple:
      count_operands_only = true;
      break;
    default:
      count_operands_only = false;
      break;
  }
  return count_operands_only ? operand_bytes
                             : operand_bytes + ShapeSizeInBytes(hlo->shape());
}
// Estimates the number of bytes moved over the interconnect by the collective
// op `hlo`. CHECK-fails if `hlo` is not a collective. Non-modeled collectives
// return 0.
int64_t SpmdPartitioner::CommunicationCostInBytes(HloInstruction* hlo) {
  CHECK(IsCollective(hlo));
  switch (hlo->opcode()) {
    case HloOpcode::kAllReduce:
      // All-reduce moves roughly twice the data size (e.g. reduce-scatter
      // followed by all-gather in a ring implementation).
      return ShapeSizeInBytes(hlo->shape()) * 2;
    case HloOpcode::kCollectivePermute:
      return ShapeSizeInBytes(hlo->shape());
    case HloOpcode::kAllGather: {
      HloAllGatherInstruction* ag = Cast<HloAllGatherInstruction>(hlo);
      // Group size is inferred from the ratio of output to input extent along
      // the all-gather dimension.
      int64_t group_size =
          ag->shape().dimensions(ag->all_gather_dimension()) /
          ag->operand(0)->shape().dimensions(ag->all_gather_dimension());
      // Each participant already holds 1/group_size of the result locally.
      return ShapeSizeInBytes(hlo->shape()) * (group_size - 1) / group_size;
    }
    case HloOpcode::kAllToAll: {
      int64_t group_size;
      if (!hlo->replica_groups().empty()) {
        group_size = hlo->replica_groups()[0].replica_ids_size();
      } else {
        // With no explicit groups, a channel id means cross-partition;
        // otherwise the collective is cross-replica.
        group_size = hlo->channel_id() ? num_partitions_ : num_replicas_;
      }
      // One shard per participant stays local; the rest is exchanged.
      return ShapeSizeInBytes(hlo->shape()) * (group_size - 1) / group_size;
    }
    default:
      return 0;
  }
}
// Pass entry point: preprocesses shardings/HLOs, partitions the (flattened)
// entry computation, validates or updates the module signature, and runs a
// cleanup pipeline. Returns whether the module was changed.
absl::StatusOr<bool> SpmdPartitioner::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  set_execution_threads(execution_threads);
  TF_RETURN_IF_ERROR(PreprocessSharding(module, execution_threads));
  TF_RETURN_IF_ERROR(PreprocessHlos(module, execution_threads));
  XLA_VLOG_LINES(1, SpmdLogger::ReportBeforePartition(
                        *module, options_.report_instruction_count));
  // Record the original entry parameter/output shardings on the module before
  // partitioning rewrites them; every parameter and the root must be sharded.
  std::vector<HloSharding> entry_params_shardings;
  const auto num_parameters = module->entry_computation()->num_parameters();
  entry_params_shardings.reserve(num_parameters);
  for (int64_t i = 0; i < num_parameters; ++i) {
    auto param = module->entry_computation()->parameter_instruction(i);
    CHECK(param->has_sharding()) << "Missing sharding in entry parameter " << i;
    entry_params_shardings.push_back(param->sharding());
  }
  module->set_spmd_parameters_shardings(entry_params_shardings);
  auto entry_root = module->entry_computation()->root_instruction();
  CHECK(entry_root->has_sharding()) << "Missing sharding in entry root.";
  module->set_spmd_output_sharding(entry_root->sharding());
  // The partitioner requires a flattened call graph (no computation reached
  // from more than one call site).
  FlattenCallGraph flatten;
  TF_ASSIGN_OR_RETURN(auto changed, flatten.Run(module));
  SpmdLogger logger(options_.report_instruction_count,
                    !VLOG_IS_ON(1));
  auto program_shape = module->entry_computation()->ComputeProgramShape();
  // Collectives created during partitioning need fresh, unused channel ids.
  int64_t next_channel_id = hlo_query::NextChannelId(*module);
  HloSharding root_sharding = entry_root->sharding();
  std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
  CHECK(call_graph->IsFlattened());
  TF_ASSIGN_OR_RETURN(
      bool partition_changed,
      PartitionComputation(module->entry_computation(), root_sharding,
                           &next_channel_id, &logger, *call_graph));
  changed |= partition_changed;
  auto new_program_shape = module->entry_computation()->ComputeProgramShape();
  if (!options_.allow_module_signature_change) {
    // The caller relies on the entry signature: verify partitioning kept the
    // result and every parameter shape identical (layout minor-to-major only).
    TF_RET_CHECK(Shape::Equal().MinorToMajorOnlyInLayout()(
        program_shape.result(), new_program_shape.result()))
        << "Result shape changed for the entry computation";
    TF_RET_CHECK(program_shape.parameters_size() ==
                 new_program_shape.parameters_size())
        << "Parameter count changed for the entry computation";
    for (int64_t i = 0; i < program_shape.parameters_size(); ++i) {
      TF_RET_CHECK(Shape::Equal().MinorToMajorOnlyInLayout()(
          program_shape.parameters(i), new_program_shape.parameters(i)))
          << "Parameter shape changed for the entry computation";
    }
  } else {
    // Signature changes are allowed: carry over the old entry layouts onto the
    // new (partitioned) shapes and install the updated computation layout.
    auto update_shape = [this](Shape* subshape, const xla::ShapeIndex& index) {
      if (subshape->IsArray() && subshape->has_layout()) {
        UpdateLayout(subshape);
      }
    };
    const auto& old_entry_layout = module->entry_computation_layout();
    for (int64_t i = 0; i < new_program_shape.parameters_size(); ++i) {
      TF_RETURN_IF_ERROR(LayoutUtil::CopyLayoutBetweenShapes(
          old_entry_layout.parameter_shape(i),
          new_program_shape.mutable_parameters(i)));
      ShapeUtil::ForEachMutableSubshape(new_program_shape.mutable_parameters(i),
                                        update_shape);
    }
    TF_RETURN_IF_ERROR(LayoutUtil::CopyLayoutBetweenShapes(
        old_entry_layout.result_shape(), new_program_shape.mutable_result()));
    ShapeUtil::ForEachMutableSubshape(new_program_shape.mutable_result(),
                                      update_shape);
    HloModuleConfig config = module->config();
    *config.mutable_entry_computation_layout() =
        ComputationLayout(new_program_shape, false);
    module->set_config(config);
  }
  XLA_VLOG_LINES(1, SpmdLogger::ReportAfterPartition(
                        *module, options_.report_instruction_count));
  XLA_VLOG_LINES(1, logger.MakeReport());
  if (changed) {
    // Partitioning leaves dead code and redundant tuples behind; clean up.
    HloPassPipeline pass("spmd-cleanup");
    pass.AddPass<HloDCE>(true);
    pass.AddPass<TupleSimplifier>();
    pass.AddPass<HloDCE>(true);
    pass.AddPass<HloCSE>(false);
    pass.AddPass<FlattenCallGraph>();
    TF_RETURN_IF_ERROR(pass.Run(module, execution_threads).status());
  }
  // Sharding annotations are meaningless after partitioning; strip them.
  TF_RETURN_IF_ERROR(ClearShardingAttributes(module, execution_threads));
  return changed;
}
// Validates sharding annotations and fills in defaults so every instruction
// has a sharding before partitioning: side-effecting ops must already be
// sharded (and usually not replicated); un-annotated ops default to
// replicated, except kRng which is pinned to device 0.
absl::Status SpmdPartitioner::PreprocessSharding(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  for (HloComputation* computation : module->computations(execution_threads)) {
    for (HloInstruction* hlo : computation->instructions()) {
      // kRng and custom calls with a registered partitioner are exempt from
      // the side-effect sharding requirements.
      if (hlo->HasSideEffectNoRecurse() && hlo->opcode() != HloOpcode::kRng &&
          (hlo->opcode() != HloOpcode::kCustomCall ||
           GetCustomCallPartitioner(hlo->custom_call_target()) == nullptr)) {
        TF_RET_CHECK(hlo->has_sharding())
            << "Side-effect HLO must have sharding: " << hlo->ToString();
        TF_RET_CHECK(!HasReplicatedSharding(hlo->sharding()) ||
                     CanSideEffectingHaveReplicatedSharding(hlo))
            << "side-effect HLO cannot have a replicated sharding: "
            << hlo->ToString();
      }
      if (!hlo->has_sharding()) {
        if (hlo->opcode() == HloOpcode::kRng) {
          // Pin RNG to a single device so all partitions see consistent
          // random values.
          hlo->set_sharding(HloSharding::AssignDevice(0));
        } else {
          hlo->set_sharding(
              HloSharding::Single(hlo->shape(), HloSharding::Replicate()));
        }
      }
    }
  }
  if (!options_.allow_module_signature_change) {
    // With a fixed module signature, the entry root must be replicated/manual
    // (or live on a single device), and each parameter must be replicated or
    // single-device; anything else would change the caller-visible shapes.
    const HloComputation* entry = module->entry_computation();
    TF_RET_CHECK(entry->root_instruction()->has_sharding());
    const HloSharding& root_sharding = entry->root_instruction()->sharding();
    if (!root_sharding.UniqueDevice().has_value()) {
      if (root_sharding.IsTuple()) {
        TF_RET_CHECK(absl::c_all_of(root_sharding.tuple_elements(),
                                    [](const HloSharding& s) {
                                      return s.IsReplicated() || s.IsManual();
                                    }))
            << "Unsupported entry root sharding: " << root_sharding.ToString();
      } else {
        TF_RET_CHECK(root_sharding.IsReplicated() || root_sharding.IsManual())
            << "Unsupported entry root sharding: " << root_sharding.ToString();
      }
    }
    for (const HloInstruction* param : entry->parameter_instructions()) {
      TF_RET_CHECK(param->has_sharding());
      TF_RET_CHECK(param->sharding().IsReplicated() ||
                   param->sharding().UniqueDevice().has_value())
          << "Unsupported entry parameter sharding:"
          << param->sharding().ToString();
    }
  }
  return absl::OkStatus();
}
// Rewrites a few HLO patterns into forms that partition more efficiently:
//   1. slice(pad(x)) -> a single merged pad (when the slice would otherwise
//      trigger multiple halo exchanges),
//   2. two-operand concatenate matching a rotate-right pattern -> an internal
//      RotateRight custom call,
//   3. three-operand concatenate matching a pad-with-wrap pattern -> pad +
//      two rotates + iota-based selects.
absl::Status SpmdPartitioner::PreprocessHlos(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  // Skips through single-use kCopy chains to the underlying producer. Returns
  // nullptr when `check_single_use` is set and the producer has other users
  // (so it cannot safely be fused into the rewrite).
  auto skip_copy_operands = [](HloInstruction* operand,
                               bool check_single_use =
                                   true) -> HloInstruction* {
    while (operand->user_count() == 1 &&
           operand->opcode() == HloOpcode::kCopy) {
      operand = operand->mutable_operand(0);
    }
    if (check_single_use && operand->user_count() != 1) {
      return nullptr;
    }
    return operand;
  };
  for (HloComputation* computation : module->computations(execution_threads)) {
    for (HloInstruction* hlo : computation->MakeInstructionPostOrder()) {
      // Only tiled, non-manual shardings benefit from these rewrites.
      if (hlo->sharding().IsTileMaximal() || hlo->sharding().IsManual()) {
        continue;
      }
      if (hlo->opcode() == HloOpcode::kSlice) {
        HloInstruction* operand = skip_copy_operands(hlo->mutable_operand(0));
        if (operand == nullptr || operand->sharding() != hlo->sharding()) {
          continue;
        }
        if (operand->opcode() == HloOpcode::kPad) {
          // Try to fold the slice into the pad by adjusting the edge padding.
          // Bail out (merged_padding = nullopt) on interior padding or strided
          // slices, which cannot be expressed as a pad.
          std::optional<PaddingConfig> merged_padding =
              operand->padding_config();
          bool may_have_multi_halo_exchanges = false;
          for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
            const auto& dim = operand->padding_config().dimensions(i);
            if (dim.interior_padding() != 0 || hlo->slice_strides(i) != 1) {
              merged_padding = std::nullopt;
              break;
            }
            if (hlo->sharding().tile_assignment().dim(i) != 1 &&
                (dim.edge_padding_low() != 0 || dim.edge_padding_high() != 0) &&
                hlo->shape().dimensions(i) != operand->shape().dimensions(i)) {
              may_have_multi_halo_exchanges = true;
            }
            auto* merged_dim = merged_padding->mutable_dimensions(i);
            merged_dim->set_edge_padding_low(dim.edge_padding_low() -
                                             hlo->slice_starts(i));
            merged_dim->set_edge_padding_high(hlo->slice_limits(i) -
                                              operand->shape().dimensions(i));
          }
          // Only rewrite when the separate pad+slice would otherwise require
          // multiple halo exchanges on a partitioned dimension.
          if (merged_padding.has_value() && may_have_multi_halo_exchanges) {
            HloInstruction* new_pad =
                computation->AddInstruction(HloInstruction::CreatePad(
                    hlo->shape(), operand->mutable_operand(0),
                    operand->mutable_operand(1), *merged_padding));
            new_pad->set_metadata(operand->metadata());
            new_pad->set_sharding(hlo->sharding());
            TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(new_pad));
            TF_RETURN_IF_ERROR(
                computation->RemoveInstructionAndUnusedOperands(hlo));
          }
        }
      }
      if (hlo->opcode() == HloOpcode::kConcatenate) {
        const int64_t dim = hlo->concatenate_dimension();
        // Rewrites only pay off when the concat dimension is partitioned.
        if (hlo->sharding().tile_assignment().dim(dim) == 1) {
          continue;
        }
        if (hlo->operand_count() == 2) {
          // concat(slice(x, tail), slice(x, head)) == rotate-right(x).
          HloInstruction* lhs = skip_copy_operands(hlo->mutable_operand(0));
          HloInstruction* rhs = skip_copy_operands(hlo->mutable_operand(1));
          if (lhs == nullptr || rhs == nullptr) {
            continue;
          }
          const int64_t amount = FindRotateRightPattern(hlo, lhs, rhs);
          if (amount < 0) {
            continue;
          }
          TF_RETURN_IF_ERROR(HandleRotateRightWhilePreprocessing(computation));
          HloInstruction* to_rotate = lhs->mutable_operand(0);
          HloInstruction* rotate = computation->AddInstruction(
              CreateCustomCallSPMDInternal_RotateRight(to_rotate, dim, amount));
          rotate->set_metadata(hlo->metadata());
          rotate->set_sharding(hlo->sharding());
          TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(rotate));
          TF_RETURN_IF_ERROR(
              computation->RemoveInstructionAndUnusedOperands(hlo));
        } else if (hlo->operand_count() == 3) {
          // concat(slice(mid, tail), mid, slice(mid, head)) is "pad with
          // wrap": rebuild it as zero-pad(mid) combined with two rotated
          // copies selected into the low/high pad regions.
          HloInstruction* lhs = skip_copy_operands(hlo->mutable_operand(0));
          // `mid` may legitimately have other users; only the slices must be
          // single-use.
          HloInstruction* mid = skip_copy_operands(hlo->mutable_operand(1),
                                                   false);
          HloInstruction* rhs = skip_copy_operands(hlo->mutable_operand(2));
          std::optional<PadWithWrapPattern> pad_pattern =
              FindPadWithWrapPattern(hlo, lhs, mid, rhs);
          if (!pad_pattern) {
            continue;
          }
          // Step 1: zero-pad `mid` to the full concatenated shape.
          PaddingConfig padding_config =
              MakeNoPaddingConfig(hlo->shape().rank());
          auto* padding_config_dim = padding_config.mutable_dimensions(dim);
          const int64_t low_pad = lhs->shape().dimensions(dim);
          const int64_t high_pad = rhs->shape().dimensions(dim);
          padding_config_dim->set_edge_padding_low(low_pad);
          padding_config_dim->set_edge_padding_high(high_pad);
          HloInstruction* zero =
              computation->AddInstruction(HloInstruction::CreateConstant(
                  LiteralUtil::Zero(hlo->shape().element_type())));
          zero->set_sharding(HloSharding::Replicate());
          HloInstruction* pad =
              computation->AddInstruction(HloInstruction::CreatePad(
                  hlo->shape(), mid, zero, padding_config));
          pad->set_metadata(hlo->metadata());
          pad->set_sharding(hlo->sharding());
          // Step 2: rotate the padded value so the wrapped slices land in the
          // low/high pad regions.
          const int64_t padded_size = hlo->shape().dimensions(dim);
          const int rotate_lhs_amount =
              padded_size - (pad_pattern->lhs_slice_start + low_pad);
          HloInstruction* rotate_lhs = computation->AddInstruction(
              CreateCustomCallSPMDInternal_RotateRight(pad, dim,
                                                       rotate_lhs_amount));
          rotate_lhs->set_metadata(hlo->metadata());
          rotate_lhs->set_sharding(hlo->sharding());
          // Re-applies elementwise modifier ops (recorded innermost-first by
          // the pattern matcher) on top of a rotated value.
          auto apply_modifiers =
              [&](HloInstruction* inst,
                  const std::vector<const HloInstruction*>& modifiers) {
                for (auto it = modifiers.crbegin(), end = modifiers.crend();
                     it != end; ++it) {
                  const HloInstruction* modifier = *it;
                  Shape new_shape = ShapeUtil::ChangeElementType(
                      inst->shape(), modifier->shape().element_type());
                  inst = computation->AddInstruction(
                      modifier->CloneWithNewOperands(new_shape, {inst}));
                }
                return inst;
              };
          rotate_lhs = apply_modifiers(rotate_lhs, pad_pattern->lhs_modifiers);
          const int64_t rotate_rhs_amount =
              padded_size - (pad_pattern->rhs_slice_start + low_pad + high_pad);
          HloInstruction* rotate_rhs = computation->AddInstruction(
              CreateCustomCallSPMDInternal_RotateRight(pad, dim,
                                                       rotate_rhs_amount));
          rotate_rhs->set_metadata(hlo->metadata());
          rotate_rhs->set_sharding(hlo->sharding());
          rotate_rhs = apply_modifiers(rotate_rhs, pad_pattern->rhs_modifiers);
          // Step 3: use an iota along `dim` to select, per index, between the
          // rotated copies (pad regions) and the padded middle (interior).
          const Shape iota_shape =
              ShapeUtil::ChangeElementType(hlo->shape(), U32);
          HloInstruction* iota = computation->AddInstruction(
              HloInstruction::CreateIota(iota_shape, dim));
          iota->set_metadata(hlo->metadata());
          iota->set_sharding(hlo->sharding());
          struct SelectSpec {
            int64_t limit;
            HloInstruction* hlo;
            Comparison::Direction cmp;
          };
          // index < low_pad      -> rotate_lhs
          // index >= size - high -> rotate_rhs
          const std::array<SelectSpec, 2> selects = {
              {
               {low_pad, rotate_lhs, Comparison::Direction::kLt},
               {padded_size - high_pad, rotate_rhs,
                Comparison::Direction::kGe}}};
          Shape pred_shape = ShapeUtil::ChangeElementType(hlo->shape(), PRED);
          HloInstruction* merged = pad;
          for (const SelectSpec& select_spec : selects) {
            HloInstruction* limit =
                computation->AddInstruction(HloInstruction::CreateConstant(
                    LiteralUtil::CreateR0<uint32_t>(select_spec.limit)));
            limit->set_sharding(HloSharding::Replicate());
            HloInstruction* limit_bcast = computation->AddInstruction(
                HloInstruction::CreateBroadcast(iota_shape, limit, {}));
            limit_bcast->set_metadata(hlo->metadata());
            limit_bcast->set_sharding(hlo->sharding());
            HloInstruction* compare =
                computation->AddInstruction(HloInstruction::CreateCompare(
                    pred_shape, iota, limit_bcast, select_spec.cmp));
            compare->set_metadata(hlo->metadata());
            compare->set_sharding(hlo->sharding());
            merged = computation->AddInstruction(HloInstruction::CreateTernary(
                hlo->shape(), HloOpcode::kSelect, compare, select_spec.hlo,
                merged));
            merged->set_metadata(hlo->metadata());
            merged->set_sharding(hlo->sharding());
          }
          TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(merged));
          TF_RETURN_IF_ERROR(
              computation->RemoveInstructionAndUnusedOperands(hlo));
        }
      }
    }
  }
  return absl::OkStatus();
}
}  // namespace spmd
}  // namespace xla
#include "xla/service/spmd/spmd_partitioner.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/layout_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/sharding_format_picker.h"
#include "xla/service/spmd/spmd_prepare.h"
#include "xla/shape.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace spmd {
namespace {
using ::testing::_;
using ::testing::AllOf;
namespace op = xla::testing::opcode_matchers;
// Test fixture for SPMD partitioning, parameterized over the sharding
// annotation format (V1 vs. best-effort V2) so every test exercises both.
class SpmdPartitioningTest
    : public HloTestBase,
      public ::testing::WithParamInterface<ShardingFormatPicker::ShardingType> {
 public:
  // Parses `hlo_module`, optionally rewrites its shardings to the
  // parameterized format, runs SpmdPrepare + SpmdPartitioner for
  // `num_devices` partitions (1 replica), verifies the result, and returns
  // the partitioned module. The remaining arguments map onto
  // SpmdPartitionerOptions knobs.
  absl::StatusOr<std::unique_ptr<HloModule>> PartitionComputation(
      absl::string_view hlo_module, int64_t num_devices,
      bool conv_halo_exchange_always_on_lhs = true,
      bool choose_faster_windowed_einsum = false,
      bool unroll_windowed_einsum = false,
      bool bidirectional_windowed_einsum = false,
      int64_t threshold_for_windowed_einsum_mib = -1,
      PartitioningMethod gather_method = PartitioningMethod::kIndexParallel,
      PartitioningMethod scatter_method = PartitioningMethod::kIndexParallel) {
    SpmdPartitionerOptions options;
    options.conv_halo_exchange_always_on_lhs = conv_halo_exchange_always_on_lhs;
    options.allow_module_signature_change = true;
    options.choose_faster_windowed_einsum_over_mem =
        choose_faster_windowed_einsum;
    options.unroll_windowed_einsum = unroll_windowed_einsum;
    options.bidirectional_windowed_einsum = bidirectional_windowed_einsum;
    // Negative threshold means "keep the option's default".
    if (threshold_for_windowed_einsum_mib >= 0) {
      options.threshold_for_windowed_einsum_mib =
          threshold_for_windowed_einsum_mib;
    }
    options.gather_partition_method = gather_method;
    options.scatter_partition_method = scatter_method;
    auto collective_ops_creator =
        GetDefaultCollectiveOpsCreator(num_devices, 1);
    // Disable the dedicated all-gather creator so the partitioner falls back
    // to its all-reduce-based resharding path in these tests.
    collective_ops_creator.create_cross_partition_all_gather = nullptr;
    HloModuleConfig config = GetModuleConfigForTest();
    config.set_use_spmd_partitioning(true);
    config.set_num_partitions(num_devices);
    TF_ASSIGN_OR_RETURN(auto module,
                        ParseAndReturnVerifiedModule(hlo_module, config));
    // Optionally convert the module's shardings to the format under test.
    ShardingFormatPicker format_picker(GetParam());
    TF_ASSIGN_OR_RETURN(bool changed, format_picker.Run(module.get()));
    if (changed) {
      VLOG(1) << "Sharding format changed: "
              << module->ToString(HloPrintOptions()
                                      .set_print_program_shape(false)
                                      .set_print_operand_shape(false));
    }
    HloPassPipeline pass("spmd-partitioning");
    pass.AddPass<HloVerifier>(false,
                              false);
    pass.AddPass<SpmdPrepare>();
    pass.AddPass<SpmdPartitioner>(num_devices, 1, options,
                                  collective_ops_creator);
    pass.AddPass<HloVerifier>(false,
                              false);
    TF_RETURN_IF_ERROR(pass.Run(module.get()).status());
    VerifyNoShardingOnCollectives(module.get());
    return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
  }

  // Asserts that no collective op in the partitioned module still carries a
  // sharding attribute (the partitioner must strip them).
  void VerifyNoShardingOnCollectives(HloModule* module) {
    for (const HloComputation* c : module->computations()) {
      for (const HloInstruction* inst : c->instructions()) {
        if (!absl::c_linear_search(
                std::vector<HloOpcode>{
                    HloOpcode::kAllToAll, HloOpcode::kAllReduce,
                    HloOpcode::kAllGather, HloOpcode::kCollectivePermute,
                    HloOpcode::kReduceScatter},
                inst->opcode())) {
          continue;
        }
        EXPECT_FALSE(inst->has_sharding());
      }
    }
  }
};
// Maps the sharding-format test parameter to a readable suffix for the
// generated parameterized test names.
std::string TestParamToString(
    const ::testing::TestParamInfo<ShardingFormatPicker::ShardingType>& data) {
  switch (data.param) {
    case ShardingFormatPicker::ShardingType::kV1:
      return "V1";
    case ShardingFormatPicker::ShardingType::kBestEffortV2:
      return "BestEffortV2";
  }
  // Defensive fallback: falling off the end of a non-void function is
  // undefined behavior if the enum ever gains an unhandled value (and some
  // toolchains warn with -Wreturn-type on the exhaustive switch above).
  return "UnknownShardingType";
}
// Instantiate every SpmdPartitioningTest twice: with shardings in the V1
// format and with a best-effort conversion to the V2 format.
INSTANTIATE_TEST_SUITE_P(
    All, SpmdPartitioningTest,
    ::testing::Values(ShardingFormatPicker::ShardingType::kV1,
                      ShardingFormatPicker::ShardingType::kBestEffortV2),
    TestParamToString);
// A constant pinned to device 0 copied to a replicated output: expects a
// partition-id select (constant on device 0, zeros elsewhere) followed by an
// all-reduce to broadcast the value to every partition.
TEST_P(SpmdPartitioningTest, SingleDeviceToReplicated) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %constant = s32[2,3]{1,0} constant({{1,1,1},{1,1,1}}),
    sharding={maximal device=0}
  ROOT %copy = s32[2,3]{1,0} copy(%constant), sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Copy(op::AllReduce(
                              op::Select(op::Broadcast(op::Compare()),
                                         op::Constant(), op::Broadcast()))),
                          op::Shape("s32[2,3]")));
}
// A custom call pinned to device 0: the call must be moved into a conditional
// (executed only on that device, hence outside the entry computation) and its
// result broadcast via select + all-reduce.
TEST_P(SpmdPartitioningTest, SingleDeviceCustomCall) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %constant = s32[2,3]{1,0} constant({{1,1,1},{1,1,1}}),
    sharding={maximal device=0}
  %cc = s32[2,3] custom-call(%constant), custom_call_target="SomeCustomCall",
    sharding={maximal device=0}
  ROOT %copy = s32[2,3]{1,0} copy(%cc), sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  // The custom call is cloned into a called computation, not the entry.
  HloInstruction* custom_call = FindInstruction(module.get(), "cc.1");
  EXPECT_NE(custom_call, nullptr);
  EXPECT_NE(custom_call->parent(), module->entry_computation());
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Copy(op::AllReduce(
                              op::Select(op::Broadcast(op::Compare()),
                                         op::Conditional(), op::Broadcast()))),
                          op::Shape("s32[2,3]")));
}
// Moving a value from device 0 to device 1: the value is broadcast via
// select + all-reduce, then copied into place on the target device.
TEST_P(SpmdPartitioningTest, SingleDeviceToSingleDevice) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %constant = s32[2,3]{1,0} constant({{1,1,1},{1,1,1}}),
    sharding={maximal device=0}
  ROOT %copy = s32[2,3]{1,0} copy(%constant), sharding={maximal device=1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  HloInstruction* root = module->entry_computation()->root_instruction();
  VLOG(1) << module->ToString();
  EXPECT_THAT(root, op::Copy(AllOf(op::Copy(op::AllReduce(op::Select(
                                       op::Broadcast(op::Compare()),
                                       op::Constant(), op::Broadcast()))),
                                   op::Shape("s32[2,3]"))));
}
// Resharding from a single device to a [2,1] tiling: the full value is first
// made available everywhere (select + all-reduce), then each partition
// dynamic-slices out its own s32[1,3] tile using a partition-id based offset.
TEST_P(SpmdPartitioningTest, SingleDeviceToTiled) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %constant = s32[2,3]{1,0} constant({{1,1,1},{1,1,1}}),
    sharding={maximal device=0}
  ROOT %copy = s32[2,3]{1,0} copy(%constant),
    sharding={devices=[2,1]1,0}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root,
      AllOf(
          op::Copy(op::DynamicSlice(
              op::AllReduce(op::Select(
                  op::Broadcast(op::Compare(op::PartitionId(), op::Constant())),
                  op::Constant(), op::Broadcast())),
              op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId())),
              op::Constant())),
          op::Shape("s32[1,3]")));
}
// kCall with sharded operands: both caller and callee must be partitioned
// consistently (s32[8,2] tiled [2,2] over 4 devices -> s32[4,1] shards), and
// the callee's body operates on the local shard shape.
TEST_P(SpmdPartitioningTest, PartitionCall) {
  absl::string_view hlo_string = R"(
HloModule jit_f

g {
  Arg_0.6 = s32[8,2]{1,0} parameter(0), sharding={devices=[2,2]<=[4]}
  constant.0 = s32[] constant(2), sharding={replicated}
  broadcast.0 = s32[8,2]{1,0} broadcast(constant.0), dimensions={}, sharding={devices=[2,2]<=[4]}
  ROOT multiply.9 = s32[8,2]{1,0} multiply(Arg_0.6, broadcast.0), sharding={devices=[2,2]<=[4]}
}

ENTRY main {
  Arg_0.1 = s32[8,2]{1,0} parameter(0), sharding={devices=[2,2]<=[4]}
  constant.1 = s32[] constant(3), sharding={replicated}
  broadcast.1 = s32[8,2]{1,0} broadcast(constant.1), dimensions={}, sharding={devices=[2,2]<=[4]}
  multiply.4 = s32[8,2]{1,0} multiply(Arg_0.1, broadcast.1), sharding={devices=[2,2]<=[4]}
  ROOT call = s32[8,2]{1,0} call(multiply.4), to_apply=g, sharding={devices=[2,2]<=[4]}, backend_config={"flag_configs":[],"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_DEFAULT","device_type":"DEVICE_TYPE_HOST","used_scoped_memory_configs":[]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Call(), op::Shape("s32[4,1]")));
  HloInstruction* call_comp_root =
      root->called_computations()[0]->root_instruction();
  EXPECT_THAT(call_comp_root, AllOf(op::Multiply(op::Parameter(0),
                                                 op::Broadcast(op::Constant())),
                                    op::Shape("s32[4,1]")));
}
// Resharding a [2,1]-tiled value to replicated: each partition writes its
// local s32[1,3] shard into a zero buffer at its partition offset
// (dynamic-update-slice) and an all-reduce combines the shards.
TEST_P(SpmdPartitioningTest, TiledToReplicated) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %constant = s32[2,3]{1,0} constant({{1,1,1},{1,1,1}}),
    sharding={devices=[2,1]0,1}
  ROOT %copy = s32[2,3]{1,0} copy(%constant), sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root,
      op::Copy(op::AllReduce(AllOf(
          op::DynamicUpdateSlice(
              op::Broadcast(), AllOf(op::Constant(), op::Shape("s32[1,3]")),
              op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId())),
              op::Constant()),
          op::Shape("s32[2,3]")))));
}
// V2-format shardings should produce all-reduces whose device list uses the
// compact iota representation (IotaReplicaGroupList) instead of explicit
// replica groups. Only meaningful for the BestEffortV2 parameterization.
TEST_P(SpmdPartitioningTest,
       TiledToReplicatedWhenV2ShardingGeneratesReplicaGroupV2) {
  if (GetParam() != ShardingFormatPicker::ShardingType::kBestEffortV2) {
    GTEST_SKIP() << "This test only runs when input sharding is in V2 format.";
  }
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %constant = s32[4,3]{1,0} constant({{1,1,1},{1,1,1},{1,1,1},{1,1,1}}),
    sharding={devices=[4,1]<=[4]}
  ROOT %copy = s32[4,3]{1,0} copy(%constant), sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  // Locate the all-reduce introduced by resharding to replicated.
  auto all_reduce_instruction =
      std::find_if(module->entry_computation()->instructions().begin(),
                   module->entry_computation()->instructions().end(),
                   HloPredicateIsOp<HloOpcode::kAllReduce>);
  EXPECT_NE(all_reduce_instruction,
            module->entry_computation()->instructions().end());
  EXPECT_TRUE((*all_reduce_instruction)
                  ->device_list()
                  .iota_replica_group_list()
                  .has_value());
  // Expect one group of all 4 devices, in iota order (reshape [4], perm [0]).
  IotaReplicaGroupList list = (*all_reduce_instruction)
                                  ->device_list()
                                  .iota_replica_group_list()
                                  .value();
  EXPECT_EQ(list.num_replica_groups(), 1);
  EXPECT_EQ(list.num_devices_per_group(), 4);
  EXPECT_THAT(list.reshape_dims(), ::testing::ElementsAre(4));
  EXPECT_THAT(list.transpose_perm(), ::testing::ElementsAre(0));
}
// Resharding a [2,1]-tiled value to a single device: shards are combined via
// dynamic-update-slice + all-reduce, then the full value is copied onto the
// target device.
TEST_P(SpmdPartitioningTest, TiledToSingleDevice) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %constant = s32[2,3]{1,0} constant({{1,1,1},{1,1,1}}),
    sharding={devices=[2,1]0,1}
  ROOT %copy = s32[2,3]{1,0} copy(%constant), sharding={maximal device=0}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root,
      op::Copy(op::Copy(op::AllReduce(AllOf(
          op::DynamicUpdateSlice(
              op::Broadcast(), AllOf(op::Constant(), op::Shape("s32[1,3]")),
              op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId())),
              op::Constant()),
          op::Shape("s32[2,3]"))))));
}
// Resharding between tilings on different dimensions ([2,1] -> [1,2]) with
// evenly divisible sizes: expects a reshape/all-to-all/transpose/reshape
// sequence rather than a full rematerialization.
TEST_P(SpmdPartitioningTest, TiledToTiledEven) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %param= s32[8,2]{1,0} parameter(0), sharding={devices=[2,1]0,1}
  ROOT %copy = s32[8,2]{1,0} copy(%param), sharding={devices=[1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root,
      AllOf(op::Copy(op::Reshape(op::Transpose(op::AllToAll(AllOf(
                op::Reshape(op::Parameter()), op::Shape("s32[4,2,1]")))))),
            op::Shape("s32[8,1]")));
}
// Resharding between tilings when the dimension sizes (7, 31) do not divide
// evenly: the input is padded first, resharded via all-to-all, and the result
// sliced back to the true shape.
TEST_P(SpmdPartitioningTest, TiledToTiledUneven) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %param= f32[7,31,128]{2,1,0} parameter(0), sharding={devices=[1,2,1]0,1}
  ROOT %copy = f32[7,31,128]{2,1,0} copy(%param), sharding={devices=[2,1,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root,
      AllOf(op::Copy(op::Slice(op::Reshape(AllOf(op::Transpose(op::AllToAll(
          op::Reshape(AllOf(op::Pad(), op::Shape("f32[8,16,128]")))))))))));
}
// Tuple elements pinned to device 1 read as device 0: each get-tuple-element
// must be moved across devices via the select + all-reduce pattern.
TEST_P(SpmdPartitioningTest, GetTupleElementSwapDevice) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %param.0 = (f32[2,3]{1,0}, u32[]) parameter(0),
    sharding={{maximal device=1}, {maximal device=1}}
  %gte.0 = f32[2,3]{1,0} get-tuple-element(%param.0), index=0,
    sharding={maximal device=0}
  %gte.1 = u32[] get-tuple-element(%param.0), index=1,
    sharding={maximal device=0}
  ROOT %tuple = (f32[2,3]{1,0}, u32[]) tuple(%gte.0, %gte.1),
    sharding={{maximal device=0},{maximal device=0}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  ASSERT_THAT(root, op::Tuple());
  EXPECT_THAT(root->operand(0),
              op::Copy(op::AllReduce(op::Select(
                  op::Broadcast(op::Compare(op::PartitionId(), op::Constant())),
                  op::GetTupleElement(op::Parameter()), op::Broadcast()))));
  EXPECT_THAT(root->operand(1),
              op::Copy(op::AllReduce(op::Select(
                  op::Broadcast(op::Compare(op::PartitionId(), op::Constant())),
                  op::GetTupleElement(op::Parameter()), op::Broadcast()))));
}
// Replicated tuple elements read with a tiled sharding: each
// get-tuple-element is followed by a dynamic-slice that extracts the local
// shard using a partition-id derived row offset.
TEST_P(SpmdPartitioningTest, GetTupleElementTiled) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  param.0 = (f32[2,3]{1,0}, u32[2,3]{1,0}) parameter(0),
    sharding={{replicated}, {replicated}}
  gte.0 = f32[2,3]{1,0} get-tuple-element(param.0), index=0,
    sharding={devices=[2,1]0,1}
  gte.1 = u32[2,3]{1,0} get-tuple-element(param.0), index=1,
    sharding={devices=[2,1]0,1}
  ROOT %tuple = (f32[2,3]{1,0}, u32[2,3]{1,0}) tuple(gte.0, gte.1),
    sharding={{devices=[2,1]0,1},{devices=[2,1]0,1}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  ASSERT_THAT(root, op::Tuple());
  // Per-partition row offset into the replicated value.
  auto offset =
      op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId()));
  EXPECT_THAT(root->operand(0),
              op::DynamicSlice(op::GetTupleElement(op::Parameter()), offset,
                               op::Constant()));
  EXPECT_THAT(root->operand(1),
              op::DynamicSlice(op::GetTupleElement(op::Parameter()), offset,
                               op::Constant()));
}
// Infeed with a [2,1]-tiled data sharding read back on a single device: the
// infeed itself becomes per-partition f32[4,2], and the shards are combined
// with dynamic-update-slice + all-reduce.
TEST_P(SpmdPartitioningTest, TiledInfeed) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  token0 = token[] after-all(), sharding={maximal device=0}
  infeed = (f32[8,2]{1,0}, token[]) infeed(token0),
    sharding={{devices=[2,1]0,1}, {maximal device=0}}
  ROOT infeed.data = f32[8,2]{1,0} get-tuple-element(infeed), index=0,
    sharding={maximal device=0}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root,
      op::Copy(op::AllReduce(op::DynamicUpdateSlice(
          op::Broadcast(),
          op::GetTupleElement(
              AllOf(op::Infeed(), op::Shape("(f32[4,2]{1,0}, token[])"))),
          op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId())),
          op::Constant()))));
}
// Infeed tiled unevenly (9 rows over 2 partitions): partitioning wraps the
// infeed in a conditional on partition id — one branch infeeds the full
// f32[5,2] shard, the other infeeds f32[4,2] and pads it up to f32[5,2].
TEST_P(SpmdPartitioningTest, UnevenTiledInfeed) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  token0 = token[] after-all(), sharding={maximal device=0}
  infeed = (f32[9,2]{1,0}, token[]) infeed(token0),
    sharding={{devices=[2,1]0,1}, {maximal device=0}}
  ROOT infeed.data = f32[9,2]{1,0} get-tuple-element(infeed), index=0,
    sharding={devices=[2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root, AllOf(op::Shape("f32[5,2]"), op::GetTupleElement(op::Conditional(
                                             op::Convert(op::PartitionId()),
                                             op::AfterAll(), op::AfterAll()))));
  // Branch 0: full-sized shard straight from the infeed.
  EXPECT_THAT(
      root->operand(0)->called_computations()[0]->root_instruction(),
      AllOf(op::Shape("(f32[5,2], token[])"), op::Infeed(op::Parameter())));
  // Branch 1: smaller shard padded up to the uniform shard shape.
  auto second_infeed =
      AllOf(op::Shape("(f32[4,2], token[])"), op::Infeed(op::Parameter()));
  EXPECT_THAT(root->operand(0)->called_computations()[1]->root_instruction(),
              AllOf(op::Shape("(f32[5,2], token[])"),
                    op::Tuple(op::Pad(op::GetTupleElement(second_infeed),
                                      op::Constant()),
                              op::GetTupleElement(second_infeed))));
}
// Tuple infeed where only the first element is tiled (unevenly) and the
// second is replicated: the same conditional-per-shard structure as
// UnevenTiledInfeed, with padding applied only to the tiled element.
TEST_P(SpmdPartitioningTest, UnevenTiledTupleInfeed) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  token0 = token[] after-all(), sharding={maximal device=0}
  infeed = ((f32[9,2]{1,0}, f32[2]{0}), token[]) infeed(token0),
    sharding={{devices=[2,1]0,1}, {replicated}, {maximal device=0}}
  ROOT infeed.data = (f32[9,2]{1,0}, f32[2]{0}) get-tuple-element(infeed),
    index=0, sharding={{devices=[2,1]0,1}, {replicated}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Shape("(f32[5,2], f32[2])"),
                          op::GetTupleElement(op::Conditional(
                              op::Convert(op::PartitionId()), op::AfterAll(),
                              op::AfterAll()))));
  EXPECT_THAT(root->operand(0)->called_computations()[0]->root_instruction(),
              AllOf(op::Shape("((f32[5,2], f32[2]), token[])"),
                    op::Infeed(op::Parameter())));
  // The smaller-shard branch pads only the tiled tuple element.
  auto second_infeed = AllOf(op::Shape("((f32[4,2], f32[2]), token[])"),
                             op::Infeed(op::Parameter()));
  EXPECT_THAT(
      root->operand(0)->called_computations()[1]->root_instruction(),
      AllOf(op::Shape("((f32[5,2], f32[2]), token[])"),
            op::Tuple(op::Tuple(op::Pad(op::GetTupleElement(
                                            op::GetTupleElement(second_infeed)),
                                        op::Constant()),
                                op::GetTupleElement(
                                    op::GetTupleElement(second_infeed))),
                      op::GetTupleElement(second_infeed))));
}
// Verifies partitioning of a tuple infeed whose two leaves are assigned to
// different maximal devices (device 0 and device 1). Each conditional branch
// should infeed only the leaf it owns and substitute a broadcast-of-constant
// dummy for the leaf owned by the other partition.
TEST_P(SpmdPartitioningTest, MixedTupleInfeed) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  token0 = token[] after-all(), sharding={maximal device=0}
  infeed = ((f32[9,2]{1,0}, f32[2]{0}), token[]) infeed(token0),
    sharding={{maximal device=0}, {maximal device=1}, {maximal device=0}}
  ROOT infeed.data = (f32[9,2]{1,0}, f32[2]{0}) get-tuple-element(infeed),
    index=0, sharding={{maximal device=0}, {maximal device=1}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Shape("(f32[9,2], f32[2])"),
                          op::GetTupleElement(op::Conditional(
                              op::Convert(op::PartitionId()), op::AfterAll(),
                              op::AfterAll()))));
  // Device 0's branch infeeds only the f32[9,2] leaf (empty tuple for the
  // other) and broadcasts a dummy f32[2].
  auto first_infeed = AllOf(op::Shape("((f32[9,2], ()), token[])"),
                            op::Infeed(op::Parameter()));
  EXPECT_THAT(root->operand(0)->called_computations()[0]->root_instruction(),
              AllOf(op::Shape("((f32[9,2], f32[2]), token[])"),
                    op::Tuple(op::Tuple(op::GetTupleElement(
                                            op::GetTupleElement(first_infeed)),
                                        op::Broadcast(op::Constant())),
                              op::GetTupleElement(first_infeed))));
  // Device 1's branch infeeds only the f32[2] leaf and broadcasts a dummy
  // for the f32[9,2] leaf.
  auto second_infeed =
      AllOf(op::Shape("(((), f32[2]), token[])"), op::Infeed(op::Parameter()));
  EXPECT_THAT(root->operand(0)->called_computations()[1]->root_instruction(),
              AllOf(op::Shape("((f32[9,2], f32[2]), token[])"),
                    op::Tuple(op::Tuple(op::Broadcast(op::Constant()),
                                        op::GetTupleElement(op::GetTupleElement(
                                            second_infeed))),
                              op::GetTupleElement(second_infeed))));
}
// Verifies a full reduction of a row-tiled operand to a replicated scalar:
// the local shard is taken by dynamic-slice of the padded constant, masked
// with the identity (0) outside the valid region via select, reduced
// locally, then combined across partitions with an all-reduce.
TEST_P(SpmdPartitioningTest, TiledToReplicatedReduce) {
  absl::string_view hlo_string = R"(
HloModule module
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add = f32[] add(a, b)
}
ENTRY entry {
  constant = f32[3,3]{1,0} constant({{1,1,1},{1,1,1},{1,1,1}}),
    sharding={devices=[2,1]0,1}
  constant.1 = f32[] constant(0), sharding={replicated}
  ROOT reduce = f32[] reduce(constant, constant.1), dimensions={0,1},
    to_apply=sum, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root,
      op::AllReduce(op::Reduce(
          // Padded rows are replaced by the reduce identity so they do not
          // affect the result.
          op::Select(
              op::Compare(op::Add(op::Iota(), op::Broadcast(op::Reshape())),
                          op::Broadcast(op::Constant())),
              AllOf(op::Shape("f32[2,3]{1,0}"),
                    op::DynamicSlice(op::Pad(op::Constant(), op::Constant()),
                                     op::Reshape(), op::Constant())),
              op::Broadcast(op::Constant())),
          op::Constant())));
}
// Verifies elementwise ops with mixed shardings: both the tiled constant
// and the replicated constant are resharded to the tiled output sharding
// (dynamic-slice of a padded constant) before the multiply/add, so the
// whole chain computes on local f32[2,3] shards.
TEST_P(SpmdPartitioningTest, TiledElementwise) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  constant = f32[3,3]{1,0} constant({{1,1,1},{1,1,1},{1,1,1}}),
    sharding={devices=[2,1]0,1}
  constant.1 = f32[3,3]{1,0} constant({{2,2,2},{2,2,2},{2,2,2}}),
    sharding={replicated}
  multiply = f32[3,3]{1,0} multiply(constant, constant.1),
    sharding={devices=[2,1]0,1}
  ROOT add = f32[3,3]{1,0} add(multiply, constant.1),
    sharding={devices=[2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root,
      AllOf(
          op::Shape("f32[2,3]{1,0}"),
          op::Add(op::Multiply(
                      op::DynamicSlice(op::Pad(op::Constant(), op::Constant()),
                                       op::Reshape(), op::Constant()),
                      op::DynamicSlice(op::Pad(op::Constant(), op::Constant()),
                                       op::Reshape(), op::Constant())),
                  op::DynamicSlice(op::Pad(op::Constant(), op::Constant()),
                                   op::Reshape(), op::Constant()))));
}
// Verifies that an all-reduce over a tiled operand stays tiled: the
// partitioned graph simply all-reduces the local f32[2,3] shard of the
// parameter.
TEST_P(SpmdPartitioningTest, TiledAllReduce) {
  absl::string_view hlo_string = R"(
HloModule module
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add = f32[] add(a, b)
}
ENTRY entry {
  parameter = f32[3,3]{1,0} parameter(0), sharding={devices=[2,1]0,1}
  ROOT all-reduce = f32[3,3]{1,0} all-reduce(parameter), to_apply=sum,
    replica_groups={}, sharding={devices=[2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root, AllOf(op::Shape("f32[2,3]{1,0}"), op::AllReduce(op::Parameter(0))));
}
// Verifies a broadcast where only the newly-introduced dimension (dim 0) is
// sharded: the replicated operand can be broadcast directly to the local
// shard shape with no resharding or communication.
TEST_P(SpmdPartitioningTest, BroadcastOnlyNewDimsSharded) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  constant = f32[4,3]{1,0} constant({{1,1,1},{1,1,1},{1,1,1},{1,1,1}}),
    sharding={replicated}
  ROOT broadcast = f32[3,4,3]{2,1,0} broadcast(constant), dimensions={1,2},
    sharding={devices=[2,1,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Shape("f32[2,4,3]{2,1,0}"),
                          op::Broadcast(op::Constant())));
}
// Verifies a broadcast where a pre-existing operand dimension (output dim 1)
// is sharded: the operand must first be dynamic-sliced to its local shard
// before being broadcast.
TEST_P(SpmdPartitioningTest, BroadcastOnlyOldDimsSharded) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  constant = f32[4,3]{1,0} constant({{1,1,1},{1,1,1},{1,1,1},{1,1,1}}),
    sharding={replicated}
  ROOT broadcast = f32[4,4,3]{2,1,0} broadcast(constant), dimensions={1,2},
    sharding={devices=[1,2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Shape("f32[4,2,3]{2,1,0}"),
                          op::Broadcast(op::DynamicSlice(
                              op::Constant(), op::Reshape(), op::Constant()))));
}
// Verifies a broadcast on 4 devices where both a new dimension (dim 0) and
// an old dimension (dim 1) are sharded: the operand is dynamic-sliced to its
// f32[2,3] shard and then broadcast to the f32[2,2,3] local output shape.
TEST_P(SpmdPartitioningTest, BroadcastBothOldAndNewDimsSharded) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  constant = f32[4,3]{1,0} constant({{1,1,1},{1,1,1},{1,1,1},{1,1,1}}),
    sharding={replicated}
  ROOT broadcast = f32[4,4,3]{2,1,0} broadcast(constant), dimensions={1,2},
    sharding={devices=[2,2,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root,
      AllOf(op::Shape("f32[2,2,3]{2,1,0}"),
            op::Broadcast(AllOf(op::Shape("f32[2,3]{1,0}"),
                                op::DynamicSlice(op::Constant(), op::Reshape(),
                                                 op::Constant())))));
}
// Verifies a broadcast on 8 devices with partially-replicated shardings
// (last_tile_dim_replicate) on both input and output: the already partially
// sharded parameter shard (f32[4,2]) feeds the broadcast directly — no
// resharding collective is needed.
TEST_P(SpmdPartitioningTest,
       BroadcastBothOldAndNewDimsShardedPartiallySharded) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %entry {
  %param = f32[4,3]{1,0} parameter(0),
    sharding={devices=[1,2,4]<=[2,2,2]T(1,0,2) last_tile_dim_replicate}
  ROOT %broadcast = f32[4,4,3]{2,1,0} broadcast(%param), dimensions={1,2},
    sharding={devices=[2,1,2,2]<=[8] last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root,
      AllOf(op::Shape("f32[2,4,2]"),
            op::Broadcast(AllOf(op::Shape("f32[4,2]"), op::Parameter(0)))));
}
// Verifies a convolution on 4 devices where one partitioned dim is a batch
// parallel dim and another is a genuinely spatial dim: the spatial dim of
// the lhs needs halo exchange (collective-permutes of edge slices) plus
// masking, while the rhs is resharded to be replicated along that dim via
// an all-reduced dynamic-update-slice.
TEST_P(SpmdPartitioningTest,
       ConvWithParallelDimAndNonParallelSpatialDimPartitioned) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,12,12,24,32] parameter(0)
  %lhs.copy = f32[32,12,12,24,32] copy(%lhs),
    sharding={devices=[2,2,1,1,1]<=[4]}
  %rhs = f32[32,6,6,16,32] parameter(1)
  %rhs.copy = f32[32,6,6,16,32] copy(%rhs),
    sharding={devices=[2,2,1,1,1]<=[4]}
  ROOT %conv = f32[32,7,7,24,16] convolution(%lhs.copy, %rhs.copy),
    dim_labels=012bf_012oi->012bf,
    window={size=32x6x6 stride=31x1x1 lhs_dilate=32x1x1},
    sharding={devices=[2,2,1,1,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(op::Copy(op::DynamicSlice(
                             op::Parameter(), op::Reshape(), op::Reshape(),
                             op::Constant(), op::Constant(), op::Constant())),
                         op::Shape("f32[16,6,12,24,32]"));
  const auto rhs = AllOf(op::Copy(op::DynamicSlice(
                             op::Parameter(), op::Reshape(), op::Reshape(),
                             op::Constant(), op::Constant(), op::Constant())),
                         op::Shape("f32[16,3,6,16,32]"));
  // The rhs spatial shard is gathered back to the full extent (6) with an
  // all-reduce of a dynamic-update-slice into a zero broadcast.
  auto resharded_rhs =
      AllOf(op::Shape("f32[16,6,6,16,32]"),
            op::AllReduce(op::DynamicUpdateSlice(
                op::Broadcast(), rhs, op::Constant(), op::Reshape(),
                op::Constant(), op::Constant(), op::Constant())));
  // Halos exchanged with neighbors along the partitioned spatial dim.
  auto left_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
                         op::Shape("f32[16,2,12,24,32]"));
  auto right_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
                          op::Shape("f32[16,3,12,24,32]"));
  EXPECT_THAT(
      root,
      AllOf(op::Convolution(
                op::Select(op::Compare(),
                           op::DynamicSlice(
                               op::Concatenate(left_halo, lhs, right_halo),
                               op::Constant(), op::Add(), op::Constant(),
                               op::Constant(), op::Constant()),
                           op::Broadcast()),
                resharded_rhs),
            op::Shape("f32[16,4,7,24,16]")));
}
// Verifies a broadcast whose operand is tiled on dim 0 but whose output
// shards the corresponding dim 1: the operand is resharded with a
// dynamic-slice to the shard matching the output's tiling before broadcast.
TEST_P(SpmdPartitioningTest, BroadcastPropagateTiledSharding) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  constant = f32[4,3]{1,0} constant({{1,1,1},{1,4,1},{1,3,1},{1,2,1}}),
    sharding={devices=[2,1]0,1}
  ROOT broadcast = f32[4,4,3]{2,1,0} broadcast(constant), dimensions={1,2},
    sharding={devices=[1,2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Shape("f32[4,2,3]{2,1,0}"),
                          op::Broadcast(op::DynamicSlice(
                              op::Constant(), op::Reshape(), op::Constant()))));
}
// Verifies an outfeed pinned to a single maximal device: partitioning wraps
// it in a conditional on the partition id — the owning partition's branch
// performs the outfeed, the other branch is a no-op after-all token.
TEST_P(SpmdPartitioningTest, OutfeedSingleDevice) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  token.0 = token[] after-all()
  data = f32[1024]{0} parameter(0), sharding={maximal device=0}
  outfeed = token[] outfeed(data, token.0), sharding={maximal device=0}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Shape("token[]"),
                          op::Conditional(
                              op::Compare(op::PartitionId(), op::Constant()),
                              op::Tuple(op::Parameter(0), op::AfterAll()),
                              op::Tuple(op::Parameter(0), op::AfterAll()))));
  // Branch 0 (device 0) actually outfeeds the data and token.
  HloInstruction* root_b0 = root->branch_computation(0)->root_instruction();
  EXPECT_THAT(root_b0,
              AllOf(op::Shape("token[]"),
                    op::Outfeed(op::GetTupleElement(op::Parameter(), 0),
                                op::GetTupleElement(op::Parameter(), 1))));
  // Branch 1 (the non-owning partition) does nothing.
  HloInstruction* root_b1 = root->branch_computation(1)->root_instruction();
  EXPECT_THAT(root_b1, AllOf(op::Shape("token[]"), op::AfterAll()));
}
// Verifies an outfeed of evenly-tiled data: each partition outfeeds its own
// shard directly, with no conditional or resharding required.
TEST_P(SpmdPartitioningTest, OutfeedEvenlyTiled) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  token.0 = token[] after-all()
  data = f32[1024]{0} parameter(0), sharding={devices=[2]0,1}
  ROOT outfeed = token[] outfeed(data, token.0), sharding={devices=[2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Shape("token[]"),
                          op::Outfeed(op::Parameter(), op::AfterAll())));
}
// Verifies a tuple outfeed of evenly-tiled data with an explicit
// outfeed_shape: besides the direct per-shard outfeed, the user-specified
// layouts ({0,1} and {0}) must be preserved on the partitioned outfeed's
// shape.
TEST_P(SpmdPartitioningTest, OutfeedTupleEvenlyTiled) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  token.0 = token[] after-all()
  data = (f32[1024,2]{1,0}, f32[2]{0}) parameter(0), sharding={{devices=[2,1]0,1},
    {devices=[2]0,1}}
  ROOT outfeed = token[] outfeed(data, token.0),
    outfeed_shape=(f32[1024,2]{0,1}, f32[2]{0}), sharding={{devices=[2,1]0,1},
    {devices=[2]0,1}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Shape("token[]"),
                          op::Outfeed(op::Parameter(), op::AfterAll())));
  // The outfeed_shape layouts from the original op must survive
  // partitioning.
  auto expected_layout0 = LayoutUtil::MakeLayout({0, 1});
  auto expected_layout1 = LayoutUtil::MakeLayout({0});
  EXPECT_TRUE(LayoutUtil::Equal(root->outfeed_shape().tuple_shapes(0).layout(),
                                expected_layout0));
  EXPECT_TRUE(LayoutUtil::Equal(root->outfeed_shape().tuple_shapes(1).layout(),
                                expected_layout1));
}
// Verifies a tuple outfeed mixing a tiled leaf and a replicated leaf: both
// can be outfed directly from each partition without a conditional.
TEST_P(SpmdPartitioningTest, OutfeedReplicated) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  token.0 = token[] after-all()
  data = (f32[1024,2]{1,0}, f32[2]{0}) parameter(0), sharding={{devices=[2,1]0,1},
    {replicated}}
  ROOT outfeed = token[] outfeed(data, token.0), sharding={{devices=[2,1]0,1},
    {replicated}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Shape("token[]"),
                          op::Outfeed(op::Parameter(), op::AfterAll())));
}
// Verifies an outfeed of unevenly-tiled tuple data: the partitioner emits a
// conditional with per-partition branches whose outfeeds carry different
// shard shapes ((f32[512,2], f32[2]) vs. (f32[511,2], f32[1])), and the
// user-specified outfeed_shape layouts must be preserved in both branches.
TEST_P(SpmdPartitioningTest, OutfeedUnevenlyTiled) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  token.0 = token[] after-all()
  data = (f32[1023,2]{1,0}, f32[3]{0}) parameter(0), sharding={{devices=[2,1]0,1},
    {devices=[2]0,1}}
  outfeed = token[] outfeed(data, token.0),
    outfeed_shape=(f32[1023,2]{0,1}, f32[3]{0}), sharding={{devices=[2,1]0,1},
    {devices=[2]0,1}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root, AllOf(op::Shape("token[]"),
                  op::Conditional(op::Convert(),
                                  op::Tuple(op::Parameter(), op::AfterAll()),
                                  op::Tuple(op::Parameter(), op::AfterAll()))));
  // Partition 0 outfeeds the larger shard.
  auto first_outfeed =
      AllOf(op::Shape("(f32[512,2], f32[2])"), op::GetTupleElement());
  EXPECT_THAT(root->called_computations()[0]->root_instruction(),
              AllOf(op::Shape("token[]"),
                    op::Outfeed(first_outfeed, op::GetTupleElement())));
  // Partition 1 outfeeds the smaller (remainder) shard.
  auto second_outfeed = AllOf(op::Shape("(f32[511,2], f32[1])"), op::Tuple());
  EXPECT_THAT(root->called_computations()[1]->root_instruction(),
              AllOf(op::Shape("token[]"),
                    op::Outfeed(second_outfeed, op::GetTupleElement())));
  // Both branch outfeeds must keep the requested {0,1} / {0} layouts.
  auto expected_layout0 = LayoutUtil::MakeLayout({0, 1});
  auto expected_layout1 = LayoutUtil::MakeLayout({0});
  auto first_outfeed_instr = root->called_computations()[0]->root_instruction();
  auto second_outfeed_instr =
      root->called_computations()[1]->root_instruction();
  EXPECT_TRUE(LayoutUtil::Equal(
      first_outfeed_instr->outfeed_shape().tuple_shapes(0).layout(),
      expected_layout0));
  EXPECT_TRUE(LayoutUtil::Equal(
      first_outfeed_instr->outfeed_shape().tuple_shapes(1).layout(),
      expected_layout1));
  EXPECT_TRUE(LayoutUtil::Equal(
      second_outfeed_instr->outfeed_shape().tuple_shapes(0).layout(),
      expected_layout0));
  EXPECT_TRUE(LayoutUtil::Equal(
      second_outfeed_instr->outfeed_shape().tuple_shapes(1).layout(),
      expected_layout1));
}
// Verifies a reduce-window whose input is replicated but whose output is
// tiled: each partition dynamic-slices the padded full input to the window
// region its output shard needs (offset = shard_index * stride) and runs a
// local reduce-window — no halo exchange required.
TEST_P(SpmdPartitioningTest, ReduceWindowReplicatedInput) {
  absl::string_view hlo_string = R"(
HloModule module
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add = f32[] add(a, b)
}
ENTRY entry {
  constant = f32[6,2]{1,0} constant({{1,1},{1,4},{2,1},{3,1},{1,2},{2,2}}),
    sharding={replicated}
  constant.1 = f32[] constant(0), sharding={replicated}
  ROOT reduce-window = f32[3,2]{1,0} reduce-window(constant, constant.1),
    window={size=3x1 stride=2x1 pad=1_0x0_0}, to_apply=sum,
    sharding={devices=[2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root,
      AllOf(op::Shape("f32[2,2]{1,0}"),
            op::ReduceWindow(
                op::DynamicSlice(AllOf(op::Shape("f32[9,2]{1,0}"),
                                       op::Pad(op::Constant(), op::Constant())),
                                 op::Multiply(op::Reshape(), op::Constant()),
                                 op::Constant()),
                op::Constant())));
}
// Verifies a tiled reduce-window where the left halo size is negative (only
// a right halo is needed): a right-halo slice is exchanged via
// collective-permute, concatenated, padded, dynamic-sliced to the local
// window extent, and masked with the identity outside the valid range.
TEST_P(SpmdPartitioningTest, ReduceWindowTiledNegativeLeftHalo) {
  absl::string_view hlo_string = R"(
HloModule module
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add = f32[] add(a, b)
}
ENTRY entry {
  constant = f32[6,2]{1,0} constant({{1,1},{1,4},{2,1},{3,1},{1,2},{2,2}}),
    sharding={devices=[2,1]0,1}
  constant.1 = f32[] constant(0), sharding={replicated}
  ROOT %reduce-window = f32[3,2]{1,0} reduce-window(%constant, %constant.1),
    window={size=3x1 stride=2x1 pad=0_1x0_0}, to_apply=sum,
    sharding={devices=[2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  auto sharded_input =
      op::DynamicSlice(op::Constant(), op::Reshape(), op::Constant());
  auto right_halo = AllOf(op::Shape("f32[2,2]{1,0}"),
                          op::CollectivePermute(op::Slice(sharded_input)));
  auto pre_masking = op::DynamicSlice(
      AllOf(
          op::Shape("f32[6,2]{1,0}"),
          op::Pad(op::Concatenate(sharded_input, right_halo), op::Constant())),
      op::Reshape(), op::Constant());
  // Global index of each local row within the (conceptually) padded input,
  // used to mask out-of-range elements with the reduce identity.
  auto index_in_padded = op::Add(
      op::Iota(), op::Broadcast(op::Multiply(op::Reshape(), op::Constant())));
  auto masked =
      op::Select(op::Compare(index_in_padded, op::Broadcast(op::Constant())),
                 pre_masking, op::Broadcast(op::Constant()));
  EXPECT_THAT(root, AllOf(op::Shape("f32[2,2]{1,0}"),
                          op::ReduceWindow(masked, op::Constant())));
}
// Verifies a 5-way tiled reduce-window whose left halo extends beyond the
// immediate neighbor shard: two collective-permutes are needed (a 1-row
// slice from two shards away and a full 2-row shard from the neighbor)
// before concatenation and masking.
TEST_P(SpmdPartitioningTest, ReduceWindowTiledOneSideHaloBeyondNeighbor) {
  absl::string_view hlo_string = R"(
HloModule module
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add = f32[] add(a, b)
}
ENTRY entry {
  param = f32[9,2] parameter(0), sharding={devices=[5,1]0,1,2,3,4}
  constant.1 = f32[] constant(0), sharding={replicated}
  ROOT reduce-window = f32[5,2]{1,0} reduce-window(param, constant.1),
    window={size=4x1 stride=2x1 pad=3_0x0_0}, to_apply=sum,
    sharding={devices=[5,1]0,1,2,3,4}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 5));
  VLOG(1) << module->ToString();
  // Halo piece from two partitions away (partial shard) ...
  auto halo0 = AllOf(op::Shape("f32[1,2]"),
                     op::CollectivePermute(op::Slice(op::Parameter(0))));
  // ... and the full shard from the immediate neighbor.
  auto halo1 =
      AllOf(op::Shape("f32[2,2]"), op::CollectivePermute(op::Parameter(0)));
  auto pre_mask =
      AllOf(op::Shape("f32[4,2]"),
            op::Concatenate(halo0, halo1, op::Slice(op::Parameter(0))));
  auto masked =
      op::Select(op::Compare(op::Add(op::Iota(), op::Broadcast(op::Multiply())),
                             op::Broadcast(op::Constant())),
                 pre_mask, op::Broadcast(op::Constant()))
;
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Shape("f32[1,2]{1,0}"),
                          op::ReduceWindow(masked, op::Constant())));
}
// Verifies a 3-way tiled reduce-window with padding on both ends where each
// partition's (right-only) halo requirement differs: the halo is exchanged
// via collective-permute, and masking uses an AND of two comparisons to
// bound the valid index range from both sides.
TEST_P(SpmdPartitioningTest, ReduceWindowTiledOneSideUnequalHalo) {
  absl::string_view hlo_string = R"(
HloModule module
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add = f32[] add(a, b)
}
ENTRY entry {
  constant = f32[9,2]{1,0} constant(
    {{1,1},{1,4},{2,1},{3,1},{1,2},{2,2},{4,1},{1,2},{2,1}}),
    sharding={devices=[3,1]0,1,2}
  constant.1 = f32[] constant(0), sharding={replicated}
  ROOT reduce-window = f32[5,2]{1,0} reduce-window(constant, constant.1),
    window={size=3x1 stride=2x1 pad=1_1x0_0}, to_apply=sum,
    sharding={devices=[3,1]0,1,2}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 3));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  auto sharded_input =
      op::DynamicSlice(op::Constant(), op::Reshape(), op::Constant());
  auto right_halo = AllOf(op::Shape("f32[2,2]{1,0}"),
                          op::CollectivePermute(op::Slice(sharded_input)));
  auto pre_masking = op::DynamicSlice(
      AllOf(
          op::Shape("f32[7,2]{1,0}"),
          op::Pad(op::Concatenate(sharded_input, right_halo), op::Constant())),
      op::Reshape(), op::Constant());
  auto index_in_padded = op::Add(
      op::Iota(), op::Broadcast(op::Multiply(op::Reshape(), op::Constant())));
  // Two-sided bound check: index must be >= low pad and < high limit.
  auto masked = op::Select(
      op::And(op::Compare(index_in_padded, op::Broadcast(op::Constant())),
              op::Compare(index_in_padded, op::Broadcast(op::Constant()))),
      pre_masking, op::Broadcast(op::Constant()));
  EXPECT_THAT(root, AllOf(op::Shape("f32[2,2]{1,0}"),
                          op::ReduceWindow(masked, op::Constant())));
}
// Verifies a tiled reduce-window needing halos on BOTH sides (window 5,
// pad 2_2): left and right halo slices are exchanged via collective-permute,
// concatenated around the local shard, padded, dynamic-sliced, and masked
// with a two-sided range check before the local reduce-window.
TEST_P(SpmdPartitioningTest, ReduceWindowTiledTwoSideHalo) {
  absl::string_view hlo_string = R"(
HloModule module
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add = f32[] add(a, b)
}
ENTRY entry {
  constant = f32[4,2]{1,0} constant({{1,1},{1,4},{2,1},{3,1}}),
    sharding={devices=[2,1]0,1}
  constant.1 = f32[] constant(0), sharding={replicated}
  ROOT reduce-window = f32[2,2]{1,0} reduce-window(constant, constant.1),
    window={size=5x1 stride=3x1 pad=2_2x0_0}, to_apply=sum,
    sharding={devices=[2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  auto sharded_input =
      op::DynamicSlice(op::Constant(), op::Reshape(), op::Constant());
  auto left_halo = AllOf(op::Shape("f32[1,2]{1,0}"),
                         op::CollectivePermute(op::Slice(sharded_input)));
  auto right_halo = AllOf(op::Shape("f32[1,2]{1,0}"),
                          op::CollectivePermute(op::Slice(sharded_input)));
  auto pre_masking = AllOf(
      op::Shape("f32[5,2]{1,0}"),
      op::DynamicSlice(
          AllOf(op::Shape("f32[6,2]{1,0}"),
                op::Pad(op::Concatenate(left_halo, sharded_input, right_halo),
                        op::Constant())),
          op::Reshape(), op::Constant()));
  auto index_in_padded = op::Add(
      op::Iota(), op::Broadcast(op::Multiply(op::Reshape(), op::Constant())));
  auto masked = op::Select(
      op::And(op::Compare(index_in_padded, op::Broadcast(op::Constant())),
              op::Compare(index_in_padded, op::Broadcast(op::Constant()))),
      pre_masking, op::Broadcast(op::Constant()));
  EXPECT_THAT(root, AllOf(op::Shape("f32[1,2]{1,0}"),
                          op::ReduceWindow(masked, op::Constant())));
}
// Verifies a reduce-window tiled along two dimensions (4 devices, 2x2 mesh):
// halo exchange + padding + masking is applied independently per tiled
// dimension — first along dim 0, then along dim 1 on the dim-0-resharded
// result — before the local reduce-window.
TEST_P(SpmdPartitioningTest, ReduceWindowTiled2D) {
  absl::string_view hlo_string = R"(
HloModule module
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add = f32[] add(a, b)
}
ENTRY entry {
  token0 = token[] after-all(), sharding={maximal device=0}
  infeed = (f32[4,4,2,2]{3,2,1,0}, token[]) infeed(token0),
    sharding={{devices=[2,2,1,1]<=[4]}, {maximal device=0}}
  infeed.data = f32[4,4,2,2]{3,2,1,0} get-tuple-element(infeed), index=0,
    sharding={devices=[2,2,1,1]<=[4]}
  constant = f32[] constant(0), sharding={replicated}
  ROOT reduce-window = f32[2,2,2,2]{3,2,1,0} reduce-window(infeed.data, constant),
    window={size=5x5x1x1 stride=3x3x1x1 pad=2_2x2_2x0_0x0_0}, to_apply=sum,
    sharding={devices=[2,2,1,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  auto sharded_input = AllOf(op::Shape("f32[2,2,2,2]{3,2,1,0}"),
                             op::GetTupleElement(op::Infeed()));
  // Dimension 0: two-sided halo exchange, pad, slice, mask.
  auto dim0_left_halo = AllOf(op::Shape("f32[1,2,2,2]{3,2,1,0}"),
                              op::CollectivePermute(op::Slice(sharded_input)));
  auto dim0_right_halo = AllOf(op::Shape("f32[1,2,2,2]{3,2,1,0}"),
                               op::CollectivePermute(op::Slice(sharded_input)));
  auto dim0_pre_masking = op::DynamicSlice(
      AllOf(op::Shape("f32[6,2,2,2]{3,2,1,0}"),
            op::Pad(
                op::Concatenate(dim0_left_halo, sharded_input, dim0_right_halo),
                op::Constant())),
      op::Reshape(), op::Constant(), op::Constant(), op::Constant());
  auto dim0_index_in_padded = op::Add(
      op::Iota(), op::Broadcast(op::Multiply(op::Reshape(), op::Constant())));
  auto dim0_masked = op::Select(
      op::And(op::Compare(dim0_index_in_padded, op::Broadcast(op::Constant())),
              op::Compare(dim0_index_in_padded, op::Broadcast(op::Constant()))),
      dim0_pre_masking, op::Broadcast(op::Constant()));
  auto dim0_resharded = AllOf(op::Shape("f32[5,2,2,2]{3,2,1,0}"), dim0_masked);
  // Dimension 1: same treatment, applied to the dim-0 result.
  auto dim1_left_halo = AllOf(op::Shape("f32[5,1,2,2]{3,2,1,0}"),
                              op::CollectivePermute(op::Slice(dim0_resharded)));
  auto dim1_right_halo =
      AllOf(op::Shape("f32[5,1,2,2]{3,2,1,0}"),
            op::CollectivePermute(op::Slice(dim0_resharded)));
  auto dim1_pre_masking = op::DynamicSlice(
      AllOf(op::Shape("f32[5,6,2,2]{3,2,1,0}"),
            op::Pad(op::Concatenate(dim1_left_halo, dim0_resharded,
                                    dim1_right_halo),
                    op::Constant())),
      op::Constant(), op::Reshape(), op::Constant(), op::Constant());
  auto dim1_index_in_padded = op::Add(
      op::Iota(), op::Broadcast(op::Multiply(op::Reshape(), op::Constant())));
  auto dim1_masked = op::Select(
      op::And(op::Compare(dim1_index_in_padded, op::Broadcast(op::Constant())),
              op::Compare(dim1_index_in_padded, op::Broadcast(op::Constant()))),
      dim1_pre_masking, op::Broadcast(op::Constant()));
  auto dim1_resharded = AllOf(op::Shape("f32[5,5,2,2]{3,2,1,0}"), dim1_masked);
  EXPECT_THAT(root, AllOf(op::Shape("f32[1,1,2,2]{3,2,1,0}"),
                          op::ReduceWindow(dim1_resharded, op::Constant())));
}
// Verifies a convolution with lhs tiled along a spatial dimension and rhs
// replicated: the lhs shard exchanges 3-row left and 2-row right halos via
// collective-permute, concatenates and masks them, and convolves with the
// replicated kernel to produce the f32[128,56,112,64] output shard.
TEST_P(SpmdPartitioningTest, ConvolutionLhsTiledRhsReplicated) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[128,224,224,3] parameter(0)
  %lhs.copy = f32[128,224,224,3] copy(f32[128,224,224,3] %lhs),
    sharding={devices=[1,2,1,1]0,1}
  %rhs = f32[7,7,3,64] parameter(1)
  %rhs.copy = f32[7,7,3,64] copy(f32[7,7,3,64] %rhs),
    sharding={replicated}
  ROOT %conv = f32[128,112,112,64] convolution(
    f32[128,224,224,3] %lhs.copy,
    f32[7,7,3,64] %rhs.copy),
    window={size=7x7 stride=2x2 pad=3_3x3_3},
    dim_labels=b01f_01io->b01f,
    sharding={devices=[1,2,1,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[128,112,224,3]"));
  const auto rhs = AllOf(op::Copy(op::Parameter()), op::Shape("f32[7,7,3,64]"));
  auto left_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
                         op::Shape("f32[128,3,224,3]"));
  auto right_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
                          op::Shape("f32[128,2,224,3]"));
  EXPECT_THAT(root,
              AllOf(op::Convolution(
                        op::Select(op::And(),
                                   op::Concatenate(left_halo, lhs, right_halo),
                                   op::Broadcast()),
                        rhs),
                    op::Shape("f32[128,56,112,64]")));
}
// Same convolution as ConvolutionLhsTiledRhsReplicated, but the lhs arrives
// sharded on the batch dimension while the output wants spatial sharding:
// the partitioner must first reshard the lhs with an all-to-all
// (reshape/transpose/reshape) from batch- to spatial-tiling, then do the
// usual halo exchange and masked convolution.
TEST_P(SpmdPartitioningTest, ConvolutionLhsTiledRhsReplicatedNeedReshard) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[128,224,224,3] parameter(0)
  %lhs.copy = f32[128,224,224,3] copy(f32[128,224,224,3] %lhs),
    sharding={devices=[2,1,1,1]0,1}
  %rhs = f32[7,7,3,64] parameter(1)
  %rhs.copy = f32[7,7,3,64] copy(f32[7,7,3,64] %rhs),
    sharding={replicated}
  ROOT %conv = f32[128,112,112,64] convolution(
    f32[128,224,224,3] %lhs.copy,
    f32[7,7,3,64] %rhs.copy),
    window={size=7x7 stride=2x2 pad=3_3x3_3},
    dim_labels=b01f_01io->b01f,
    sharding={devices=[1,2,1,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Reshape(), op::Constant(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[64,224,224,3]"));
  // Batch-to-spatial reshard: reshape -> all-to-all -> transpose -> reshape.
  auto all_to_all =
      AllOf(op::AllToAll(op::Reshape(lhs)), op::Shape("f32[64,2,112,224,3]"));
  auto reshard_lhs = AllOf(op::Reshape(op::Transpose(all_to_all)),
                           op::Shape("f32[128,112,224,3]"));
  const auto rhs = AllOf(op::Copy(op::Parameter()), op::Shape("f32[7,7,3,64]"));
  auto left_halo = AllOf(op::CollectivePermute(op::Slice(reshard_lhs)),
                         op::Shape("f32[128,3,224,3]"));
  auto right_halo = AllOf(op::CollectivePermute(op::Slice(reshard_lhs)),
                          op::Shape("f32[128,2,224,3]"));
  EXPECT_THAT(
      root,
      AllOf(op::Convolution(
                op::Select(op::And(),
                           op::Concatenate(left_halo, reshard_lhs, right_halo),
                           op::Broadcast()),
                rhs),
            op::Shape("f32[128,56,112,64]")));
}
// Variant of ConvolutionLhsTiledRhsReplicated with reordered lhs dimension
// labels (01fb instead of b01f): the tiled spatial dim is now lhs dim 0, so
// halo slices are f32[3,224,3,128] / f32[2,224,3,128] but the partitioned
// structure (halo exchange + masked select + conv) is the same.
TEST_P(SpmdPartitioningTest, ConvolutionLhsTiledRhsReplicatedReordered) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[224,224,3,128] parameter(0)
  %lhs.copy = f32[224,224,3,128] copy(%lhs), sharding={devices=[2,1,1,1]0,1}
  %rhs = f32[7,7,3,64] parameter(1)
  %rhs.copy = f32[7,7,3,64] copy(%rhs), sharding={replicated}
  ROOT %conv = f32[128,112,112,64] convolution(%lhs.copy, %rhs.copy),
    window={size=7x7 stride=2x2 pad=3_3x3_3},
    dim_labels=01fb_01io->b01f,
    sharding={devices=[1,2,1,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Reshape(), op::Constant(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[112,224,3,128]"));
  const auto rhs = AllOf(op::Copy(op::Parameter()), op::Shape("f32[7,7,3,64]"));
  auto left_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
                         op::Shape("f32[3,224,3,128]"));
  auto right_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
                          op::Shape("f32[2,224,3,128]"));
  EXPECT_THAT(root,
              AllOf(op::Convolution(
                        op::Select(op::And(),
                                   op::Concatenate(left_halo, lhs, right_halo),
                                   op::Broadcast()),
                        rhs),
                    op::Shape("f32[128,56,112,64]")));
}
// Verifies a base-dilated convolution where every shard starts at the same
// position within the dilation/window pattern: no halo exchange is needed —
// each partition just slices its padded lhs shard, and the local window
// keeps the original 1_1 low/high padding (checked explicitly).
TEST_P(SpmdPartitioningTest,
       ConvolutionBaseDilationSameStartPatternLhsTiledRhsReplicated) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[128,7,7,512] parameter(0)
  %lhs.copy = f32[128,7,7,512] copy(%lhs),
    sharding={devices=[1,2,1,1]0,1}
  %rhs = f32[3,3,512,512] parameter(1)
  %rhs.copy = f32[3,3,512,512] copy(%rhs),
    sharding={replicated}
  ROOT %conv = f32[128,4,4,512] convolution(%lhs.copy, %rhs.copy),
    window={size=3x3 stride=4x4 pad=1_1x1_1 lhs_dilate=2x2 rhs_reversal=1x1},
    dim_labels=b01f_01io->b01f,
    sharding={devices=[1,2,1,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto sliced_lhs =
      AllOf(op::Slice(op::Copy(op::DynamicSlice(
                op::Pad(op::Parameter(), op::Constant()), op::Constant(),
                op::Reshape(), op::Constant(), op::Constant()))),
            op::Shape("f32[128,3,7,512]"));
  const auto rhs =
      AllOf(op::Copy(op::Parameter()), op::Shape("f32[3,3,512,512]"));
  EXPECT_THAT(root, AllOf(op::Convolution(sliced_lhs, rhs),
                          op::Shape("f32[128,2,4,512]")));
  // The per-shard window keeps the original symmetric padding.
  EXPECT_EQ(root->window().dimensions(0).padding_low(), 1);
  EXPECT_EQ(root->window().dimensions(0).padding_high(), 1);
}
// Verifies a stride-1 base-dilated convolution with tiled lhs: shards do
// NOT start at the same dilation offset, so the partitioner needs a left
// halo, dynamically computed slice offsets derived from each partition's
// starting window position, masking, and a final dynamic-slice on the
// conv output to trim it to the shard's f32[128,7,14,512] extent. The
// adjusted local window padding (low=1, high=0) is checked explicitly.
TEST_P(SpmdPartitioningTest,
       ConvolutionBaseDilationStride1LhsTiledRhsReplicated) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[128,7,7,512] parameter(0)
  %lhs.copy = f32[128,7,7,512] copy(%lhs),
    sharding={devices=[1,2,1,1]0,1}
  %rhs = f32[3,3,512,512] parameter(1)
  %rhs.copy = f32[3,3,512,512] copy(%rhs),
    sharding={replicated}
  ROOT %conv = f32[128,14,14,512] convolution(%lhs.copy, %rhs.copy),
    window={size=3x3 pad=1_2x1_2 lhs_dilate=2x2 rhs_reversal=1x1},
    dim_labels=b01f_01io->b01f,
    sharding={devices=[1,2,1,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs =
      AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
                                      op::Constant(), op::Reshape(),
                                      op::Constant(), op::Constant())),
            op::Shape("f32[128,4,7,512]"));
  const auto rhs =
      AllOf(op::Copy(op::Parameter()), op::Shape("f32[3,3,512,512]"));
  auto left_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
                         op::Shape("f32[128,1,7,512]"));
  // First output window position owned by this partition, and the input
  // element it maps to under the base dilation.
  auto start_window = op::Multiply(op::Reshape(), op::Constant());
  auto start_input_element = op::Divide(start_window, op::Constant());
  auto dynamic_offset_for_padded_concat = op::Subtract(
      op::Constant(), op::Subtract(op::Multiply(op::Reshape(), op::Constant()),
                                   start_input_element));
  auto pre_masking =
      AllOf(op::Shape("f32[128,5,7,512]"),
            op::DynamicSlice(
                AllOf(op::Shape("f32[128,6,7,512]"),
                      op::Pad(op::Concatenate(left_halo, lhs), op::Constant())),
                op::Constant(), dynamic_offset_for_padded_concat,
                op::Constant(), op::Constant()));
  auto masked = op::Select(
      op::Compare(op::Add(op::Iota(), op::Broadcast(start_input_element)),
                  op::Broadcast(op::Constant())),
      pre_masking, op::Broadcast(op::Constant()));
  // The conv over-computes (8 rows); trim dynamically to the 7-row shard.
  auto dynamic_offset_on_output = op::Subtract(
      start_window, op::Multiply(start_input_element, op::Constant()));
  EXPECT_THAT(root,
              AllOf(op::DynamicSlice(AllOf(op::Convolution(masked, rhs),
                                           op::Shape("f32[128,8,14,512]")),
                                     op::Constant(), dynamic_offset_on_output,
                                     op::Constant(), op::Constant()),
                    op::Shape("f32[128,7,14,512]")));
  EXPECT_EQ(root->operand(0)->window().dimensions(0).padding_low(), 1);
  EXPECT_EQ(root->operand(0)->window().dimensions(0).padding_high(), 0);
}
// Select-and-scatter where the window (3x2) equals the stride (3x2), so
// windows never straddle partition boundaries: no halo exchange is needed,
// only masking of the padded rows of the partitioned operand.
TEST_P(SpmdPartitioningTest, SelectAndScatterNoOverlap) {
  absl::string_view hlo_string = R"(
HloModule module
ge {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT compare = pred[] compare(a, b), direction=GE
}
sum {
c = f32[] parameter(0)
d = f32[] parameter(1)
ROOT add = f32[] add(c, d)
}
ENTRY entry {
%param = f32[11,4]{1,0} parameter(0)
%param.copy = f32[11,4] copy(%param), sharding={devices=[4,1]<=[4]}
constant = f32[4,2]{1,0} constant({{1,2},{3,4},{1,0},{2,8}}),
sharding={devices=[4,1]<=[4]}
constant.1 = f32[] constant(0), sharding={replicated}
ROOT select-and-scatter = f32[11,4]{1,0} select-and-scatter(param.copy,
constant, constant.1), window={size=3x2 stride=3x2 pad=0_1x0_0},
select=ge, scatter=sum, sharding={devices=[4,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  // Each of the 4 partitions owns one row of the scatter source.
  auto source =
      AllOf(op::Shape("f32[1,2]{1,0}"),
            op::DynamicSlice(op::Constant(), op::Reshape(), op::Constant()));
  // The operand shard is masked so padded rows contribute the init value.
  auto masked_data = AllOf(
      op::Shape("f32[3,4]{1,0}"),
      op::Select(
          op::Compare(op::Add(op::Iota(), op::Broadcast(op::Multiply(
                                              op::Reshape(), op::Constant()))),
                      op::Broadcast(op::Constant())),
          op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
                                    op::Reshape(), op::Constant())),
          op::Broadcast(op::Constant())));
  EXPECT_THAT(root,
              AllOf(op::SelectAndScatter(masked_data, source, op::Constant()),
                    op::Shape("f32[3,4]{1,0}")));
  // The partitioned op needs no residual window padding.
  EXPECT_EQ(root->window().dimensions(0).padding_low(), 0);
  EXPECT_EQ(root->window().dimensions(0).padding_high(), 0);
}
// Same as SelectAndScatterNoOverlap, but the operand arrives sharded on the
// other dimension ([1,4]) and must first be resharded to [4,1] via
// pad/reshape/all-to-all/transpose before the select-and-scatter runs.
TEST_P(SpmdPartitioningTest, SelectAndScatterNoOverlapReshard) {
  absl::string_view hlo_string = R"(
HloModule module
ge {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT compare = pred[] compare(a, b), direction=GE
}
sum {
c = f32[] parameter(0)
d = f32[] parameter(1)
ROOT add = f32[] add(c, d)
}
ENTRY entry {
%param = f32[11,4]{1,0} parameter(0)
%param.copy = f32[11,4] copy(%param),
sharding={devices=[1,4]<=[4]}
constant = f32[4,2]{1,0} constant({{1,2},{3,4},{1,0},{2,8}}),
sharding={devices=[4,1]<=[4]}
constant.1 = f32[] constant(0), sharding={replicated}
ROOT select-and-scatter = f32[11,4]{1,0} select-and-scatter(param.copy,
constant, constant.1), window={size=3x2 stride=3x2 pad=0_1x0_0},
select=ge, scatter=sum, sharding={devices=[4,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto source =
      AllOf(op::Shape("f32[1,2]{1,0}"),
            op::DynamicSlice(op::Constant(), op::Reshape(), op::Constant()));
  // Operand is initially sharded as one column per device.
  auto operand = AllOf(op::Copy(op::DynamicSlice(
                           op::Parameter(0), op::Constant(), op::Reshape())),
                       op::Shape("f32[11,1]"));
  // Reshard [1,4] -> [4,1]: pad, reshape, all-to-all, transpose, reshape.
  auto reshard_operand = op::Reshape(op::Transpose(
      op::AllToAll(op::Reshape(op::Pad(operand, op::Constant())))));
  auto masked_data = AllOf(
      op::Shape("f32[3,4]{1,0}"),
      op::Select(
          op::Compare(op::Add(op::Iota(), op::Broadcast(op::Multiply(
                                              op::Reshape(), op::Constant()))),
                      op::Broadcast(op::Constant())),
          reshard_operand, op::Broadcast(op::Constant())));
  EXPECT_THAT(root,
              AllOf(op::SelectAndScatter(masked_data, source, op::Constant()),
                    op::Shape("f32[3,4]{1,0}")));
  EXPECT_EQ(root->window().dimensions(0).padding_low(), 0);
  EXPECT_EQ(root->window().dimensions(0).padding_high(), 0);
}
// Select-and-scatter whose windows (size 3, stride 2) straddle partition
// boundaries: both the source and the operand need halo exchanges, dynamic
// realignment, and masking of out-of-range elements, and the result is
// dynamic-sliced back to the per-partition shard shape.
TEST_P(SpmdPartitioningTest, SelectAndScatterWithOverlap) {
  absl::string_view hlo_string = R"(
HloModule module
ge {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT compare = pred[] compare(a, b), direction=GE
}
sum {
c = f32[] parameter(0)
d = f32[] parameter(1)
ROOT add = f32[] add(c, d)
}
ENTRY entry {
%param = f32[11,4]{1,0} parameter(0)
%param.copy = f32[11,4] copy(%param),
sharding={devices=[4,1]<=[4]}
constant = f32[6,2]{1,0} constant({{1,2},{3,4},{1,0},{2,8},{6,6},{1,9}}),
sharding={devices=[4,1]<=[4]}
constant.1 = f32[] constant(0), sharding={replicated}
ROOT select-and-scatter = f32[11,4]{1,0} select-and-scatter(param.copy,
constant, constant.1), window={size=3x2 stride=2x2 pad=1_1x0_0},
select=ge, scatter=sum, sharding={devices=[4,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto source_shard =
      AllOf(op::Shape("f32[2,2]{1,0}"),
            op::DynamicSlice(op::Pad(), op::Reshape(), op::Constant()));
  // Source halo from the left neighbor, plus alignment of the concatenated
  // (and padded) source buffer to this shard's first window.
  auto source_left_halo = op::CollectivePermute(source_shard);
  auto required_source_shard_start =
      op::Divide(op::Multiply(op::Reshape(), op::Constant()), op::Constant());
  auto source_with_halo = op::DynamicSlice(
      AllOf(op::Shape("f32[5,2]{1,0}"),
            op::Pad(op::Concatenate(source_left_halo, source_shard),
                    op::Constant())),
      op::Subtract(op::Constant(),
                   op::Subtract(op::Multiply(op::Reshape(), op::Constant()),
                                required_source_shard_start)),
      op::Constant());
  // Out-of-range source rows are replaced by the init value.
  auto masked_source_with_halo = AllOf(
      AllOf(op::Shape("f32[3,2]{1,0}")),
      op::Select(
          op::Compare(
              op::Add(op::Iota(), op::Broadcast(required_source_shard_start)),
              op::Broadcast(op::Constant())),
          source_with_halo, op::Broadcast(op::Constant())));
  // The operand needs halos on both sides since windows overlap across shards.
  auto data_shard =
      AllOf(op::Shape("f32[3,4]{1,0}"),
            op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
                                      op::Reshape(), op::Constant())));
  auto data_left_halo = AllOf(op::Shape("f32[2,4]{1,0}"),
                              op::CollectivePermute(op::Slice(data_shard)));
  auto data_right_halo = AllOf(op::Shape("f32[2,4]{1,0}"),
                               op::CollectivePermute(op::Slice(data_shard)));
  auto required_data_start_on_padded =
      op::Multiply(required_source_shard_start, op::Constant());
  auto left_halo_size = op::Subtract(
      op::Add(op::Multiply(op::Reshape(), op::Constant()), op::Constant()),
      required_data_start_on_padded);
  auto data_with_halo =
      AllOf(op::Shape("f32[7,4]{1,0}"),
            op::DynamicSlice(
                AllOf(op::Shape("f32[8,4]{1,0}"),
                      op::Pad(op::Concatenate(data_left_halo, data_shard,
                                              data_right_halo),
                              op::Constant())),
                op::Subtract(op::Constant(), left_halo_size), op::Constant()));
  auto index_on_padded =
      op::Add(op::Iota(), op::Broadcast(required_data_start_on_padded));
  // Rows outside the valid [low, high) range of the padded operand are
  // replaced with the init value (two comparisons ANDed together).
  auto masked_data_with_halo = op::Select(
      op::And(op::Compare(index_on_padded, op::Broadcast(op::Constant())),
              op::Compare(index_on_padded, op::Broadcast(op::Constant()))),
      data_with_halo, op::Broadcast(op::Constant()));
  EXPECT_THAT(
      root, AllOf(op::DynamicSlice(op::SelectAndScatter(masked_data_with_halo,
                                                        masked_source_with_halo,
                                                        op::Constant()),
                                   left_halo_size, op::Constant()),
                  op::Shape("f32[3,4]{1,0}")));
  EXPECT_EQ(root->operand(0)->window().dimensions(0).padding_low(), 0);
  EXPECT_EQ(root->operand(0)->window().dimensions(0).padding_high(), 0);
}
TEST_P(SpmdPartitioningTest, ConvolutionLhsTiledRhsTiled) {
  // Both operands are tiled 2 ways along the same spatial dimension, so each
  // device convolves its local shard and the partial contraction results are
  // combined with an all-reduce.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,56,56,64] parameter(0)
%lhs.copy = f32[128,56,56,64] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[128,56,56,256] parameter(1)
%rhs.copy = f32[128,56,56,256] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
ROOT %conv = f32[1,1,64,256] convolution(%lhs.copy, %rhs.copy),
window={size=56x56}, dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  // Each operand shard is a dynamic-slice of its parameter along the tiled
  // spatial dimension (56 rows -> 28 rows per device).
  auto sharded_lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[128,28,56,64]"));
  auto sharded_rhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[128,28,56,256]"));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root,
              AllOf(op::AllReduce(op::Convolution(sharded_lhs, sharded_rhs)),
                    op::Shape("f32[1,1,64,256]")));
}
TEST_P(SpmdPartitioningTest,
       ConvolutionLhsTiledRhsTiledWhenV2ShardingGeneratesReplicaGroupV2) {
  // Only meaningful when the input shardings are expressed in V2 format: the
  // point of the test is that the emitted all-reduce carries an iota (V2)
  // replica-group list rather than an explicit one.
  if (GetParam() != ShardingFormatPicker::ShardingType::kBestEffortV2) {
    GTEST_SKIP() << "This test only runs when input sharding is in V2 format.";
  }
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,56,56,64] parameter(0)
%lhs.copy = f32[128,56,56,64] copy(%lhs), sharding={devices=[1,8,1,1]<=[8]}
%rhs = f32[128,56,56,256] parameter(1)
%rhs.copy = f32[128,56,56,256] copy(%rhs), sharding={devices=[1,8,1,1]<=[8]}
ROOT %conv = f32[1,1,64,256] convolution(%lhs.copy, %rhs.copy),
window={size=56x56}, dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  // Locate the all-reduce produced for the partitioned contraction.
  auto all_reduce_it =
      std::find_if(module->entry_computation()->instructions().begin(),
                   module->entry_computation()->instructions().end(),
                   HloPredicateIsOp<HloOpcode::kAllReduce>);
  EXPECT_NE(all_reduce_it, module->entry_computation()->instructions().end());
  // The replica groups must be an iota list (V2) covering all 8 devices in a
  // single group.
  EXPECT_TRUE(
      (*all_reduce_it)->device_list().iota_replica_group_list().has_value());
  const IotaReplicaGroupList group_list =
      (*all_reduce_it)->device_list().iota_replica_group_list().value();
  EXPECT_EQ(group_list.num_replica_groups(), 1);
  EXPECT_EQ(group_list.num_devices_per_group(), 8);
  EXPECT_THAT(group_list.reshape_dims(), ::testing::ElementsAre(8));
  EXPECT_THAT(group_list.transpose_perm(), ::testing::ElementsAre(0));
}
// Convolution with rhs_reversal=1: both operands are tiled on the contracted
// spatial dimension (with opposite device orders, 0,1 vs 1,0), so the RHS is
// left-extended via collective-permute and both sides are masked with selects
// before the sharded convolution; partial results are all-reduced.
TEST_P(SpmdPartitioningTest, ConvolutionLhsTiledRhsTiledWindowReversal) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[5,128,64] parameter(0), sharding={devices=[2,1,1]0,1}
%rhs = f32[5,128,256] parameter(1), sharding={devices=[2,1,1]1,0}
ROOT %conv = f32[1,64,256] convolution(%lhs, %rhs),
window={size=5 rhs_reversal=1}, dim_labels=0fb_0io->0bf,
sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto lhs_masked =
      AllOf(op::Shape("f32[3,128,64]"), op::Select(_, op::Parameter(0), _));
  // Left padding for the reversed window comes from the other partition.
  const auto rhs_left_padded =
      op::Concatenate(op::CollectivePermute(op::Slice(op::Parameter(1))),
                      op::Slice(op::Parameter(1)));
  const auto rhs_masked =
      AllOf(op::Shape("f32[3,128,256]"), op::Select(_, rhs_left_padded, _));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root,
              AllOf(op::AllReduce(op::Convolution(lhs_masked, rhs_masked)),
                    op::Shape("f32[1,64,256]")));
}
TEST_P(SpmdPartitioningTest, DotLhsTiledRhsTiledWithReshard) {
  // The operands are tiled on different dimensions; the partitioner reshards
  // the LHS (reshape + all-to-all + transpose) onto the RHS's sharding before
  // the contraction, then all-reduces the partial results.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,56,56,64] parameter(0)
%lhs.copy = f32[128,56,56,64] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[128,56,56,256] parameter(1)
%rhs.copy = f32[128,56,56,256] copy(%rhs), sharding={devices=[2,1,1,1]0,1}
ROOT %conv = f32[1,1,64,256] convolution(%lhs.copy, %rhs.copy),
window={size=56x56}, dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  auto lhs_shard = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[128,28,56,64]"));
  auto rhs_shard = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Reshape(), op::Constant(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[64,56,56,256]"));
  // LHS resharded from [1,2,1,1] to the RHS's [2,1,1,1] tiling.
  auto lhs_all_to_all = AllOf(op::AllToAll(op::Reshape(lhs_shard)),
                              op::Shape("f32[2,64,28,56,64]"));
  auto resharded_lhs = AllOf(op::Reshape(op::Transpose(lhs_all_to_all)));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root,
              AllOf(op::AllReduce(op::Convolution(resharded_lhs, rhs_shard)),
                    op::Shape("f32[1,1,64,256]")));
}
// Convolution where the operands are tiled on different dimensions; here the
// RHS is resharded (reshape + all-to-all + transpose) onto the LHS sharding,
// and the negative output padding lets the LHS be sliced locally.
TEST_P(SpmdPartitioningTest, ConvolutionLhsTiledRhsTiledWithReshard) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,56,56,512] parameter(0)
%lhs.copy = f32[128,56,56,512] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[128,28,28,64] parameter(1)
%rhs.copy = f32[128,28,28,64] copy(%rhs), sharding={devices=[2,1,1,1]0,1}
ROOT %conv = f32[1,1,512,64] convolution(%lhs.copy, %rhs.copy),
window={size=28x28 pad=0_-1x0_-1 rhs_dilate=2x2},
dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[128,28,56,512]"));
  const auto rhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Reshape(), op::Constant(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[64,28,28,64]"));
  // RHS resharded from [2,1,1,1] to the LHS's [1,2,1,1] tiling.
  auto all_to_all =
      AllOf(op::AllToAll(op::Reshape(rhs)), op::Shape("f32[64,2,14,28,64]"));
  auto reshard = op::Reshape(op::Transpose(all_to_all));
  EXPECT_THAT(root,
              AllOf(op::AllReduce(op::Convolution(op::Slice(lhs), reshard)),
                    op::Shape("f32[1,1,512,64]")));
}
// Dilated RHS whose sharded dimension (14) does not divide evenly across 4
// partitions: the RHS is padded and masked, and the LHS needs a right halo
// plus dynamic realignment before the all-reduced sharded convolution.
TEST_P(SpmdPartitioningTest,
       ConvolutionLhsTiledRhsTiled_UnevenDilatedRHSPartitioned) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[8,28,28,8] parameter(0)
%lhs.copy = f32[8,28,28,8] copy(%lhs), sharding={devices=[1,4,1,1]<=[4]}
%rhs = f32[8,14,14,64] parameter(1)
%rhs.copy = f32[8,14,14,64] copy(%rhs), sharding={devices=[1,4,1,1]<=[4]}
ROOT %conv = f32[1,1,8,64] convolution(%lhs.copy, %rhs.copy),
window={size=14x14 pad=0_-1x0_-1 rhs_dilate=2x2},
dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[8,7,28,8]"));
  // RHS padded from 14 to 16 rows so it splits evenly across 4 partitions.
  const auto rhs = AllOf(op::Pad(op::Parameter(), op::Constant()),
                         op::Shape("f32[8,16,14,64]"));
  // Padded rows are masked out before use.
  auto selected_rhs = AllOf(
      op::Select(op::Compare(),
                 op::Copy(op::DynamicSlice(rhs, op::Constant(), op::Reshape(),
                                           op::Constant(), op::Constant())),
                 op::Broadcast()),
      op::Shape("f32[8,4,14,64]"));
  auto right_halo =
      AllOf(op::CollectivePermute(op::Slice(lhs)), op::Shape("f32[8,2,28,8]"));
  auto selected_lhs =
      AllOf(op::DynamicSlice(
                op::Pad(op::Concatenate(lhs, right_halo), op::Constant()),
                op::Constant(), op::Reshape(), op::Constant(), op::Constant()),
            op::Shape("f32[8,7,28,8]"));
  EXPECT_THAT(root,
              AllOf(op::AllReduce(op::Convolution(selected_lhs, selected_rhs)),
                    op::Shape("f32[1,1,8,64]")));
}
TEST_P(SpmdPartitioningTest, ConvolutionLhsTiledRhsTiledWithPadding) {
  // With pad=1_1 on the contracted spatial dimension, the RHS shard needs one
  // row of halo from each neighbor before the sharded convolution; the
  // contraction result is then all-reduced.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,28,28,128] parameter(0)
%lhs.copy = f32[32,28,28,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[32,28,28,64] parameter(1)
%rhs.copy = f32[32,28,28,64] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
ROOT %conv = f32[3,3,128,64] convolution(%lhs.copy, %rhs.copy),
window={size=28x28 pad=1_1x1_1}, dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2, false));
  VLOG(1) << module->ToString();
  auto lhs_shard = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[32,14,28,128]"));
  auto rhs_shard = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[32,14,28,64]"));
  // One row of halo fetched from each neighboring partition.
  auto halo_from_left = AllOf(op::CollectivePermute(op::Slice(rhs_shard)),
                              op::Shape("f32[32,1,28,64]"));
  auto halo_from_right = AllOf(op::CollectivePermute(op::Slice(rhs_shard)),
                               op::Shape("f32[32,1,28,64]"));
  auto rhs_with_halo =
      AllOf(op::Concatenate(halo_from_left, rhs_shard, halo_from_right),
            op::Shape("f32[32,16,28,64]"));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root,
              AllOf(op::AllReduce(op::Convolution(lhs_shard, rhs_with_halo)),
                    op::Shape("f32[3,3,128,64]")));
}
// Convolution with rhs_dilate=2x2: each RHS shard needs two rows of halo from
// each neighbor before the sharded contraction, which is then all-reduced.
TEST_P(SpmdPartitioningTest, ConvolutionLhsTiledRhsTiledWindowDilate) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,224,224,3] parameter(0)
%lhs.copy = f32[128,224,224,3] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[128,112,112,64] parameter(1)
%rhs.copy = f32[128,112,112,64] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
ROOT %conv = f32[7,7,3,64] convolution(%lhs.copy, %rhs.copy),
window={size=112x112 pad=3_2x3_2 rhs_dilate=2x2}, dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 2,
                           false));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[128,112,224,3]"));
  const auto rhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[128,56,112,64]"));
  // Two rows of halo on each side of the RHS shard.
  auto left_halo = AllOf(op::CollectivePermute(op::Slice(rhs)),
                         op::Shape("f32[128,2,112,64]"));
  auto right_halo = AllOf(op::CollectivePermute(op::Slice(rhs)),
                          op::Shape("f32[128,2,112,64]"));
  EXPECT_THAT(root,
              AllOf(op::AllReduce(op::Convolution(
                        lhs, AllOf(op::Concatenate(left_halo, rhs, right_halo),
                                   op::Shape("f32[128,60,112,64]")))),
                    op::Shape("f32[7,7,3,64]")));
}
TEST_P(SpmdPartitioningTest,
       ConvolutionLhsTiledRhsTiledWindowDilateNegativeRhsPadding) {
  // Dilated-RHS convolution with negative window padding (pad=0_-1): the
  // expected pattern shows no halo exchange — each shard convolves its slices
  // directly and the contraction is all-reduced.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,56,56,256] parameter(0)
%lhs.copy = f32[128,56,56,256] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[128,28,28,512] parameter(1)
%rhs.copy = f32[128,28,28,512] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
ROOT %conv = f32[1,1,256,512] convolution(%lhs.copy, %rhs.copy),
window={size=28x28 pad=0_-1x0_-1 rhs_dilate=2x2}, dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2, false));
  VLOG(1) << module->ToString();
  auto sharded_lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[128,28,56,256]"));
  auto sharded_rhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[128,14,28,512]"));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root,
              AllOf(op::AllReduce(op::Convolution(sharded_lhs, sharded_rhs)),
                    op::Shape("f32[1,1,256,512]")));
}
// Dilated RHS with a 7-row dimension split over 2 partitions: the RHS is
// padded to an even split and masked, a left halo is exchanged, and the LHS
// is padded and dynamically realigned before the all-reduced convolution.
TEST_P(SpmdPartitioningTest, ConvolutionLhsTiledRhsTiledWindowDilateUneven) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,14,14,512] parameter(0)
%lhs.copy = f32[128,14,14,512] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[128,7,7,512] parameter(1)
%rhs.copy = f32[128,7,7,512] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
ROOT %conv = f32[3,3,512,512] convolution(%lhs.copy, %rhs.copy),
window={size=7x7 pad=1_0x1_0 rhs_dilate=2x2}, dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 2,
                           false));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[128,7,14,512]"));
  // RHS shard of the padded (7 -> 8 rows) operand, with padding masked out.
  const auto rhs = AllOf(
      op::Select(op::Compare(),
                 op::Copy(op::DynamicSlice(
                     op::Pad(op::Parameter(), op::Constant()), op::Constant(),
                     op::Reshape(), op::Constant(), op::Constant())),
                 op::Broadcast()),
      op::Shape("f32[128,4,7,512]"));
  auto left_halo = AllOf(op::CollectivePermute(op::Slice(rhs)),
                         op::Shape("f32[128,1,7,512]"));
  EXPECT_THAT(root,
              AllOf(op::AllReduce(op::Convolution(
                        AllOf(op::DynamicSlice(op::Pad(lhs, op::Constant()),
                                               op::Constant(), op::Subtract(),
                                               op::Constant(), op::Constant()),
                              op::Shape("f32[128,10,14,512]")),
                        AllOf(op::Concatenate(left_halo, rhs),
                              op::Shape("f32[128,5,7,512]")))),
                    op::Shape("f32[3,3,512,512]")));
}
// Same computation as ConvolutionLhsTiledRhsTiledWithPadding, but partitioned
// so the halo exchange lands on the LHS: one row from each neighbor.
TEST_P(SpmdPartitioningTest, ConvolutionLhsTiledRhsTiledWithPadding_HaloOnLhs) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,28,28,128] parameter(0)
%lhs.copy = f32[32,28,28,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[32,28,28,64] parameter(1)
%rhs.copy = f32[32,28,28,64] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
ROOT %conv = f32[3,3,128,64] convolution(%lhs.copy, %rhs.copy),
window={size=28x28 pad=1_1x1_1}, dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[32,14,28,128]"));
  const auto rhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[32,14,28,64]"));
  // One row of halo on each side of the LHS shard.
  auto left_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
                         op::Shape("f32[32,1,28,128]"));
  auto right_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
                          op::Shape("f32[32,1,28,128]"));
  EXPECT_THAT(root, AllOf(op::AllReduce(op::Convolution(
                              AllOf(op::Concatenate(left_halo, lhs, right_halo),
                                    op::Shape("f32[32,16,28,128]")),
                              rhs)),
                          op::Shape("f32[3,3,128,64]")));
}
// Dilated-RHS convolution partitioned so that the halo exchange lands on the
// LHS: three rows from the left neighbor and two from the right (matching the
// asymmetric pad=3_2).
TEST_P(SpmdPartitioningTest,
       ConvolutionLhsTiledRhsTiledWindowDilate_HaloOnLhs) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,224,224,3] parameter(0)
%lhs.copy = f32[128,224,224,3] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[128,112,112,64] parameter(1)
%rhs.copy = f32[128,112,112,64] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
ROOT %conv = f32[7,7,3,64] convolution(%lhs.copy, %rhs.copy),
window={size=112x112 pad=3_2x3_2 rhs_dilate=2x2}, dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[128,112,224,3]"));
  const auto rhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[128,56,112,64]"));
  auto left_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
                         op::Shape("f32[128,3,224,3]"));
  auto right_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
                          op::Shape("f32[128,2,224,3]"));
  EXPECT_THAT(root, AllOf(op::AllReduce(op::Convolution(
                              AllOf(op::Concatenate(left_halo, lhs, right_halo),
                                    op::Shape("f32[128,117,224,3]")),
                              rhs)),
                          op::Shape("f32[7,7,3,64]")));
}
TEST_P(SpmdPartitioningTest,
       ConvolutionLhsTiledRhsTiledWindowDilateNegativeRhsPadding_HaloOnLhs) {
  // Same computation as ...WindowDilateNegativeRhsPadding, but partitioned so
  // the adjustment lands on the LHS: the expected pattern slices the LHS
  // shard instead of performing any halo exchange.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,56,56,256] parameter(0)
%lhs.copy = f32[128,56,56,256] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[128,28,28,512] parameter(1)
%rhs.copy = f32[128,28,28,512] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
ROOT %conv = f32[1,1,256,512] convolution(%lhs.copy, %rhs.copy),
window={size=28x28 pad=0_-1x0_-1 rhs_dilate=2x2}, dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  auto sharded_lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[128,28,56,256]"));
  auto sharded_rhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[128,14,28,512]"));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::AllReduce(op::Convolution(op::Slice(sharded_lhs),
                                                        sharded_rhs)),
                          op::Shape("f32[1,1,256,512]")));
}
// Uneven dilated-RHS case with the halo exchange on the LHS: the masked RHS
// shard convolves against a right-halo-extended, padded, and dynamically
// realigned LHS shard; partial results are all-reduced.
TEST_P(SpmdPartitioningTest,
       ConvolutionLhsTiledRhsTiledWindowDilateUneven_HaloOnLhs) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,14,14,512] parameter(0)
%lhs.copy = f32[128,14,14,512] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[128,7,7,512] parameter(1)
%rhs.copy = f32[128,7,7,512] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
ROOT %conv = f32[3,3,512,512] convolution(%lhs.copy, %rhs.copy),
window={size=7x7 pad=1_0x1_0 rhs_dilate=2x2}, dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[128,7,14,512]"));
  // RHS shard of the padded (7 -> 8 rows) operand, with padding masked out.
  const auto rhs = AllOf(
      op::Select(op::Compare(),
                 op::Copy(op::DynamicSlice(
                     op::Pad(op::Parameter(), op::Constant()), op::Constant(),
                     op::Reshape(), op::Constant(), op::Constant())),
                 op::Broadcast()),
      op::Shape("f32[128,4,7,512]"));
  auto right_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
                          op::Shape("f32[128,1,14,512]"));
  EXPECT_THAT(
      root, AllOf(op::AllReduce(op::Convolution(
                      AllOf(op::DynamicSlice(
                                AllOf(op::Pad(op::Concatenate(lhs, right_halo),
                                              op::Constant()),
                                      op::Shape("f32[128,10,14,512]")),
                                op::Constant(), op::Reshape(), op::Constant(),
                                op::Constant()),
                            op::Shape("f32[128,9,14,512]")),
                      rhs)),
                  op::Shape("f32[3,3,512,512]")));
}
TEST_P(SpmdPartitioningTest, ConcatenateAlongNonPartitionedDimension) {
  // Concatenating along dim 1 while sharded on dim 0: each partition simply
  // concatenates its local shards; no collectives appear in the pattern.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[14,257] parameter(0)
%param0.copy = f32[14,257] copy(%param0), sharding={devices=[2,1]0,1}
%param1 = f32[14,116] parameter(1)
%param1.copy = f32[14,116] copy(%param1), sharding={devices=[2,1]0,1}
ROOT %concatenate = f32[14,373] concatenate(%param0.copy, %param1.copy),
dimensions={1}, sharding={devices=[2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  auto operand0_shard =
      AllOf(op::Copy(op::DynamicSlice(op::Parameter(), op::Reshape(),
                                      op::Constant())),
            op::Shape("f32[7,257]"));
  auto operand1_shard =
      AllOf(op::Copy(op::DynamicSlice(op::Parameter(), op::Reshape(),
                                      op::Constant())),
            op::Shape("f32[7,116]"));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Concatenate(operand0_shard, operand1_shard),
                          op::Shape("f32[7,373]")));
}
// Concatenation along the dimension that is itself partitioned: shards are
// pasted into a full-size buffer with dynamic-update-slices, combined with an
// all-reduce, then dynamic-sliced back down to the local shard. The first
// operand (257 columns over 2 devices) divides unevenly and must be masked.
TEST_P(SpmdPartitioningTest, ConcatenateAlongPartitionedDimension) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[14,257] parameter(0)
%param0.copy = f32[14,257] copy(%param0), sharding={devices=[1,2]0,1}
%param1 = f32[14,116] parameter(1)
%param1.copy = f32[14,116] copy(%param1), sharding={devices=[1,2]0,1}
ROOT %concatenate = f32[14,373] concatenate(%param0.copy, %param1.copy),
dimensions={1}, sharding={devices=[1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  // 257 columns over 2 devices -> padded shards of 129 columns.
  auto param0 =
      AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
                                      op::Constant(), op::Reshape())),
            op::Shape("f32[14,129]"));
  // Mask the pad column so it does not leak into the concatenated result.
  auto param0_adjusted =
      AllOf(op::Select(op::Compare(op::Add(), op::Broadcast(op::Constant())),
                       param0, op::Broadcast(op::Constant())),
            op::Shape("f32[14,129]"));
  auto param1 = AllOf(op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(),
                                                op::Reshape())),
                      op::Shape("f32[14,58]"));
  EXPECT_THAT(root, AllOf(op::DynamicSlice(
                              AllOf(op::AllReduce(op::DynamicUpdateSlice(
                                        op::DynamicUpdateSlice(
                                            op::Broadcast(), param0_adjusted,
                                            op::Constant(), op::Multiply()),
                                        param1, op::Constant(), op::Add())),
                                    op::Shape("f32[14,374]")),
                              op::Constant(), op::Multiply()),
                          op::Shape("f32[14,187]")));
}
TEST_P(SpmdPartitioningTest, ConcatenateAlongBothDimensions) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[14,257] parameter(0), sharding={devices=[2,2]<=[4]}
%param1 = f32[14,116] parameter(1), sharding={devices=[2,2]<=[4]}
ROOT %concatenate = f32[14,373] concatenate(%param0, %param1),
dimensions={1}, sharding={devices=[2,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(op::Parameter(0), op::Shape("f32[7,129]"));
auto param0_adjusted =
AllOf(op::Select(op::Compare(op::Add(), op::Broadcast(op::Constant())),
param0, op::Broadcast(op::Constant())),
op::Shape("f32[7,129]"));
auto param1 = AllOf(op::Parameter(1), op::Shape("f32[7,58]"));
EXPECT_THAT(root, AllOf(op::DynamicSlice(
AllOf(op::AllReduce(op::DynamicUpdateSlice(
op::DynamicUpdateSlice(
op::Broadcast(), param0_adjusted,
op::Constant(), op::Multiply()),
param1, op::Constant(), op::Add())),
op::Shape("f32[7,374]")),
op::Constant(), op::Multiply()),
op::Shape("f32[7,187]")));
}
TEST_P(SpmdPartitioningTest, PadAlongNonPartitionedDimension) {
  // Padding a dimension other than the sharded one needs no communication:
  // each partition pads its local shard directly.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[128,14,257] parameter(0), sharding={devices=[1,1,2]0,1}
%const = f32[] constant(0)
ROOT %pad = f32[128,17,257] pad(%param0, %const), padding=0_0x1_2x0_0,
sharding={devices=[1,1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  auto local_operand = AllOf(op::Parameter(), op::Shape("f32[128,14,129]"));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Pad(local_operand, op::Constant()),
                          op::Shape("f32[128,17,129]")));
}
TEST_P(SpmdPartitioningTest, PadAlongNonPartitionedDimensionReshard) {
  // The replicated operand is first resharded (pad + dynamic-slice) onto the
  // output's [1,1,2] tiling, after which each partition pads locally.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[128,14,257] parameter(0), sharding={replicated}
%const = f32[] constant(0)
ROOT %pad = f32[128,17,257] pad(%param0, %const), padding=0_0x1_2x0_0,
sharding={devices=[1,1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  auto replicated_input = AllOf(op::Parameter(), op::Shape("f32[128,14,257]"));
  auto resharded_input =
      op::DynamicSlice(op::Pad(replicated_input, _), op::Constant(),
                       op::Constant(), op::Multiply());
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Pad(resharded_input, op::Constant()),
                          op::Shape("f32[128,17,129]")));
}
// Padding along the partitioned dimension: a one-element halo is exchanged,
// the buffer is padded, and a select/dynamic-slice picks which positions take
// the pad value versus neighbor data.
TEST_P(SpmdPartitioningTest, PadAlongPartitionedDimension) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[14,257] parameter(0), sharding={devices=[1,2]0,1}
%const = f32[] constant(0)
ROOT %pad = f32[14,259] pad(%param0, %const), padding=0_0x0_2,
sharding={devices=[1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto param0 = AllOf(op::Parameter(), op::Shape("f32[14,129]"));
  // One column of halo appended from the neighboring partition.
  auto after_halo_exchange =
      AllOf(op::Shape("f32[14,130]"),
            op::Concatenate(param0, op::CollectivePermute(op::Slice(param0))));
  auto pad = AllOf(op::Shape("f32[14,131]"),
                   op::Pad(after_halo_exchange, op::Constant()));
  EXPECT_THAT(root, op::Select(_, op::DynamicSlice(pad, op::Constant(), _), _));
}
// Pad with interior padding (padding=2_1_2) on a sharded dimension: halo
// exchange plus pad/dynamic-slice realignment of the shard, followed by the
// local pad and a final dynamic-slice to the output shard.
TEST_P(SpmdPartitioningTest, PadAlongPartitionedDimensionWithInteriorPadding) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[7] parameter(0), sharding={devices=[2]0,1}
%param1 = f32[] parameter(1), sharding={replicated}
ROOT %pad = f32[22] pad(%param0, %param1), padding=2_1_2,
sharding={devices=[2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto param0 = AllOf(op::Parameter(), op::Shape("f32[4]"));
  // Halo from the left neighbor, padded with the pad value (parameter 1) and
  // realigned with a dynamic-slice.
  auto after_halo_exchange = AllOf(
      op::Shape("f32[4]"),
      op::DynamicSlice(
          AllOf(op::Shape("f32[5]"),
                op::Pad(AllOf(op::Shape("f32[4]"),
                              op::Concatenate(
                                  op::CollectivePermute(op::Slice(param0)),
                                  op::Slice(param0))),
                        op::Parameter(1))),
          _));
  auto pad = op::Pad(after_halo_exchange, op::Parameter(1));
  EXPECT_THAT(root, op::DynamicSlice(pad, _));
}
// Pad with interior padding on an input that is partially replicated
// ({devices=[1,2,2] last_tile_dim_replicate}); the partitioned dimension is
// dim 1, so the halo exchange and masking happen along that dimension only.
TEST_P(SpmdPartitioningTest, PartialReplicatePad) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %param0 = f32[11,7] parameter(0),
    sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
  %param1 = f32[] parameter(1), sharding={replicated}
  ROOT %pad = f32[27,22] pad(%param0, %param1), padding=2_4_1x2_1_2,
    sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  // Per-partition shard of the input: dim 1 split 7 -> 4 (after padding).
  auto param0 = AllOf(op::Parameter(), op::Shape("f32[11,4]"))
;
  // Halo exchange + pad-value fill + dynamic-slice along the sharded dim 1.
  auto after_halo_exchange = AllOf(
      op::Shape("f32[11,4]"),
      op::DynamicSlice(
          AllOf(op::Shape("f32[11,5]"),
                op::Pad(AllOf(op::Shape("f32[11,4]"),
                              op::Concatenate(
                                  op::CollectivePermute(op::Slice(param0)),
                                  op::Slice(param0))),
                        op::Parameter(1))),
          op::Constant(), _));
  auto pad = op::Pad(after_halo_exchange, op::Parameter(1));
  // Result shard is f32[27,11]: dim 0 padded fully locally, dim 1 sharded.
  EXPECT_THAT(root, AllOf(op::DynamicSlice(pad, op::Constant(), _),
                          op::Shape("f32[27,11]")));
}
// Slice along an unpartitioned dimension lowers to a plain local slice on
// each partition's shard -- no communication required.
TEST_P(SpmdPartitioningTest, SliceAlongNonPartitionedDimension) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %param0 = f32[128,14,257] parameter(0)
  %param0.copy = f32[128,14,257] copy(%param0), sharding={devices=[1,1,2]0,1}
  ROOT %slice = f32[128,11,257] slice(%param0.copy),
    slice={[0:128:1], [2:13:1], [0:257:1]}, sharding={devices=[1,1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  // The copy shards dim 2 (257 padded, then split to 129 per partition).
  auto sharded_copy = AllOf(
      op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
                                op::Constant(), op::Constant(),
                                op::Reshape())),
      op::Shape("f32[128,14,129]"));
  const auto* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root,
              AllOf(op::Slice(sharded_copy), op::Shape("f32[128,11,129]")));
}
// Strided slice along the partitioned dimension (dim 2): requires a halo
// exchange to fetch boundary elements from the neighbor, then a
// dynamic-slice (offset depends on the partition) and a final local slice.
TEST_P(SpmdPartitioningTest, SliceAlongPartitionedDimension) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %param0 = f32[128,14,257] parameter(0), sharding={devices=[1,1,2]0,1}
  ROOT %slice = f32[63,14,251] slice(%param0),
    slice={[2:128:2], [0:14:1], [5:256:1]}, sharding={devices=[1,1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto param0 = AllOf(op::Parameter(0), op::Shape("f32[128,14,129]"));
  // Shard + 2-element halo from the neighbor, realigned per partition.
  EXPECT_THAT(
      root,
      AllOf(op::Slice(AllOf(
                op::DynamicSlice(
                    AllOf(op::Concatenate(
                              op::Slice(param0),
                              AllOf(op::CollectivePermute(op::Slice(param0)),
                                    op::Shape("f32[128,14,2]"))),
                          op::Shape("f32[128,14,129]")),
                    op::Constant(), op::Constant(), op::Add()),
                op::Shape("f32[128,14,126]"))),
            op::Shape("f32[63,14,126]")));
}
// Slicing out the last element of a 1-D array sharded four ways: the value
// lives entirely on the last partition and is moved into place with a
// collective-permute rather than any gather/reshard.
TEST_P(SpmdPartitioningTest, SliceAlongPartitionedDimension2) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %param0 = f32[4] parameter(0), sharding={devices=[4]<=[4]}
  ROOT %slice = f32[1] slice(%param0),
    slice={[3:4]}, sharding={devices=[4]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  auto shard = AllOf(op::Parameter(0), op::Shape("f32[1]"));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Copy(op::CollectivePermute(shard)),
                    op::Shape("f32[1]")));
}
// pad(1_0) followed by slice([0:4]) is a shift-right by one element. With a
// nonzero pad value (2.0) the shifted-in slot must be masked in via a
// select on top of the collective-permute rotation.
TEST_P(SpmdPartitioningTest, MergedPadThenSliceShiftRight) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %param0 = f32[4] parameter(0), sharding={devices=[4]<=[4]}
  %init = f32[] constant(2.0)
  %pad = f32[5] pad(%param0, %init), padding=1_0, sharding={devices=[4]<=[4]}
  %copy = f32[5] copy(%pad), sharding={devices=[4]<=[4]}
  %copy.1 = f32[5] copy(%copy), sharding={devices=[4]<=[4]}
  ROOT %slice = f32[4] slice(%copy.1), slice={[0:4]}, sharding={devices=[4]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  auto shard = AllOf(op::Parameter(0), op::Shape("f32[1]"));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Select(_, op::CollectivePermute(shard), _),
                    op::Shape("f32[1]")));
}
// Shift-right via pad(1_0)+slice where the pad value is 0: no masking select
// is emitted, so the whole pattern lowers to a bare collective-permute of
// each single-element shard.
TEST_P(SpmdPartitioningTest, MergedPadThenSliceShiftRightNoMasking) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %param0 = f32[4] parameter(0), sharding={devices=[4]<=[4]}
  %init = f32[] constant(0)
  %pad = f32[5] pad(%param0, %init), padding=1_0, sharding={devices=[4]<=[4]}
  %copy = f32[5] copy(%pad), sharding={devices=[4]<=[4]}
  %copy.1 = f32[5] copy(%copy), sharding={devices=[4]<=[4]}
  ROOT %slice = f32[4] slice(%copy.1), slice={[0:4]}, sharding={devices=[4]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  auto shard = AllOf(op::Parameter(0), op::Shape("f32[1]"));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::CollectivePermute(shard), op::Shape("f32[1]")));
}
// concatenate(slice(tail), slice(head)) along the sharded dimension is a
// rotate-right; the partitioner recognizes it and emits a collective-permute
// plus concatenate instead of materializing the full array.
TEST_P(SpmdPartitioningTest, MergedSliceThenConcatRotateRight) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %param0 = f32[12] parameter(0), sharding={devices=[4]<=[4]}
  %slice0 = f32[2] slice(%param0), slice={[10:12]}, sharding={devices=[4]<=[4]}
  %slice1 = f32[10] slice(%param0), slice={[0:10]}, sharding={devices=[4]<=[4]}
  ROOT %concat = f32[12] concatenate(%slice0, %slice1), dimensions={0},
    sharding={devices=[4]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  auto shard = AllOf(op::Parameter(0), op::Shape("f32[3]"));
  auto rotated = op::Concatenate(op::CollectivePermute(op::Slice(shard)),
                                 op::Slice(shard));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(rotated, op::Shape("f32[3]")));
}
// Rotate-right where the rotate amount (2) equals the per-partition shard
// size: the whole rotation collapses into a single collective-permute of
// each shard, with no slicing or concatenation.
TEST_P(SpmdPartitioningTest,
       MergedSliceThenConcatRotateRightWithAlignedPadding) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %param0 = f32[6] parameter(0), sharding={devices=[4]<=[4]}
  %slice0 = f32[2] slice(%param0), slice={[4:6]}, sharding={devices=[4]<=[4]}
  %slice1 = f32[4] slice(%param0), slice={[0:4]}, sharding={devices=[4]<=[4]}
  ROOT %concat = f32[6] concatenate(%slice0, %slice1), dimensions={0},
    sharding={devices=[4]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  auto shard = AllOf(op::Parameter(0), op::Shape("f32[2]"));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::CollectivePermute(shard));
}
// Rotate-right whose amount (6) is not a multiple of the shard size: the
// result mixes a whole-shard permute with a two-piece permuted concatenate,
// and a select picks the right variant per partition.
TEST_P(SpmdPartitioningTest,
       MergedSliceThenConcatRotateRightWithUnalignedPadding) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %param0 = f32[10] parameter(0), sharding={devices=[4]<=[4]}
  %slice0 = f32[6] slice(%param0), slice={[4:10]}, sharding={devices=[4]<=[4]}
  %slice1 = f32[4] slice(%param0), slice={[0:4]}, sharding={devices=[4]<=[4]}
  ROOT %concat = f32[10] concatenate(%slice0, %slice1), dimensions={0},
    sharding={devices=[4]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto param0 = AllOf(op::Parameter(0), op::Shape("f32[3]"));
  // rotate0: whole shard permuted; rotate1: shard split in two, each half
  // permuted separately.
  auto rotate0 = op::CollectivePermute(param0);
  auto rotate1 = op::Concatenate(op::CollectivePermute(op::Slice(param0)),
                                 op::CollectivePermute(op::Slice(param0)));
  EXPECT_THAT(root,
              AllOf(op::Select(_, rotate1, rotate0), op::Shape("f32[3]")));
}
// Partially-replicated input ({devices=[1,1,2,2] last_tile_dim_replicate})
// with a slice along an unpartitioned dimension: the slice runs locally on
// each shard with no communication.
TEST_P(SpmdPartitioningTest,
       PartialReplicateSliceAlongNonPartitionedDimension) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %param0 = f32[128,14,257] parameter(0), sharding={devices=[1,1,2,2]<=[4] last_tile_dim_replicate}
  ROOT %slice = f32[128,11,257] slice(%param0),
    slice={[0:128:1], [2:13:1], [0:257:1]}, sharding={devices=[1,1,2,2]<=[4] last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  auto shard = AllOf(op::Parameter(), op::Shape("f32[128,14,129]"));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Slice(shard), op::Shape("f32[128,11,129]")));
}
// Partially-replicated input with a strided slice along the partitioned
// dimension (dim 2): halo exchange plus a dynamic-slice whose offset is
// computed from the partition id (via a table lookup), then a local slice.
TEST_P(SpmdPartitioningTest, PartialReplicateSliceAlongPartitionedDimension) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %param0 = f32[128,14,257] parameter(0), sharding={devices=[1,1,2,2]<=[4] last_tile_dim_replicate}
  ROOT %slice = f32[63,14,251] slice(%param0),
    slice={[2:128:2], [0:14:1], [5:256:1]}, sharding={devices=[1,1,2,2]<=[4] last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto param0 = AllOf(op::Parameter(), op::Shape("f32[128,14,129]"))
;
  // Shard + 2-element halo from the neighbor; the dynamic-slice offset is
  // partition-dependent: table[partition_id] * stride + constant.
  EXPECT_THAT(
      root,
      AllOf(
          op::Slice(AllOf(
              op::DynamicSlice(
                  AllOf(op::Concatenate(
                            op::Slice(param0),
                            AllOf(op::CollectivePermute(op::Slice(param0)),
                                  op::Shape("f32[128,14,2]"))),
                        op::Shape("f32[128,14,129]")),
                  op::Constant(), op::Constant(),
                  op::Add(op::Multiply(op::Reshape(op::DynamicSlice(
                                           op::Constant(), op::PartitionId())),
                                       op::Constant()),
                          op::Constant())),
              op::Shape("f32[128,14,126]"))),
          op::Shape("f32[63,14,126]")));
}
// Tuple-shaped sort with maximal-device sharding: all data lives on device
// 0, so the sort is emitted unpartitioned with its full operand shapes.
TEST_P(SpmdPartitioningTest, DeviceMaximalTupleSort) {
  absl::string_view hlo_string = R"(
HloModule module
ge {
  p.0 = f32[] parameter(0)
  p.1 = f32[] parameter(1)
  p.2 = s32[] parameter(2)
  p.3 = s32[] parameter(3)
  ROOT compare = pred[] compare(p.0, p.1), direction=GT
}
ENTRY %main {
  %p.0 = f32[3]{0} parameter(0), sharding={maximal device=0}
  %iota = s32[3]{0} iota(), iota_dimension=0, sharding={maximal device=0}
  ROOT %sort = (f32[3]{0}, s32[3]{0}) sort(p.0, iota), dimensions={0},
    to_apply=ge, sharding={{maximal device=0}, {maximal device=0}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Sort(op::Parameter(0), op::Iota()),
                    op::Shape("(f32[3], s32[3])")));
}
// Sort along dim 2 while sharding is along dim 1: each partition sorts its
// local [128,7,257] shard independently, so no communication is inserted.
// The comparator implements a total order on f32 keys via a bitcast trick.
TEST_P(SpmdPartitioningTest, SortAlongNonPartitionedDimension) {
  absl::string_view hlo_string = R"(
HloModule module
ge {
  p.0.lhs.1247 = f32[]{:T(256)} parameter(0), sharding={replicated}
  bitcast-convert = s32[]{:T(256)} bitcast-convert(p.0.lhs.1247), sharding={replicated}
  constant = s32[]{:T(256)} constant(0), sharding={replicated}
  compare = pred[]{:T(256)} compare(bitcast-convert, constant), direction=LT, sharding={replicated}
  constant.1 = u32[]{:T(256)} constant(2147483647), sharding={replicated}
  bitcast-convert.1 = u32[]{:T(256)} bitcast-convert(p.0.lhs.1247), sharding={replicated}
  subtract = u32[]{:T(256)} subtract(constant.1, bitcast-convert.1), sharding={replicated}
  bitcast-convert.2 = s32[]{:T(256)} bitcast-convert(subtract), sharding={replicated}
  select = s32[]{:T(256)} select(compare, bitcast-convert.2, bitcast-convert), sharding={replicated}
  p.0.rhs.1248 = f32[]{:T(256)} parameter(1), sharding={replicated}
  bitcast-convert.3 = s32[]{:T(256)} bitcast-convert(p.0.rhs.1248), sharding={replicated}
  compare.1 = pred[]{:T(256)} compare(bitcast-convert.3, constant), direction=LT, sharding={replicated}
  bitcast-convert.4 = u32[]{:T(256)} bitcast-convert(p.0.rhs.1248), sharding={replicated}
  subtract.1 = u32[]{:T(256)} subtract(constant.1, bitcast-convert.4), sharding={replicated}
  bitcast-convert.5 = s32[]{:T(256)} bitcast-convert(subtract.1), sharding={replicated}
  select.1 = s32[]{:T(256)} select(compare.1, bitcast-convert.5, bitcast-convert.3), sharding={replicated}
  compare.2 = pred[]{:T(256)} compare(select, select.1), direction=GT, sharding={replicated}
  compare.258 = pred[]{:T(256)} compare(select.1, select), direction=GT, sharding={replicated}
  compare.259 = pred[]{:T(256)} compare(compare.2, compare.258), direction=EQ, sharding={replicated}
  p.1.lhs.1249 = s32[]{:T(256)} parameter(2), sharding={replicated}
  p.1.rhs.1250 = s32[]{:T(256)} parameter(3), sharding={replicated}
  compare.260 = pred[]{:T(256)} compare(p.1.lhs.1249, p.1.rhs.1250), direction=LT, sharding={replicated}
  ROOT select.86 = pred[]{:T(256)} select(compare.259, compare.260, compare.2), sharding={replicated}
}
ENTRY entry {
  %param0 = f32[128,14,257] parameter(0)
  %param0.copy = f32[128,14,257] copy(%param0), sharding={devices=[1,2,1]0,1}
  %param1 = s32[128,14,257] parameter(1)
  %param1.copy = s32[128,14,257] copy(%param1), sharding={devices=[1,2,1]0,1}
  ROOT %sort.6 = (f32[128,14,257]{2,1,0:T(8,128)}, s32[128,14,257]{2,1,0:T(8,128)})
    sort(%param0.copy, %param1.copy), dimensions={2}, is_stable=true,
    to_apply=%ge, sharding={{devices=[1,2,1]0,1},{devices=[1,2,1]0,1}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  // Each operand is dynamic-sliced along dim 1 (14 -> 7 per partition).
  auto param0 =
      AllOf(op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(),
                                      op::Reshape(), op::Constant())),
            op::Shape("f32[128,7,257]"));
  auto param1 =
      AllOf(op::Copy(op::DynamicSlice(op::Parameter(1), op::Constant(),
                                      op::Reshape(), op::Constant())),
            op::Shape("s32[128,7,257]"));
  EXPECT_THAT(root, AllOf(op::Sort(param0, param1),
                          op::Shape("(f32[128,7,257], s32[128,7,257])")));
}
// TopK custom call with the sort dimension sharded two ways: each partition
// runs TopK on its half of the columns (209664/2 = 104832), and the
// per-partition winners (2 partitions * 2000 = 4000 per row) are merged by
// a final sort.
TEST_P(SpmdPartitioningTest, PartitionCustomCall) {
  absl::string_view hlo_string = R"(
HloModule cluster_2013453984438090939__.47
ENTRY %cluster_2013453984438090939__.47
  (arg_tuple.1: ()) -> (bf16[2,2000], s32[2,2000]) {
  %arg_tuple.1 = bf16[2,209664] parameter(0)
  %copy.arg_tuple.1 = bf16[2,209664] copy(%arg_tuple.1), sharding={devices=[1,2]0,1}
  %custom-call = (bf16[2,2000]{1,0}, s32[2,2000]{1,0})
    custom-call(bf16[2,209664]{1,0} %copy.arg_tuple.1), custom_call_target="TopK"
  %get-tuple-element = bf16[2,2000]{1,0}
    get-tuple-element((bf16[2,2000]{1,0}, s32[2,2000]{1,0}) %custom-call),
    index=0, sharding={replicated}
  %get-tuple-element.1 = s32[2,2000]{1,0} get-tuple-element((bf16[2,2000]{1,0},
    s32[2,2000]{1,0}) %custom-call), index=1, sharding={replicated}
  ROOT %tuple.46 = (bf16[2,2000]{1,0}, s32[2,2000]{1,0})
    tuple(bf16[2,2000]{1,0} %get-tuple-element, s32[2,2000]{1,0}
    %get-tuple-element.1), sharding={{replicated}, {replicated}},
    metadata={op_name="XLA_Retvals"}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  // The per-partition TopK sees half of the sort dimension.
  auto custom_call = FindInstruction(module.get(), "custom-call.1");
  EXPECT_EQ(custom_call->operand(0)->shape().dimensions(1), 104832);
  // The merging sort combines both partitions' 2000 candidates per row.
  auto sort = FindInstruction(module.get(), "sort");
  EXPECT_EQ(sort->operand(0)->shape().dimensions(1), 4000);
  EXPECT_EQ(sort->operand(1)->shape().dimensions(1), 4000);
}
// TopK custom call whose batch dimension (dim 0) is fully partitioned eight
// ways: the sort dimension is left intact (32128 columns per partition), and
// the per-partition sort operates on a single row with the original k=2.
TEST_P(SpmdPartitioningTest, PartitionCustomCall_BatchPartitionedDims) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %param0 = f32[8,32128] parameter(0)
  %copy.0 = f32[8,32128] copy(%param0), sharding={devices=[8,1]<=[8]}
  %custom-call = (f32[8,2]{1,0}, s32[8,2]{1,0})
    custom-call(%copy.0), custom_call_target="TopK"
  %get-tuple-element = f32[8,2]{1,0}
    get-tuple-element((f32[8,2]{1,0}, s32[8,2]{1,0}) %custom-call), index=0,
    sharding={devices=[8,1]<=[8]}
  %get-tuple-element.1 = s32[8,2]{1,0}
    get-tuple-element((f32[8,2]{1,0}, s32[8,2]{1,0}) %custom-call), index=1,
    sharding={devices=[8,1]<=[8]}
  ROOT %tuple = (f32[8,2]{1,0}, s32[8,2]{1,0})
    tuple(%get-tuple-element, %get-tuple-element.1),
    sharding={{replicated}, {replicated}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  // Fixed: was LOG(ERROR), leftover debug output inconsistent with the
  // VLOG(1) used by every sibling test and spamming test logs.
  VLOG(1) << module->ToString();
  // The partitioned TopK still sees the full 32128-wide sort dimension.
  auto custom_call = FindInstruction(module.get(), "custom-call.1");
  EXPECT_EQ(custom_call->operand(0)->shape().dimensions(1), 32128);
  // Per-partition sort: one row (8/8) and the original k=2 candidates.
  auto sort = FindInstruction(module.get(), "sort");
  EXPECT_EQ(sort->operand(0)->shape().dimensions(0), 1);
  EXPECT_EQ(sort->operand(0)->shape().dimensions(1), 2);
  EXPECT_EQ(sort->operand(1)->shape().dimensions(0), 1);
  EXPECT_EQ(sort->operand(1)->shape().dimensions(1), 2);
}
// TopK custom call with both dimensions sharded ([4,2]): batch stays split
// four ways (8/4 = 2 rows per partition), the sort dimension is halved
// (16064 per partition), and the merge sort sees 2*k = 4 candidates per row.
TEST_P(SpmdPartitioningTest, PartitionCustomCall_TwoPartitionedDims) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %param0 = f32[8,32128] parameter(0)
  %copy.0 = f32[8,32128] copy(%param0), sharding={devices=[4,2]<=[8]}
  %custom-call = (f32[8,2]{1,0}, s32[8,2]{1,0})
    custom-call(%copy.0), custom_call_target="TopK"
  %get-tuple-element = f32[8,2]{1,0}
    get-tuple-element((f32[8,2]{1,0}, s32[8,2]{1,0}) %custom-call), index=0,
    sharding={devices=[4,1,2]<=[8] last_tile_dim_replicate}
  %get-tuple-element.1 = s32[8,2]{1,0}
    get-tuple-element((f32[8,2]{1,0}, s32[8,2]{1,0}) %custom-call), index=1,
    sharding={devices=[4,1,2]<=[8] last_tile_dim_replicate}
  ROOT %tuple = (f32[8,2]{1,0}, s32[8,2]{1,0})
    tuple(%get-tuple-element, %get-tuple-element.1),
    sharding={{replicated}, {replicated}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  // Per-partition TopK input: half of the 32128-wide sort dimension.
  auto custom_call = FindInstruction(module.get(), "custom-call.1");
  EXPECT_EQ(custom_call->operand(0)->shape().dimensions(1), 16064);
  // Merge sort: 2 local rows, 2 partitions * k=2 = 4 candidates per row.
  auto sort = FindInstruction(module.get(), "sort");
  EXPECT_EQ(sort->operand(0)->shape().dimensions(0), 2);
  EXPECT_EQ(sort->operand(0)->shape().dimensions(1), 4);
  EXPECT_EQ(sort->operand(1)->shape().dimensions(0), 2);
  EXPECT_EQ(sort->operand(1)->shape().dimensions(1), 4);
}
// Sort(values, iota) followed by slices of the top 2000 is recognized as a
// TopK pattern: the sort dimension is partitioned (104832 per partition),
// and a second sort merges the per-partition candidates (4000 per row).
TEST_P(SpmdPartitioningTest, PartitionSortInTopK) {
  absl::string_view hlo_string = R"(
HloModule module
%compare-greater-than.8 (p.0.lhs.9: bf16[], p.0.rhs.10: bf16[], p.1.lhs.11:
  s32[], p.1.rhs.12: s32[]) -> pred[] {
  %p.1.lhs.11 = s32[] parameter(2)
  %p.1.rhs.12 = s32[] parameter(3)
  %p.0.lhs.9 = bf16[] parameter(0)
  %convert.13 = f32[] convert(bf16[] %p.0.lhs.9)
  %bitcast-convert.16 = s32[] bitcast-convert(f32[] %convert.13)
  %constant.20 = s32[] constant(0)
  %compare.21 = pred[] compare(s32[] %bitcast-convert.16, s32[] %constant.20),
    direction=LT
  %constant.15 = u32[] constant(2147483647)
  %bitcast-convert.17 = u32[] bitcast-convert(f32[] %convert.13)
  %subtract.18 = u32[] subtract(u32[] %constant.15, u32[] %bitcast-convert.17)
  %bitcast-convert.19 = s32[] bitcast-convert(u32[] %subtract.18)
  %select.22 = s32[] select(pred[] %compare.21, s32[] %bitcast-convert.19, s32[]
    %bitcast-convert.16)
  %p.0.rhs.10 = bf16[] parameter(1)
  %convert.14 = f32[] convert(bf16[] %p.0.rhs.10)
  %bitcast-convert.24 = s32[] bitcast-convert(f32[] %convert.14)
  %constant.28 = s32[] constant(0)
  %compare.29 = pred[] compare(s32[] %bitcast-convert.24, s32[] %constant.28),
    direction=LT
  %constant.23 = u32[] constant(2147483647)
  %bitcast-convert.25 = u32[] bitcast-convert(f32[] %convert.14)
  %subtract.26 = u32[] subtract(u32[] %constant.23, u32[] %bitcast-convert.25)
  %bitcast-convert.27 = s32[] bitcast-convert(u32[] %subtract.26)
  %select.30 = s32[] select(pred[] %compare.29, s32[] %bitcast-convert.27, s32[]
    %bitcast-convert.24)
  ROOT %compare.31 = pred[] compare(s32[] %select.22, s32[] %select.30),
    direction=GT
}
ENTRY entry
  (arg_tuple.1: ()) -> (bf16[2,2000], s32[2,2000]) {
  %arg_tuple.1 = bf16[2,209664] parameter(0)
  %copy.arg_tuple.1 = bf16[2,209664] copy(%arg_tuple.1), sharding={devices=[1,2]0,1}
  %iota.7 = s32[2,209664] iota(), iota_dimension=1,
    metadata={op_type="TopKV2" op_name="TopKV2"}
  %sort.32 = (bf16[2,209664], s32[2,209664])
    sort(bf16[2,209664] %copy.arg_tuple.1, s32[2,209664] %iota.7),
    dimensions={1}, is_stable=true, to_apply=%compare-greater-than.8,
    metadata={op_type="TopKV2" op_name="TopKV2"}
  %get-tuple-element.33 = bf16[2,209664]
    get-tuple-element((bf16[2,209664], s32[2,209664]) %sort.32),
    index=0, metadata={op_type="TopKV2" op_name="TopKV2"}
  %slice.34 = bf16[2,2000] slice(bf16[2,209664]
    %get-tuple-element.33), slice={[0:2], [0:2000]},
    metadata={op_type="TopKV2" op_name="TopKV2"}
  %get-tuple-element.35 = s32[2,209664]
    get-tuple-element((bf16[2,209664], s32[2,209664]) %sort.32),
    index=1, metadata={op_type="TopKV2" op_name="TopKV2"}
  %slice.36 = s32[2,2000] slice(s32[2,209664]
    %get-tuple-element.35), slice={[0:2], [0:2000]},
    metadata={op_type="TopKV2" op_name="TopKV2"}
  ROOT %tuple.46 = (bf16[2,2000], s32[2,2000])
    tuple(bf16[2,2000] %slice.34, s32[2,2000]
    %slice.36), sharding={{replicated}, {replicated}},
    metadata={op_name="XLA_Retvals"}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  // The local sort runs on half the sort dimension.
  auto sort = FindInstruction(module.get(), "sort.0");
  EXPECT_EQ(sort->operand(0)->shape().dimensions(1), 104832);
  EXPECT_EQ(sort->operand(1)->shape().dimensions(1), 104832);
  // The merging sort combines both partitions' 2000 candidates per row.
  auto final_sort = FindInstruction(module.get(), "sort.1");
  EXPECT_EQ(final_sort->operand(0)->shape().dimensions(1), 4000);
  EXPECT_EQ(final_sort->operand(1)->shape().dimensions(1), 4000);
}
// Same TopK pattern as the sort+slice case, but the comparator uses a
// select-based tie-breaker on the index operand; the pattern must still be
// recognized, partitioning the sort dimension and adding a merge sort.
TEST_P(SpmdPartitioningTest, PartitionSortInTopKWhenComparisonWithSelect) {
  absl::string_view hlo_string = R"(
HloModule module
%compare-greater-than.8 (p.0.lhs.2566: bf16[],
  p.0.rhs.2567: bf16[], p.1.lhs.2586: s32[], p.1.rhs.2587: s32[]) -> pred[] {
  %p.0.lhs.2566 = bf16[] parameter(0)
  %convert.164 = f32[] convert(bf16[] %p.0.lhs.2566)
  %bitcast-convert.48 = s32[] bitcast-convert(f32[] %convert.164)
  %constant.285 = s32[] constant(0)
  %compare.84 = pred[] compare(s32[] %bitcast-convert.48, s32[] %constant.285),
    direction=LT
  %constant.286 = u32[] constant(2147483647)
  %bitcast-convert.49 = u32[] bitcast-convert(f32[] %convert.164)
  %subtract.84 = u32[] subtract(u32[] %constant.286, u32[] %bitcast-convert.49)
  %bitcast-convert.50 = s32[] bitcast-convert(u32[] %subtract.84)
  %select.40 = s32[] select(pred[] %compare.84, s32[] %bitcast-convert.50,
    s32[] %bitcast-convert.48)
  %p.0.rhs.2567 = bf16[] parameter(1)
  %convert.165 = f32[] convert(bf16[] %p.0.rhs.2567)
  %bitcast-convert.51 = s32[] bitcast-convert(f32[] %convert.165)
  %compare.85 = pred[] compare(s32[] %bitcast-convert.51, s32[] %constant.285),
    direction=LT
  %bitcast-convert.52 = u32[] bitcast-convert(f32[] %convert.165)
  %subtract.85 = u32[] subtract(u32[] %constant.286, u32[] %bitcast-convert.52)
  %bitcast-convert.53 = s32[] bitcast-convert(u32[] %subtract.85)
  %select.41 = s32[] select(pred[] %compare.85, s32[] %bitcast-convert.53,
    s32[] %bitcast-convert.51)
  %compare.86 = pred[] compare(s32[] %select.40, s32[] %select.41), direction=GT
  %compare.1645 = pred[] compare(s32[] %select.41, s32[] %select.40), direction=GT
  %compare.1646 = pred[] compare(pred[] %compare.86, pred[] %compare.1645),
    direction=EQ
  %p.1.lhs.2586 = s32[] parameter(2)
  %p.1.rhs.2587 = s32[] parameter(3)
  %compare.1647 = pred[] compare(s32[] %p.1.lhs.2586, s32[] %p.1.rhs.2587),
    direction=LT
  ROOT %select.1054 = pred[] select(pred[] %compare.1646, pred[] %compare.1647,
    pred[] %compare.86)
}
ENTRY entry
  (arg_tuple.1: ()) -> (bf16[2,2000], s32[2,2000]) {
  %arg_tuple.1 = bf16[2,209664] parameter(0)
  %copy.arg_tuple.1 = bf16[2,209664] copy(%arg_tuple.1), sharding={devices=[1,2]0,1}
  %iota.7 = s32[2,209664] iota(), iota_dimension=1,
    metadata={op_type="TopKV2" op_name="TopKV2"}
  %sort.32 = (bf16[2,209664], s32[2,209664])
    sort(bf16[2,209664] %copy.arg_tuple.1, s32[2,209664] %iota.7),
    dimensions={1}, is_stable=true, to_apply=%compare-greater-than.8,
    metadata={op_type="TopKV2" op_name="TopKV2"}
  %get-tuple-element.33 = bf16[2,209664]
    get-tuple-element((bf16[2,209664], s32[2,209664]) %sort.32),
    index=0, metadata={op_type="TopKV2" op_name="TopKV2"}
  %slice.34 = bf16[2,2000] slice(bf16[2,209664]
    %get-tuple-element.33), slice={[0:2], [0:2000]},
    metadata={op_type="TopKV2" op_name="TopKV2"}
  %get-tuple-element.35 = s32[2,209664]
    get-tuple-element((bf16[2,209664], s32[2,209664]) %sort.32),
    index=1, metadata={op_type="TopKV2" op_name="TopKV2"}
  %slice.36 = s32[2,2000] slice(s32[2,209664]
    %get-tuple-element.35), slice={[0:2], [0:2000]},
    metadata={op_type="TopKV2" op_name="TopKV2"}
  ROOT %tuple.46 = (bf16[2,2000], s32[2,2000])
    tuple(bf16[2,2000] %slice.34, s32[2,2000]
    %slice.36), sharding={{replicated}, {replicated}},
    metadata={op_name="XLA_Retvals"}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  // Local sort on half the sort dimension, merge sort over 2*2000 per row.
  auto sort = FindInstruction(module.get(), "sort.0");
  EXPECT_EQ(sort->operand(0)->shape().dimensions(1), 104832);
  EXPECT_EQ(sort->operand(1)->shape().dimensions(1), 104832);
  auto final_sort = FindInstruction(module.get(), "sort.1");
  EXPECT_EQ(final_sort->operand(0)->shape().dimensions(1), 4000);
  EXPECT_EQ(final_sort->operand(1)->shape().dimensions(1), 4000);
}
// Negative case: the sort's second operand is a parameter, not an iota, so
// the sort+slice pattern is NOT a TopK. The sort dimension must stay
// unpartitioned (full 209664 width).
TEST_P(SpmdPartitioningTest, NoPartitionSortInTopKWhenSecondOperandIsNotIota) {
  absl::string_view hlo_string = R"(
HloModule module
%compare-greater-than.8 (p.0.lhs.2566: bf16[],
  p.0.rhs.2567: bf16[], p.1.lhs.2586: s32[], p.1.rhs.2587: s32[]) -> pred[] {
  %p.0.lhs.2566 = bf16[] parameter(0)
  %convert.164 = f32[] convert(bf16[] %p.0.lhs.2566)
  %bitcast-convert.48 = s32[] bitcast-convert(f32[] %convert.164)
  %constant.285 = s32[] constant(0)
  %compare.84 = pred[] compare(s32[] %bitcast-convert.48, s32[] %constant.285),
    direction=LT
  %constant.286 = u32[] constant(2147483647)
  %bitcast-convert.49 = u32[] bitcast-convert(f32[] %convert.164)
  %subtract.84 = u32[] subtract(u32[] %constant.286, u32[] %bitcast-convert.49)
  %bitcast-convert.50 = s32[] bitcast-convert(u32[] %subtract.84)
  %select.40 = s32[] select(pred[] %compare.84, s32[] %bitcast-convert.50,
    s32[] %bitcast-convert.48)
  %p.0.rhs.2567 = bf16[] parameter(1)
  %convert.165 = f32[] convert(bf16[] %p.0.rhs.2567)
  %bitcast-convert.51 = s32[] bitcast-convert(f32[] %convert.165)
  %compare.85 = pred[] compare(s32[] %bitcast-convert.51, s32[] %constant.285),
    direction=LT
  %bitcast-convert.52 = u32[] bitcast-convert(f32[] %convert.165)
  %subtract.85 = u32[] subtract(u32[] %constant.286, u32[] %bitcast-convert.52)
  %bitcast-convert.53 = s32[] bitcast-convert(u32[] %subtract.85)
  %select.41 = s32[] select(pred[] %compare.85, s32[] %bitcast-convert.53,
    s32[] %bitcast-convert.51)
  %compare.86 = pred[] compare(s32[] %select.40, s32[] %select.41), direction=GT
  %compare.1645 = pred[] compare(s32[] %select.41, s32[] %select.40), direction=GT
  %compare.1646 = pred[] compare(pred[] %compare.86, pred[] %compare.1645),
    direction=EQ
  %p.1.lhs.2586 = s32[] parameter(2)
  %p.1.rhs.2587 = s32[] parameter(3)
  %compare.1647 = pred[] compare(s32[] %p.1.lhs.2586, s32[] %p.1.rhs.2587),
    direction=LT
  ROOT %select.1054 = pred[] select(pred[] %compare.1646, pred[] %compare.1647,
    pred[] %compare.86)
}
ENTRY entry {
  %arg_tuple.1 = bf16[2,209664] parameter(0)
  %arg_tuple.2 = s32[2,209664] parameter(1)
  %copy.arg_tuple.1 = bf16[2,209664] copy(%arg_tuple.1), sharding={devices=[1,2]0,1}
  %sort.32 = (bf16[2,209664], s32[2,209664])
    sort(bf16[2,209664] %copy.arg_tuple.1, s32[2,209664] %arg_tuple.2),
    dimensions={1}, is_stable=true, to_apply=%compare-greater-than.8,
    metadata={op_type="TopKV2" op_name="TopKV2"}
  %get-tuple-element.33 = bf16[2,209664]
    get-tuple-element((bf16[2,209664], s32[2,209664]) %sort.32),
    index=0, metadata={op_type="TopKV2" op_name="TopKV2"}
  %slice.34 = bf16[2,2000] slice(bf16[2,209664]
    %get-tuple-element.33), slice={[0:2], [0:2000]},
    metadata={op_type="TopKV2" op_name="TopKV2"}
  %get-tuple-element.35 = s32[2,209664]
    get-tuple-element((bf16[2,209664], s32[2,209664]) %sort.32),
    index=1, metadata={op_type="TopKV2" op_name="TopKV2"}
  %slice.36 = s32[2,2000] slice(s32[2,209664]
    %get-tuple-element.35), slice={[0:2], [0:2000]},
    metadata={op_type="TopKV2" op_name="TopKV2"}
  ROOT %tuple.46 = (bf16[2,2000], s32[2,2000])
    tuple(bf16[2,2000] %slice.34, s32[2,2000]
    %slice.36), sharding={{replicated}, {replicated}},
    metadata={op_name="XLA_Retvals"}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  // The sort keeps the full sort dimension -- no TopK partitioning applied.
  auto sort = FindInstruction(module.get(), "sort.0");
  EXPECT_EQ(sort->operand(0)->shape().dimensions(1), 209664);
  EXPECT_EQ(sort->operand(1)->shape().dimensions(1), 209664);
}
// Negative case: the input is sharded along dim 0 ({devices=[2,1]}), not the
// sort dimension, so there is nothing to gain from the TopK rewrite and the
// sort dimension is left at its full 209664 width.
TEST_P(SpmdPartitioningTest, NoPartitionSortInTopKWhenNoPartitionInSortDim) {
  absl::string_view hlo_string = R"(
HloModule module
%compare-greater-than.8 (p.0.lhs.2566: bf16[],
  p.0.rhs.2567: bf16[], p.1.lhs.2586: s32[], p.1.rhs.2587: s32[]) -> pred[] {
  %p.0.lhs.2566 = bf16[] parameter(0)
  %convert.164 = f32[] convert(bf16[] %p.0.lhs.2566)
  %bitcast-convert.48 = s32[] bitcast-convert(f32[] %convert.164)
  %constant.285 = s32[] constant(0)
  %compare.84 = pred[] compare(s32[] %bitcast-convert.48, s32[] %constant.285),
    direction=LT
  %constant.286 = u32[] constant(2147483647)
  %bitcast-convert.49 = u32[] bitcast-convert(f32[] %convert.164)
  %subtract.84 = u32[] subtract(u32[] %constant.286, u32[] %bitcast-convert.49)
  %bitcast-convert.50 = s32[] bitcast-convert(u32[] %subtract.84)
  %select.40 = s32[] select(pred[] %compare.84, s32[] %bitcast-convert.50,
    s32[] %bitcast-convert.48)
  %p.0.rhs.2567 = bf16[] parameter(1)
  %convert.165 = f32[] convert(bf16[] %p.0.rhs.2567)
  %bitcast-convert.51 = s32[] bitcast-convert(f32[] %convert.165)
  %compare.85 = pred[] compare(s32[] %bitcast-convert.51, s32[] %constant.285),
    direction=LT
  %bitcast-convert.52 = u32[] bitcast-convert(f32[] %convert.165)
  %subtract.85 = u32[] subtract(u32[] %constant.286, u32[] %bitcast-convert.52)
  %bitcast-convert.53 = s32[] bitcast-convert(u32[] %subtract.85)
  %select.41 = s32[] select(pred[] %compare.85, s32[] %bitcast-convert.53,
    s32[] %bitcast-convert.51)
  %compare.86 = pred[] compare(s32[] %select.40, s32[] %select.41), direction=GT
  %compare.1645 = pred[] compare(s32[] %select.41, s32[] %select.40), direction=GT
  %compare.1646 = pred[] compare(pred[] %compare.86, pred[] %compare.1645),
    direction=EQ
  %p.1.lhs.2586 = s32[] parameter(2)
  %p.1.rhs.2587 = s32[] parameter(3)
  %compare.1647 = pred[] compare(s32[] %p.1.lhs.2586, s32[] %p.1.rhs.2587),
    direction=LT
  ROOT %select.1054 = pred[] select(pred[] %compare.1646, pred[] %compare.1647,
    pred[] %compare.86)
}
ENTRY entry
  (arg_tuple.1: ()) -> (bf16[2,2000], s32[2,2000]) {
  %arg_tuple.1 = bf16[2,209664] parameter(0)
  %copy.arg_tuple.1 = bf16[2,209664] copy(%arg_tuple.1), sharding={devices=[2,1]0,1}
  %iota.7 = s32[2,209664] iota(), iota_dimension=1,
    metadata={op_type="TopKV2" op_name="TopKV2"}
  %sort.32 = (bf16[2,209664], s32[2,209664])
    sort(bf16[2,209664] %copy.arg_tuple.1, s32[2,209664] %iota.7),
    dimensions={1}, is_stable=true, to_apply=%compare-greater-than.8,
    metadata={op_type="TopKV2" op_name="TopKV2"}
  %get-tuple-element.33 = bf16[2,209664]
    get-tuple-element((bf16[2,209664], s32[2,209664]) %sort.32),
    index=0, metadata={op_type="TopKV2" op_name="TopKV2"}
  %slice.34 = bf16[2,2000] slice(bf16[2,209664]
    %get-tuple-element.33), slice={[0:2], [0:2000]},
    metadata={op_type="TopKV2" op_name="TopKV2"}
  %get-tuple-element.35 = s32[2,209664]
    get-tuple-element((bf16[2,209664], s32[2,209664]) %sort.32),
    index=1, metadata={op_type="TopKV2" op_name="TopKV2"}
  %slice.36 = s32[2,2000] slice(s32[2,209664]
    %get-tuple-element.35), slice={[0:2], [0:2000]},
    metadata={op_type="TopKV2" op_name="TopKV2"}
  ROOT %tuple.46 = (bf16[2,2000], s32[2,2000])
    tuple(bf16[2,2000] %slice.34, s32[2,2000]
    %slice.36), sharding={{replicated}, {replicated}},
    metadata={op_name="XLA_Retvals"}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  // The sort dimension is untouched -- no TopK partitioning applied.
  auto sort = FindInstruction(module.get(), "sort.0");
  EXPECT_EQ(sort->operand(0)->shape().dimensions(1), 209664);
  EXPECT_EQ(sort->operand(1)->shape().dimensions(1), 209664);
}
// TopK-like pattern (sort + slice), but the slice is taken along the
// non-sort dimension (dim 0), not the sort dimension. The partitioner must
// therefore NOT split the sort along its sort dimension: each partition's
// sort must still see the full 209664-element rows.
TEST_P(SpmdPartitioningTest, NoPartitionSortInTopKWhenSliceInOtherDim) {
absl::string_view hlo_string = R"(
HloModule module
%compare-greater-than.8 (p.0.lhs.2566: bf16[],
p.0.rhs.2567: bf16[], p.1.lhs.2586: s32[], p.1.rhs.2587: s32[]) -> pred[] {
%p.0.lhs.2566 = bf16[] parameter(0)
%convert.164 = f32[] convert(bf16[] %p.0.lhs.2566)
%bitcast-convert.48 = s32[] bitcast-convert(f32[] %convert.164)
%constant.285 = s32[] constant(0)
%compare.84 = pred[] compare(s32[] %bitcast-convert.48, s32[] %constant.285),
direction=LT
%constant.286 = u32[] constant(2147483647)
%bitcast-convert.49 = u32[] bitcast-convert(f32[] %convert.164)
%subtract.84 = u32[] subtract(u32[] %constant.286, u32[] %bitcast-convert.49)
%bitcast-convert.50 = s32[] bitcast-convert(u32[] %subtract.84)
%select.40 = s32[] select(pred[] %compare.84, s32[] %bitcast-convert.50,
s32[] %bitcast-convert.48)
%p.0.rhs.2567 = bf16[] parameter(1)
%convert.165 = f32[] convert(bf16[] %p.0.rhs.2567)
%bitcast-convert.51 = s32[] bitcast-convert(f32[] %convert.165)
%compare.85 = pred[] compare(s32[] %bitcast-convert.51, s32[] %constant.285),
direction=LT
%bitcast-convert.52 = u32[] bitcast-convert(f32[] %convert.165)
%subtract.85 = u32[] subtract(u32[] %constant.286, u32[] %bitcast-convert.52)
%bitcast-convert.53 = s32[] bitcast-convert(u32[] %subtract.85)
%select.41 = s32[] select(pred[] %compare.85, s32[] %bitcast-convert.53,
s32[] %bitcast-convert.51)
%compare.86 = pred[] compare(s32[] %select.40, s32[] %select.41), direction=GT
%compare.1645 = pred[] compare(s32[] %select.41, s32[] %select.40), direction=GT
%compare.1646 = pred[] compare(pred[] %compare.86, pred[] %compare.1645),
direction=EQ
%p.1.lhs.2586 = s32[] parameter(2)
%p.1.rhs.2587 = s32[] parameter(3)
%compare.1647 = pred[] compare(s32[] %p.1.lhs.2586, s32[] %p.1.rhs.2587),
direction=LT
ROOT %select.1054 = pred[] select(pred[] %compare.1646, pred[] %compare.1647,
pred[] %compare.86)
}
ENTRY entry {
%arg_tuple.1 = bf16[2,209664] parameter(0)
%copy.arg_tuple.1 = bf16[2,209664] copy(%arg_tuple.1), sharding={devices=[1,2]0,1}
%iota.7 = s32[2,209664] iota(), iota_dimension=1,
metadata={op_type="TopKV2" op_name="TopKV2"}
%sort.32 = (bf16[2,209664], s32[2,209664])
sort(bf16[2,209664] %copy.arg_tuple.1, s32[2,209664] %iota.7),
dimensions={1}, is_stable=true, to_apply=%compare-greater-than.8,
metadata={op_type="TopKV2" op_name="TopKV2"}
%get-tuple-element.33 = bf16[2,209664]
get-tuple-element((bf16[2,209664], s32[2,209664]) %sort.32),
index=0, metadata={op_type="TopKV2" op_name="TopKV2"}
%slice.34 = bf16[1,209664] slice(bf16[2,209664]
%get-tuple-element.33), slice={[0:1], [0:209664]},
metadata={op_type="TopKV2" op_name="TopKV2"}
%get-tuple-element.35 = s32[2,209664]
get-tuple-element((bf16[2,209664], s32[2,209664]) %sort.32),
index=1, metadata={op_type="TopKV2" op_name="TopKV2"}
%slice.36 = s32[1,209664] slice(s32[2,209664]
%get-tuple-element.35), slice={[0:1], [0:209664]},
metadata={op_type="TopKV2" op_name="TopKV2"}
ROOT %tuple.46 = (bf16[1,209664], s32[1,209664])
tuple(bf16[1,209664] %slice.34, s32[1,209664]
%slice.36), sharding={{replicated}, {replicated}},
metadata={op_name="XLA_Retvals"}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
// "sort.0" is the sort instruction in the partitioned module; both of its
// operands must keep the full sort dimension (209664 columns).
auto sort = FindInstruction(module.get(), "sort.0");
EXPECT_EQ(sort->operand(0)->shape().dimensions(1), 209664);
EXPECT_EQ(sort->operand(1)->shape().dimensions(1), 209664);
}
// Regression test for a slow-sort issue: a large two-operand sort (value +
// iota index, with a NaN-aware total-order comparator) is sharded along its
// sort dimension across 64 devices. The partitioner is expected to reshard
// onto the free dimension so every partition sorts complete rows:
// 32768 / 64 = 512 rows per partition, full 65536-wide sort dimension.
TEST_P(SpmdPartitioningTest, SortShardedOnSortDim_SlowSortBug) {
absl::string_view hlo_string = R"(
HloModule module, entry_computation_layout={(f32[32768,65536]{1,0})->(f32[32768,65536]{1,0}, s32[32768,65536]{1,0})}
region_174.7326 {
Arg_0.7327 = f32[] parameter(0), sharding={replicated}
compare.7339 = pred[] compare(Arg_0.7327, Arg_0.7327), direction=NE, sharding={replicated}
constant.7332 = s32[] constant(2143289344), sharding={replicated}
constant.7334 = f32[] constant(0), sharding={replicated}
compare.7337 = pred[] compare(Arg_0.7327, constant.7334), direction=EQ, sharding={replicated}
constant.7333 = s32[] constant(0), sharding={replicated}
bitcast-convert.7335 = s32[] bitcast-convert(Arg_0.7327), sharding={replicated}
select.7338 = s32[] select(compare.7337, constant.7333, bitcast-convert.7335), sharding={replicated}
select.7340 = s32[] select(compare.7339, constant.7332, select.7338), sharding={replicated}
constant.1127 = s32[] constant(0), sharding={replicated}
compare.7343 = pred[] compare(select.7340, constant.1127), direction=LT, sharding={replicated}
constant.7331 = u32[] constant(2147483647), sharding={replicated}
bitcast-convert.7336 = u32[] bitcast-convert(Arg_0.7327), sharding={replicated}
subtract.7341 = u32[] subtract(constant.7331, bitcast-convert.7336), sharding={replicated}
bitcast-convert.7342 = s32[] bitcast-convert(subtract.7341), sharding={replicated}
select.7344 = s32[] select(compare.7343, bitcast-convert.7342, select.7340), sharding={replicated}
Arg_1.7328 = f32[] parameter(1), sharding={replicated}
compare.7349 = pred[] compare(Arg_1.7328, Arg_1.7328), direction=NE, sharding={replicated}
constant.1125 = s32[] constant(2143289344), sharding={replicated}
constant.1126 = f32[] constant(0), sharding={replicated}
compare.7347 = pred[] compare(Arg_1.7328, constant.1126), direction=EQ, sharding={replicated}
constant.1128 = s32[] constant(0), sharding={replicated}
bitcast-convert.7345 = s32[] bitcast-convert(Arg_1.7328), sharding={replicated}
select.7348 = s32[] select(compare.7347, constant.1128, bitcast-convert.7345), sharding={replicated}
select.7350 = s32[] select(compare.7349, constant.1125, select.7348), sharding={replicated}
constant.1129 = s32[] constant(0), sharding={replicated}
compare.7353 = pred[] compare(select.7350, constant.1129), direction=LT, sharding={replicated}
constant.1130 = u32[] constant(2147483647), sharding={replicated}
bitcast-convert.7346 = u32[] bitcast-convert(Arg_1.7328), sharding={replicated}
subtract.7351 = u32[] subtract(constant.1130, bitcast-convert.7346), sharding={replicated}
bitcast-convert.7352 = s32[] bitcast-convert(subtract.7351), sharding={replicated}
select.7354 = s32[] select(compare.7353, bitcast-convert.7352, select.7350), sharding={replicated}
compare.7355 = pred[] compare(select.7344, select.7354), direction=LT, sharding={replicated}
compare.24 = pred[] compare(select.7354, select.7344), direction=LT, sharding={replicated}
compare.25 = pred[] compare(compare.7355, compare.24), direction=EQ, sharding={replicated}
Arg_2.7329 = s32[] parameter(2), sharding={replicated}
Arg_3.7330 = s32[] parameter(3), sharding={replicated}
compare.26 = pred[] compare(Arg_2.7329, Arg_3.7330), direction=LT, sharding={replicated}
ROOT select.21 = pred[] select(compare.25, compare.26, compare.7355), sharding={replicated}
}
ENTRY entry {
param.0 = f32[32768,65536]{1,0} parameter(0)
negate.7325 = f32[32768,65536]{1,0} negate(param.0), sharding={devices=[1,64]<=[64]}
iota.30 = s32[32768,65536]{1,0} iota(), iota_dimension=1, sharding={devices=[1,64]<=[64]}
ROOT sort.0 = (f32[32768,65536]{1,0}, s32[32768,65536]{1,0}) sort(negate.7325, iota.30), dimensions={1}, is_stable=true, to_apply=region_174.7326, sharding={{devices=[1,64]<=[64]}, {devices=[1,64]<=[64]}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 64));
VLOG(1) << module->ToString();
// "sort.1" is the partitioned sort; dim 0 is split 64 ways, dim 1 (the
// sort dimension) stays whole.
auto sort = FindInstruction(module.get(), "sort.1");
for (auto operand : sort->operands()) {
EXPECT_EQ(operand->shape().dimensions(0), 512);
EXPECT_EQ(operand->shape().dimensions(1), 65536);
}
}
// Single-operand sort sharded along its own sort dimension: the partitioner
// has to move the sharding to the free dimension so each of the 8 partitions
// sorts complete rows.
TEST_P(SpmdPartitioningTest, SortShardedOnSortDim_OneOperand) {
absl::string_view hlo_string = R"(
HloModule module, entry_computation_layout={(f32[1024,1024]{1,0})->f32[1024,1024]{1,0}}
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[1024,1024]{1,0} parameter(0)
negate.0 = f32[1024,1024]{1,0} negate(param.0), sharding={devices=[1,8]<=[8]}
ROOT sort.0 = f32[1024,1024]{1,0} sort(negate.0), dimensions={1}, is_stable=true, to_apply=compare, sharding={devices=[1,8]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
// Every operand of the partitioned sort is split on dim 0 (1024 / 8 = 128)
// and keeps the full 1024-wide sort dimension.
auto* partitioned_sort = FindInstruction(module.get(), "sort.1");
for (int64_t i = 0; i < partitioned_sort->operand_count(); ++i) {
const auto* operand = partitioned_sort->operand(i);
EXPECT_EQ(operand->shape().dimensions(0), 128);
EXPECT_EQ(operand->shape().dimensions(1), 1024);
}
}
// Two-operand (key/index) sort sharded along its sort dimension: after
// partitioning, both operands must be split on the free dimension
// (1024 / 8 = 128) and keep the full sort dimension.
TEST_P(SpmdPartitioningTest, SortShardedOnSortDim_TwoOperands) {
absl::string_view hlo_string = R"(
HloModule module, entry_computation_layout={(f32[1024,1024]{1,0})->(f32[1024,1024]{1,0},s32[1024,1024]{1,0})}
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
p.1.lhs = s32[] parameter(2), sharding={replicated}
p.1.rhs = s32[] parameter(3), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[1024,1024]{1,0} parameter(0)
negate.0 = f32[1024,1024]{1,0} negate(param.0), sharding={devices=[1,8]<=[8]}
iota.0 = s32[1024,1024]{1,0} iota(), iota_dimension=1, sharding={devices=[1,8]<=[8]}
ROOT sort.0 = (f32[1024,1024]{1,0}, s32[1024,1024]{1,0}) sort(negate.0, iota.0), dimensions={1}, is_stable=true, to_apply=compare, sharding={{devices=[1,8]<=[8]},{devices=[1,8]<=[8]}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
// "sort.1" is the partitioned sort instruction.
auto sort = FindInstruction(module.get(), "sort.1");
for (auto operand : sort->operands()) {
EXPECT_EQ(operand->shape().dimensions(0), 128);
EXPECT_EQ(operand->shape().dimensions(1), 1024);
}
}
// Sort sharded on its sort dimension where the only free dimension has size
// 1, so there is nothing to reshard onto. The partitioner must fall back to
// materializing full operands (via all-reduce) rather than using all-to-all.
TEST_P(SpmdPartitioningTest, SortShardedOnSortDim_TwoOperands_FreeDimOfSize1) {
absl::string_view hlo_string = R"(
HloModule module
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
p.1.lhs = s32[] parameter(2), sharding={replicated}
p.1.rhs = s32[] parameter(3), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[1,1024]{1,0} parameter(0)
negate.0 = f32[1,1024]{1,0} negate(param.0), sharding={devices=[1,8]<=[8]}
iota.0 = s32[1,1024]{1,0} iota(), iota_dimension=1, sharding={devices=[1,8]<=[8]}
ROOT sort.0 = (f32[1,1024]{1,0}, s32[1,1024]{1,0}) sort(negate.0, iota.0), dimensions={1}, is_stable=true, to_apply=compare, sharding={{devices=[1,8]<=[8]},{devices=[1,8]<=[8]}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
// The sort operands must be the full [1,1024] shape, fed by all-reduce;
// no all-to-all may appear anywhere in the partitioned entry computation.
for (HloInstruction* inst : module->entry_computation()->instructions()) {
if (inst->opcode() == HloOpcode::kSort) {
for (HloInstruction* operand : inst->operands()) {
EXPECT_EQ(operand->shape().dimensions(0), 1);
EXPECT_EQ(operand->shape().dimensions(1), 1024);
}
EXPECT_THAT(inst, op::Sort(op::AllReduce(), op::AllReduce()));
}
EXPECT_NE(inst->opcode(), HloOpcode::kAllToAll);
}
}
// Three-operand sort sharded along its sort dimension: all three operands
// of the partitioned sort must be split on the free dimension
// (1024 / 8 = 128) and keep the full sort dimension.
TEST_P(SpmdPartitioningTest, SortShardedOnSortDim_ThreeOperands) {
absl::string_view hlo_string = R"(
HloModule module, entry_computation_layout={(f32[1024,1024]{1,0})->(f32[1024,1024]{1,0},s32[1024,1024]{1,0},s32[1024,1024]{1,0})}
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
p.1.lhs = s32[] parameter(2), sharding={replicated}
p.1.rhs = s32[] parameter(3), sharding={replicated}
p.2.lhs = s32[] parameter(4), sharding={replicated}
p.2.rhs = s32[] parameter(5), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[1024,1024]{1,0} parameter(0)
negate.0 = f32[1024,1024]{1,0} negate(param.0), sharding={devices=[1,8]<=[8]}
iota.0 = s32[1024,1024]{1,0} iota(), iota_dimension=0, sharding={devices=[1,8]<=[8]}
iota.1 = s32[1024,1024]{1,0} iota(), iota_dimension=1, sharding={devices=[1,8]<=[8]}
ROOT sort.0 = (f32[1024,1024]{1,0}, s32[1024,1024]{1,0}, s32[1024,1024]{1,0}) sort(negate.0, iota.0, iota.1), dimensions={1}, is_stable=true, to_apply=compare, sharding={{devices=[1,8]<=[8]},{devices=[1,8]<=[8]},{devices=[1,8]<=[8]}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
// "sort.1" is the partitioned sort instruction.
auto sort = FindInstruction(module.get(), "sort.1");
for (auto operand : sort->operands()) {
EXPECT_EQ(operand->shape().dimensions(0), 128);
EXPECT_EQ(operand->shape().dimensions(1), 1024);
}
}
// Rank-1 sort sharded along its (only) dimension, which is also the sort
// dimension: with no free dimension available, each partition must receive
// the whole 1024-element operand.
TEST_P(SpmdPartitioningTest, SortShardedOnSortDim_RankOne) {
absl::string_view hlo_string = R"(
HloModule module, entry_computation_layout={(f32[1024]{0})->(f32[1024]{0},s32[1024]{0})}
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
p.1.lhs = s32[] parameter(2), sharding={replicated}
p.1.rhs = s32[] parameter(3), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[1024]{0} parameter(0)
negate.0 = f32[1024]{0} negate(param.0), sharding={devices=[8]<=[8]}
iota.0 = s32[1024]{0} iota(), iota_dimension=0
ROOT sort.0 = (f32[1024]{0}, s32[1024]{0}) sort(negate.0, iota.0), dimensions={0}, is_stable=true, to_apply=compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
// Each operand of the partitioned sort keeps its full length.
auto* partitioned_sort = FindInstruction(module.get(), "sort.1");
for (int64_t i = 0; i < partitioned_sort->operand_count(); ++i) {
EXPECT_EQ(partitioned_sort->operand(i)->shape().dimensions(0), 1024);
}
}
// Sort sharded on the sort dimension with two free dimensions that are both
// divisible by the partition count. Per the expectations below, the
// partitioner reshards so dim 0 becomes 1 per partition (8 / 8) while dims 1
// and 2 stay whole.
TEST_P(SpmdPartitioningTest, SortShardedOnSortDim_TwoFreeDivisibleDims) {
absl::string_view hlo_string = R"(
HloModule module, entry_computation_layout={(f32[8,1024,1024]{2,1,0})->(f32[8,1024,1024]{2,1,0},s32[8,1024,1024]{2,1,0})}
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
p.1.lhs = s32[] parameter(2), sharding={replicated}
p.1.rhs = s32[] parameter(3), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[8,1024,1024]{2,1,0} parameter(0)
negate.0 = f32[8,1024,1024]{2,1,0} negate(param.0), sharding={devices=[1,1,8]<=[8]}
iota.0 = s32[8,1024,1024]{2,1,0} iota(), iota_dimension=2, sharding={devices=[1,1,8]<=[8]}
ROOT sort.0 = (f32[8,1024,1024]{2,1,0}, s32[8,1024,1024]{2,1,0}) sort(negate.0, iota.0), dimensions={2}, is_stable=true, to_apply=compare, sharding={{devices=[1,1,8]<=[8]},{devices=[1,1,8]<=[8]}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
// "sort.1" is the partitioned sort instruction.
auto sort = FindInstruction(module.get(), "sort.1");
for (auto operand : sort->operands()) {
EXPECT_EQ(operand->shape().dimensions(0), 1);
EXPECT_EQ(operand->shape().dimensions(1), 1024);
EXPECT_EQ(operand->shape().dimensions(2), 1024);
}
}
// Sort sharded on the sort dimension where dim 0 (size 7) is NOT divisible
// by 8 but dim 1 (size 1024) is. Per the expectations below, the sharding
// moves to the divisible free dimension: dim 1 becomes 1024 / 8 = 128.
TEST_P(SpmdPartitioningTest, SortShardedOnSortDim_OneFreeDivisibleDim) {
absl::string_view hlo_string = R"(
HloModule module, entry_computation_layout={(f32[7,1024,1024]{2,1,0})->(f32[7,1024,1024]{2,1,0},s32[7,1024,1024]{2,1,0})}
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
p.1.lhs = s32[] parameter(2), sharding={replicated}
p.1.rhs = s32[] parameter(3), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[7,1024,1024]{2,1,0} parameter(0)
negate.0 = f32[7,1024,1024]{2,1,0} negate(param.0), sharding={devices=[1,1,8]<=[8]}
iota.0 = s32[7,1024,1024]{2,1,0} iota(), iota_dimension=2, sharding={devices=[1,1,8]<=[8]}
ROOT sort.0 = (f32[7,1024,1024]{2,1,0}, s32[7,1024,1024]{2,1,0}) sort(negate.0, iota.0), dimensions={2}, is_stable=true, to_apply=compare, sharding={{devices=[1,1,8]<=[8]},{devices=[1,1,8]<=[8]}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
// "sort.1" is the partitioned sort instruction.
auto sort = FindInstruction(module.get(), "sort.1");
for (auto operand : sort->operands()) {
EXPECT_EQ(operand->shape().dimensions(0), 7);
EXPECT_EQ(operand->shape().dimensions(1), 128);
EXPECT_EQ(operand->shape().dimensions(2), 1024);
}
}
// Sort with a 2x4 device mesh: dim 1 is already sharded 2 ways and the sort
// dimension (dim 2) is sharded 4 ways. Per the expectations below, the
// sort-dim sharding is redistributed onto the free dims: dim 0 becomes
// ceil(7/4)=2 per partition and dim 1 becomes 1024/2 = 512.
TEST_P(SpmdPartitioningTest, SortShardedOnSortDim_OneFreeNondivisibleDim) {
absl::string_view hlo_string = R"(
HloModule module, entry_computation_layout={(f32[7,1024,1024]{2,1,0})->(f32[7,1024,1024]{2,1,0},s32[7,1024,1024]{2,1,0})}
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
p.1.lhs = s32[] parameter(2), sharding={replicated}
p.1.rhs = s32[] parameter(3), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[7,1024,1024]{2,1,0} parameter(0)
negate.0 = f32[7,1024,1024]{2,1,0} negate(param.0), sharding={devices=[1,2,4]<=[8]}
iota.0 = s32[7,1024,1024]{2,1,0} iota(), iota_dimension=2, sharding={devices=[1,2,4]<=[8]}
ROOT sort.0 = (f32[7,1024,1024]{2,1,0}, s32[7,1024,1024]{2,1,0}) sort(negate.0, iota.0), dimensions={2}, is_stable=true, to_apply=compare, sharding={{devices=[1,2,4]<=[8]},{devices=[1,2,4]<=[8]}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
// "sort.1" is the partitioned sort instruction.
auto sort = FindInstruction(module.get(), "sort.1");
for (auto operand : sort->operands()) {
EXPECT_EQ(operand->shape().dimensions(0), 2);
EXPECT_EQ(operand->shape().dimensions(1), 512);
EXPECT_EQ(operand->shape().dimensions(2), 1024);
}
}
// Sort sharded on its sort dimension with a partially replicated sharding
// (2-way tiled on dim 1, 4-way replicated). Per the expectations below, the
// partitioned sort's operand is split on dim 0 (1024 / 2 = 512) and keeps
// the full sort dimension.
TEST_P(SpmdPartitioningTest, SortShardedOnSortDim_LastTileDimReplicate) {
absl::string_view hlo_string = R"(
HloModule module, entry_computation_layout={(f32[1024,1024]{1,0})->f32[1024,1024]{1,0}}
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[1024,1024]{1,0} parameter(0)
negate.0 = f32[1024,1024]{1,0} negate(param.0), sharding={devices=[1,2,4]<=[8] last_tile_dim_replicate}
ROOT sort.0 = f32[1024,1024]{1,0} sort(negate.0), dimensions={1}, is_stable=true, to_apply=compare, sharding={devices=[1,2,4]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
// "sort.1" is the partitioned sort instruction.
auto sort = FindInstruction(module.get(), "sort.1");
for (auto operand : sort->operands()) {
EXPECT_EQ(operand->shape().dimensions(0), 512);
EXPECT_EQ(operand->shape().dimensions(1), 1024);
}
}
// Transpose whose output sharding is exactly the input sharding carried
// through the permutation (input dim 1 -> output dim 2): the partitioner can
// transpose each local shard without any cross-device communication.
TEST_P(SpmdPartitioningTest, ShardableTranspose) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[16,38,38,4] parameter(0)
%param0.copy = f32[16,38,38,4] copy(%param0), sharding={devices=[1,2,1,1]0,1}
ROOT %transpose = f32[16,4,38,38] transpose(%param0.copy),
dimensions={0,3,1,2}, sharding={devices=[1,1,2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
// Each partition holds a dynamic-slice of the parameter along dim 1
// (38 -> 19 rows per device).
auto local_slice = op::DynamicSlice(op::Parameter(), op::Constant(),
op::Reshape(), op::Constant(),
op::Constant());
auto local_input = AllOf(op::Copy(local_slice), op::Shape("f32[16,19,38,4]"));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Transpose(local_input), op::Shape("f32[16,4,19,38]")));
}
// Transpose sharded on two dimensions (4x2 mesh). The output sharding is the
// input sharding carried through the permutation {1,3,0,2} (with a permuted
// device order), so the transpose can still be done locally per shard.
TEST_P(SpmdPartitioningTest, MultiDimensionShardedTranspose) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[16,38,38,4] parameter(0)
%param0.copy = f32[16,38,38,4] copy(%param0),
sharding={devices=[4,2,1,1]<=[8]}
ROOT %transpose = f32[38,4,16,38] transpose(%param0.copy),
dimensions={1,3,0,2}, sharding={devices=[2,1,4,1]<=[4,2]T(1,0)}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
// The local shard is [16/4, 38/2, 38, 4] = [4,19,38,4]; the transpose of
// that shard is [19,4,4,38].
auto param0 = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Reshape(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[4,19,38,4]"));
EXPECT_THAT(root, AllOf(op::Transpose(param0), op::Shape("f32[19,4,4,38]")));
}
// Transpose whose output sharding ([1,2,1,1], i.e. on output dim 1 = input
// dim 3) does NOT match the input sharding (on input dim 1) carried through
// the permutation, so the partitioner must reshard the input (via
// all-to-all) before transposing locally.
TEST_P(SpmdPartitioningTest, NonShardableTranspose) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[16,38,38,4] parameter(0)
%param0.copy = f32[16,38,38,4] copy(%param0), sharding={devices=[1,2,1,1]0,1}
ROOT %transpose = f32[16,4,38,38] transpose(%param0.copy),
dimensions={0,3,1,2}, sharding={devices=[1,2,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
// Fixed: the matcher below was previously stored in a misspelled, unused
// local ("resahrd"), so the reshard pattern was never actually verified.
// It is now checked as the operand of the root transpose: the input is
// resharded from dim 1 to dim 3 (4 -> 2 per device) via all-to-all.
auto reshard = AllOf(op::Reshape(op::Transpose(op::Reshape(op::AllToAll()))),
op::Shape("f32[16,38,38,2]"));
EXPECT_THAT(root, AllOf(op::Transpose(reshard), op::Shape("f32[16,2,38,38]")));
}
// Same as ShardableTranspose but with partially replicated shardings (2-way
// tiled, 2-way replicated): the tiled dimension follows the permutation, so
// the transpose is still local per shard.
TEST_P(SpmdPartitioningTest, PartialReplicateShardableTranspose) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[16,38,38,4] parameter(0)
%param0.copy = f32[16,38,38,4] copy(%param0),
sharding={devices=[1,2,1,1,2]<=[4] last_tile_dim_replicate}
ROOT %transpose = f32[16,4,38,38] transpose(%param0.copy),
dimensions={0,3,1,2},
sharding={devices=[1,1,2,1,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
// Local shard: dim 1 split in half (38 -> 19).
auto param0 = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[16,19,38,4]"));
EXPECT_THAT(root, AllOf(op::Transpose(param0), op::Shape("f32[16,4,19,38]")));
}
// Partially replicated variant of NonShardableTranspose: the output sharding
// (tiled on output dim 1 = input dim 3) does not match the input sharding
// (tiled on input dim 1) carried through the permutation, so a reshard via
// all-to-all is required before the local transpose.
TEST_P(SpmdPartitioningTest, PartialReplicateNonShardableTranspose) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[16,38,38,4] parameter(0)
%param0.copy = f32[16,38,38,4] copy(%param0),
sharding={devices=[1,2,1,1,2]<=[4] last_tile_dim_replicate}
ROOT %transpose = f32[16,4,38,38] transpose(%param0.copy),
dimensions={0,3,1,2},
sharding={devices=[1,2,1,1,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
// Fixed: the matcher below was previously stored in a misspelled, unused
// local ("resahrd"), so the reshard pattern was never actually verified.
// It is now checked as the operand of the root transpose.
auto reshard = AllOf(op::Reshape(op::Transpose(op::Reshape(op::AllToAll()))),
op::Shape("f32[16,38,38,2]"));
EXPECT_THAT(root, AllOf(op::Transpose(reshard), op::Shape("f32[16,2,38,38]")));
}
// Multi-dimension sharded transpose with partial replication (2x2 tiled,
// 2-way replicated): the tiled dims follow the permutation (with reordered
// devices), so the transpose is still local per shard.
TEST_P(SpmdPartitioningTest, PartialReplicateMultiDimensionShardedTranspose) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[16,38,38,4] parameter(0)
%param0.copy = f32[16,38,38,4] copy(%param0),
sharding={devices=[2,2,1,1,2]<=[8] last_tile_dim_replicate}
ROOT %transpose = f32[38,4,16,38] transpose(%param0.copy),
dimensions={1,3,0,2},
sharding={devices=[2,1,2,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
// Local shard: [16/2, 38/2, 38, 4] = [8,19,38,4]; its transpose is
// [19,4,8,38].
auto param0 = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Reshape(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[8,19,38,4]"));
EXPECT_THAT(root, AllOf(op::Transpose(param0), op::Shape("f32[19,4,8,38]")));
}
// Reshape that only splits the trailing dimension (324 -> 4x81) while the
// sharded leading dimension is untouched: each partition reshapes its local
// shard directly, no communication needed.
TEST_P(SpmdPartitioningTest, ShardableReshape) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[38,38,324] parameter(0)
%param0.copy = f32[38,38,324] copy(%param0), sharding={devices=[2,1,1]0,1}
ROOT %reshape = f32[38,38,4,81] reshape(%param0.copy),
sharding={devices=[2,1,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
// The local operand is a dynamic-slice of dim 0 (38 -> 19 per device).
auto local_slice = op::DynamicSlice(op::Parameter(), op::Reshape(),
op::Constant(), op::Constant());
auto local_input = AllOf(op::Copy(local_slice), op::Shape("f32[19,38,324]"));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Reshape(local_input), op::Shape("f32[19,38,4,81]")));
}
// Reshape splitting a sharded dimension (14 -> 2x7) where shard boundaries
// do not line up with the new factorization: the partitioner must perform a
// halo exchange (concat of collective-permutes) before the local reshape.
TEST_P(SpmdPartitioningTest, ReshapePartialHaloExchange) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[4,14,4] parameter(0), sharding={devices=[2,4,2]<=[16]}
ROOT %reshape = f32[2,2,2,7,2,2] reshape(%param0),
sharding={devices=[2,1,4,1,2,1]<=[16]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 16));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
// The local shard is concatenated with halos fetched from three neighbors.
auto halo_exchange =
AllOf(op::Concatenate(op::Copy(op::Parameter()), op::CollectivePermute(),
op::CollectivePermute(), op::CollectivePermute()));
EXPECT_THAT(
root,
AllOf(op::Reshape(op::DynamicSlice(op::Pad(halo_exchange, _), _, _, _)),
op::Shape("f32[1,2,1,7,1,2]")));
}
// Reshape whose input is sharded on dim 0 but whose output wants dim 1
// sharded: the partitioner reshards the input (all-to-all) first, then
// reshapes locally.
TEST_P(SpmdPartitioningTest, ReshapeWithReshard) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[38,38,324] parameter(0), sharding={devices=[2,1,1]0,1}
ROOT %reshape = f32[38,38,4,81] reshape(%param0),
sharding={devices=[1,2,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
// Input is moved from dim-0 sharding to dim-1 sharding via all-to-all.
auto resharded_param =
op::Reshape(op::Transpose(op::AllToAll(op::Reshape(op::Parameter(0)))));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Reshape(resharded_param),
op::Shape("f32[38,19,4,81]")));
}
// Reshape sharded on dim 0 whose output wants the new last dimension
// sharded: here the partitioner reshapes locally FIRST and then reshards the
// result (all-to-all) to the requested output sharding.
TEST_P(SpmdPartitioningTest, ReshapeWithReshard2) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[38,38,324] parameter(0), sharding={devices=[2,1,1]0,1}
ROOT %reshape = f32[38,38,2,162] reshape(%param0),
sharding={devices=[1,1,1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
// Local reshape of the dim-0 shard, then reshard to dim-3 sharding.
auto local_reshape =
AllOf(op::Reshape(op::Parameter(0)), op::Shape("f32[19,38,2,162]"));
EXPECT_THAT(root, AllOf(op::Shape("f32[38,38,2,81]"),
op::Reshape(op::Transpose(
op::AllToAll(op::Reshape(local_reshape))))));
}
// Reshape collapsing many dimensions (80x64x2^5 -> 5120x4x8) across 128
// devices where shard boundaries cannot be mapped: the partitioner falls
// back to materializing the full value (all-reduce of dynamic-update-slices)
// and re-slicing it to the output sharding.
TEST_P(SpmdPartitioningTest, ReshapeWithReshard3) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %reshape {
p0 = bf16[80,64,2,2,2,2,2] parameter(0), sharding={devices=[16,8,1,1,1,1,1]<=[128]}
ROOT reshape = bf16[5120,4,8] reshape(p0), sharding={devices=[128,1,1]<=[128]}
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module, PartitionComputation(hlo_string, 128));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
// Note: the intermediate is [320,4,8] (dim 0 over the 16-way subgroup),
// and the final dynamic-slice yields [40,4,8] = 5120/128 rows per device.
auto reshape = AllOf(op::Reshape(op::AllReduce(op::DynamicUpdateSlice(
_, op::Parameter(0), _, _, _, _, _, _, _))),
op::Shape("bf16[320,4,8]"));
EXPECT_THAT(root, AllOf(op::DynamicSlice(reshape, _, _, _),
op::Shape("bf16[40,4,8]")));
}
// Reshape collapsing dimensions across 128 devices where the sharded input
// dims can be rearranged with an all-to-all instead of a full all-reduce
// fallback: each device ends up with a [40,16,8] shard of the [5120,16,8]
// result.
TEST_P(SpmdPartitioningTest, ReshapeWithReshard4) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %reshape {
p0 = bf16[80,64,8,2,2,2,2] parameter(0), sharding={devices=[16,1,8,1,1,1,1]<=[128]}
ROOT reshape = bf16[5120,16,8] reshape(p0), sharding={devices=[128,1,1]<=[128]}
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module, PartitionComputation(hlo_string, 128));
VLOG(1) << module->ToString();
// The reshard is expressed as all-to-all + transpose + local reshapes.
auto resharded = op::Reshape(op::Reshape(op::Transpose(op::AllToAll())));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(resharded, op::Shape("bf16[40,16,8]")));
}
// Partially replicated variant of ShardableReshape: dim 0 tiled 2 ways with
// 2-way replication; the reshape only factors the trailing dimension, so
// each partition reshapes its local [19,38,324] shard directly.
TEST_P(SpmdPartitioningTest, PartialReplicateShardableReshape) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[38,38,324] parameter(0)
%param0.copy = f32[38,38,324] copy(%param0),
sharding={devices=[2,1,1,2]<=[4] last_tile_dim_replicate}
ROOT %reshape = f32[38,38,4,81] reshape(%param0.copy),
sharding={devices=[2,1,1,1,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
// Local shard: dim 0 split in half (38 -> 19).
auto param0 =
AllOf(op::Copy(op::DynamicSlice(op::Parameter(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[19,38,324]"));
EXPECT_THAT(root, AllOf(op::Reshape(param0), op::Shape("f32[19,38,4,81]")));
}
// Reshape that merges a sharded dimension into a larger one (7x2 -> 14,
// sharded 2 ways): the local reshape over-produces (8 instead of 7 rows per
// shard), so a halo exchange (collective-permute + concat + dynamic-slice)
// trims the result to the correct 7 rows.
TEST_P(SpmdPartitioningTest, ReshapeMergeDimsWithHaloExchange) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = s32[2,3,7,10] parameter(0), sharding={devices=[1,1,2,1]0,1}
ROOT %reshape = s32[3,2,1,14,5] reshape(%input),
sharding={devices=[1,1,1,2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
// Local reshape yields 8 rows; the halo from the neighbor plus a
// dynamic-slice produces the final 7 rows per shard.
auto reshape =
AllOf(op::Reshape(op::Parameter(0)), op::Shape("s32[3,2,1,8,5]"))
;
auto halo = op::CollectivePermute(op::Slice(reshape));
auto exchanged = op::DynamicSlice(op::Concatenate(halo, op::Slice(reshape)),
_, _, _, _, _);
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(exchanged, op::Shape("s32[3,2,1,7,5]")));
}
// Same merge-dims halo-exchange pattern as above, but with partially
// replicated shardings (2-way tiled, 2-way replicated); the expected
// partitioned structure is identical.
TEST_P(SpmdPartitioningTest, PartialReplicateReshapeMergeDimsWithHaloExchange) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = s32[2,3,7,10] parameter(0),
sharding={devices=[1,1,2,1,2]<=[4] last_tile_dim_replicate}
ROOT %reshape = s32[3,2,1,14,5] reshape(%input),
sharding={devices=[1,1,1,2,1,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
// Local reshape (8 rows) + neighbor halo + dynamic-slice to 7 rows.
auto reshape =
AllOf(op::Reshape(op::Parameter(0)), op::Shape("s32[3,2,1,8,5]"));
auto halo = op::CollectivePermute(op::Slice(reshape));
auto exchanged = op::DynamicSlice(op::Concatenate(halo, op::Slice(reshape)),
_, _, _, _, _);
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(exchanged, op::Shape("s32[3,2,1,7,5]")));
}
// Resharding from an 8-way tiling of dim 0 (size 2, heavily padded) to a
// 2-way tiling with 4-way replication, combined with a reshape: the expected
// pattern is a masked halo exchange (selects over a collective-permute)
// followed by an all-reduce and the local reshape.
TEST_P(SpmdPartitioningTest, TileToPartialReplicateHaloExchangeWithPadding) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[2,123]{1,0} parameter(0), sharding={devices=[8,1]<=[8]}
ROOT %reshape = f32[2,1,123]{2,1,0} reshape(%input),
sharding={devices=[2,1,1,4]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
// Each output shard holds one row ([1,1,123]).
auto reshape = AllOf(op::Reshape(op::AllReduce(op::Select(
_,
op::Select(_, op::CollectivePermute(op::Parameter()),
op::Parameter()),
_))),
op::Shape("f32[1,1,123]"));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, reshape);
}
// 4-way partitioned LHS-dilated reduce-window (InceptionV3-style 5x5 window
// with 3x dilation and padding 4 on dim 1). The expected partitioned form:
// halo exchange on the sharded dim, masking of out-of-range (padded/dilated)
// positions, a local reduce-window, and a final dynamic-slice to the
// per-device output. The matcher arithmetic below mirrors the partitioner's
// index computations; the exact expression order is significant.
TEST_P(SpmdPartitioningTest, InceptionV3_4_way_ReduceWindowDilated) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
%param0 = f32[128,5,5,768] parameter(0)
%param0.copy = f32[128,5,5,768] copy(%param0),
sharding={devices=[1,4,1,1]<=[4]}
%constant.1 = f32[] constant(0), sharding={replicated}
ROOT %rw = f32[128,17,17,768] reduce-window(%param0.copy, %constant.1),
window={size=1x5x5x1 pad=0_0x4_4x4_4x0_0 lhs_dilate=1x3x3x1},
to_apply=sum, sharding={devices=[1,4,1,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
// Per-device input shard of the (padded) parameter along dim 1.
auto input_shard = op::Copy(op::DynamicSlice(
op::Pad(op::Parameter(0), op::Constant()), op::Constant(), op::Reshape(),
op::Constant(), op::Constant()));
// Index expressions derived from the partition id (op::Reshape() here
// matches the reshaped partition-id), used for halo and mask offsets.
auto id_mul4_add1 =
op::Add(op::Multiply(op::Reshape(), op::Constant()), op::Constant());
auto id_mul5 = op::Multiply(op::Reshape(), op::Constant());
auto id_mul5_add1_div3 =
op::Divide(op::Add(id_mul5, op::Constant()), op::Constant());
// Halo exchange: concat of the neighbor's shard and the local shard,
// then a dynamic-slice to the window the device actually needs.
auto before_masking = AllOf(
op::Shape("f32[128,3,5,768]"),
op::DynamicSlice(
AllOf(
op::Shape("f32[128,4,5,768]"),
op::Concatenate(op::CollectivePermute(input_shard), input_shard)),
op::Constant(),
op::Subtract(op::Constant(),
op::Subtract(id_mul4_add1, id_mul5_add1_div3)),
op::Constant(), op::Constant()));
// Mask out positions that fall outside the valid (padded) range.
auto masked = op::Select(
op::And(op::Compare(op::Add(op::Iota(), op::Broadcast(id_mul5_add1_div3)),
op::Broadcast(op::Constant())),
op::Compare(op::Add(op::Iota(), op::Broadcast(id_mul5_add1_div3)),
op::Broadcast(op::Constant()))),
before_masking, op::Broadcast(op::Constant()));
// Local reduce-window over the masked shard produces extra rows...
auto rw = AllOf(op::Shape("f32[128,7,17,768]"),
op::ReduceWindow(masked, op::Constant()));
// ...which the final dynamic-slice trims to the 17/4-rounded per-device
// output of 5 rows.
auto final_slice_index = op::Subtract(
id_mul5,
op::Add(op::Multiply(id_mul5_add1_div3, op::Constant()), op::Constant()));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Shape("f32[128,5,17,768]"),
op::DynamicSlice(rw, op::Constant(), final_slice_index,
op::Constant(), op::Constant())));
}
TEST_P(SpmdPartitioningTest, TiledToTiledReduce) {
  // The reduce eliminates dims {0,1,2}; the input is tiled on dim 3, which is
  // kept, so each partition reduces its local shard with no communication.
  absl::string_view hlo_string = R"(
HloModule module
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add = f32[] add(a, b)
}
ENTRY entry {
  %param0 = f32[4,32,32,128] parameter(0)
  %param0.copy = f32[4,32,32,128] copy(%param0),
    sharding={devices=[1,1,1,2]0,1}
  %constant.1 = f32[] constant(0), sharding={replicated}
  %reduce = f32[128] reduce(%param0.copy, %constant.1), dimensions={0,1,2},
    to_apply=%sum, sharding={devices=[2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  // Each partition's input is a dynamic-slice along the last dimension.
  auto sharded_input = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
                                op::Constant(), op::Reshape())),
      op::Shape("f32[4,32,32,64]"));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Reduce(sharded_input, op::Constant()),
                    op::Shape("f32[64]")));
}
TEST_P(SpmdPartitioningTest, PartialTiledToPartialTiledReduce) {
  // Reducing over the tiled dim 0 of a partially-tiled input requires an
  // all-reduce across the partitions that held different slices of dim 0.
  absl::string_view hlo_string = R"(
HloModule module
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add = f32[] add(a, b)
}
ENTRY entry {
  %param0 = f32[4,4] parameter(0),
    sharding={devices=[2,2,2]<=[8] last_tile_dim_replicate}
  %constant.1 = f32[] constant(0), sharding={replicated}
  ROOT %reduce = f32[4] reduce(%param0, %constant.1), dimensions={0},
    to_apply=%sum,
    sharding={devices=[2,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  auto partial_reduce =
      op::AllReduce(op::Reduce(op::Parameter(0), op::Constant()));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(partial_reduce, op::Shape("f32[2]")));
}
TEST_P(SpmdPartitioningTest, DeviceMaximalTupleReduce) {
  // All operands and the result live on a single device (maximal sharding),
  // so the tuple reduce is emitted unpartitioned at full shape.
  absl::string_view hlo_string = R"(
HloModule module
%minmax_func {
  %lhs_value = f32[] parameter(0)
  %rhs_value = f32[] parameter(2)
  %compare.2 = pred[] compare(%lhs_value, %rhs_value), direction=GT
  %select.4 = f32[] select(%compare.2, %lhs_value, %rhs_value)
  %lhs_index = s32[] parameter(1)
  %rhs_index = s32[] parameter(3)
  %select.5 = s32[] select(%compare.2, %lhs_index, %rhs_index)
  ROOT %tuple.2 = (f32[], s32[]) tuple(%select.4, %select.5)
}
ENTRY %main {
  %param0 = f32[28,10] parameter(0), sharding={maximal device=0}
  %param1 = s32[28,10] parameter(1), sharding={maximal device=0}
  %init0 = f32[] parameter(2), sharding={maximal device=0}
  %init1 = s32[] parameter(3), sharding={maximal device=0}
  ROOT %reduce = (f32[28], s32[28]) reduce(%param0, %param1, %init0, %init1),
    dimensions={1}, to_apply=%minmax_func,
    sharding={{maximal device=0}, {maximal device=0}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  auto full_reduce = op::Reduce(op::Parameter(0), op::Parameter(1),
                                op::Parameter(2), op::Parameter(3));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(full_reduce, op::Shape("(f32[28], s32[28])")));
}
TEST_P(SpmdPartitioningTest, TiledToTiledTupleReduce) {
  // Inputs are tiled on dim 0 and the reduce removes dim 1, so every
  // partition can reduce its own half (14 rows) independently.
  absl::string_view hlo_string = R"(
HloModule module
%minmax_func {
  %lhs_value = f32[] parameter(0)
  %rhs_value = f32[] parameter(2)
  %compare.2 = pred[] compare(%lhs_value, %rhs_value), direction=GT
  %select.4 = f32[] select(%compare.2, %lhs_value, %rhs_value)
  %lhs_index = s32[] parameter(1)
  %rhs_index = s32[] parameter(3)
  %select.5 = s32[] select(%compare.2, %lhs_index, %rhs_index)
  ROOT %tuple.2 = (f32[], s32[]) tuple(%select.4, %select.5)
}
ENTRY %main {
  %param0 = f32[28,10] parameter(0), sharding={devices=[2,1]0,1}
  %param1 = s32[28,10] parameter(1), sharding={devices=[2,1]0,1}
  %init0 = f32[] parameter(2)
  %init1 = s32[] parameter(3)
  ROOT %reduce = (f32[28], s32[28]) reduce(%param0, %param1, %init0, %init1),
    dimensions={1}, to_apply=%minmax_func,
    sharding={{devices=[2]0,1}, {devices=[2]0,1}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  auto local_tuple_reduce = op::Reduce(op::Parameter(0), op::Parameter(1),
                                       op::Parameter(2), op::Parameter(3));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(local_tuple_reduce, op::Shape("(f32[14], s32[14])")));
}
TEST_P(SpmdPartitioningTest, TiledToPartiallyTiledTupleReduce) {
  // Input tiled [2,4]; output only tiled on dim 0 (last_tile_dim_replicate).
  // Expected lowering: reduce the local 14x3 shard, then rebuild the full
  // dim-1 extent via dynamic-update-slice + all-reduce (an all-gather
  // pattern), and reduce once more to combine the 4 partial results.
  absl::string_view hlo_string = R"(
HloModule module
%minmax_func {
  %lhs_value = f32[] parameter(0)
  %rhs_value = f32[] parameter(2)
  %compare.2 = pred[] compare(%lhs_value, %rhs_value), direction=GT
  %select.4 = f32[] select(%compare.2, %lhs_value, %rhs_value)
  %lhs_index = s32[] parameter(1)
  %rhs_index = s32[] parameter(3)
  %select.5 = s32[] select(%compare.2, %lhs_index, %rhs_index)
  ROOT %tuple.2 = (f32[], s32[]) tuple(%select.4, %select.5)
}
ENTRY %main {
  %param0 = f32[28,12] parameter(0), sharding={devices=[2,4]<=[8]}
  %param1 = s32[28,12] parameter(1), sharding={devices=[2,4]<=[8]}
  %init0 = f32[] parameter(2)
  %init1 = s32[] parameter(3)
  ROOT %reduce = (f32[28], s32[28]) reduce(%param0, %param1, %init0, %init1),
    dimensions={1}, to_apply=%minmax_func,
    sharding={{devices=[2,4]<=[8] last_tile_dim_replicate},
              {devices=[2,4]<=[8] last_tile_dim_replicate}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto lhs = AllOf(op::Shape("f32[14,3]"), op::Parameter(0));
  const auto rhs = AllOf(op::Shape("s32[14,3]"), op::Parameter(1));
  // Per-partition tuple reduce over the local shard.
  auto local_reduce =
      AllOf(op::Reduce(lhs, rhs, op::Parameter(2), op::Parameter(3)),
            op::Shape("(f32[14], s32[14])"));
  auto reshape_l = AllOf(op::Reshape(op::GetTupleElement(local_reduce)),
                         op::Shape("f32[14,1]"));
  auto reshape_r = AllOf(op::Reshape(op::GetTupleElement(local_reduce)),
                         op::Shape("s32[14,1]"));
  // All-gather of the 4 partial columns, expressed as DUS + all-reduce.
  auto broadcast_l =
      AllOf(op::AllReduce(op::DynamicUpdateSlice(_, reshape_l, _, _)),
            op::Shape("f32[14,4]"));
  auto broadcast_r =
      AllOf(op::AllReduce(op::DynamicUpdateSlice(_, reshape_r, _, _)),
            op::Shape("s32[14,4]"));
  const auto root = module->entry_computation()->root_instruction();
  // Final reduce combines the gathered partials into the sharded result.
  EXPECT_THAT(root, AllOf(op::Reduce(broadcast_l, broadcast_r, op::Parameter(2),
                                     op::Parameter(3)),
                          op::Shape("(f32[14], s32[14])")));
}
TEST_P(SpmdPartitioningTest, TupleReduceSubgroupManual) {
  // Tuple reduce with a manual subgroup dimension: the 2-way manual subgroup
  // is left alone while the other 2-way tiled dim (dim 1) is combined using
  // the same DUS + all-reduce gather pattern as the non-manual case.
  absl::string_view hlo_string = R"(
HloModule module
%minmax_func {
  %lhs_value = f32[] parameter(0)
  %rhs_value = f32[] parameter(2)
  %compare.2 = pred[] compare(%lhs_value, %rhs_value), direction=GT
  %select.4 = f32[] select(%compare.2, %lhs_value, %rhs_value)
  %lhs_index = s32[] parameter(1)
  %rhs_index = s32[] parameter(3)
  %select.5 = s32[] select(%compare.2, %lhs_index, %rhs_index)
  ROOT %tuple.2 = (f32[], s32[]) tuple(%select.4, %select.5)
}
ENTRY %main {
  %param0 = f32[28,12] parameter(0),
    sharding={devices=[1,2,2]<=[4] last_tile_dims={manual}}
  %param1 = s32[28,12] parameter(1),
    sharding={devices=[1,2,2]<=[4] last_tile_dims={manual}}
  %init0 = f32[] parameter(2),
    sharding={devices=[2,2]<=[4] last_tile_dims={replicated,manual}}
  %init1 = s32[] parameter(3),
    sharding={devices=[2,2]<=[4] last_tile_dims={replicated,manual}}
  ROOT %reduce = (f32[28], s32[28]) reduce(%param0, %param1, %init0, %init1),
    dimensions={1}, to_apply=%minmax_func,
    sharding={{devices=[1,2,2]<=[4] last_tile_dims={replicated,manual}},
              {devices=[1,2,2]<=[4] last_tile_dims={replicated,manual}}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto lhs = AllOf(op::Shape("f32[28,6]"), op::Parameter(0));
  const auto rhs = AllOf(op::Shape("s32[28,6]"), op::Parameter(1));
  // Local reduce over each partition's 6-column shard.
  auto local_reduce =
      AllOf(op::Reduce(lhs, rhs, op::Parameter(2), op::Parameter(3)),
            op::Shape("(f32[28], s32[28])"));
  auto reshape_l = AllOf(op::Reshape(op::GetTupleElement(local_reduce)),
                         op::Shape("f32[28,1]"));
  auto reshape_r = AllOf(op::Reshape(op::GetTupleElement(local_reduce)),
                         op::Shape("s32[28,1]"));
  // Gather the two partial columns across the tiled (non-manual) dimension.
  auto broadcast_l =
      AllOf(op::AllReduce(op::DynamicUpdateSlice(_, reshape_l, _, _)),
            op::Shape("f32[28,2]"));
  auto broadcast_r =
      AllOf(op::AllReduce(op::DynamicUpdateSlice(_, reshape_r, _, _)),
            op::Shape("s32[28,2]"));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Reduce(broadcast_l, broadcast_r, op::Parameter(2),
                                     op::Parameter(3)),
                          op::Shape("(f32[28], s32[28])")));
}
TEST_P(SpmdPartitioningTest, TiledToTiledReduceOutputReshard) {
  // The reduce removes the tiled input dim (dim 1), so partial results must
  // be combined with an all-reduce; the output is then resharded to its own
  // 2-way tiling via a dynamic-slice.
  absl::string_view hlo_string = R"(
HloModule module
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add = f32[] add(a, b)
}
ENTRY entry {
  %param0 = f32[4,32,32,128] parameter(0)
  %param0.copy = f32[4,32,32,128] copy(%param0),
    sharding={devices=[1,2,1,1]0,1}
  %constant.1 = f32[] constant(0), sharding={replicated}
  %reduce = f32[128] reduce(%param0.copy, %constant.1), dimensions={0,1,2},
    to_apply=%sum, sharding={devices=[2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  // Local shard of the input, sliced along dim 1.
  auto param0 = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[4,16,32,128]"));
  // all-reduce(local reduce) gives the replicated f32[128]; the final
  // dynamic-slice reshards it to each partition's f32[64] piece.
  EXPECT_THAT(root,
              AllOf(op::DynamicSlice(
                        AllOf(op::AllReduce(op::Reduce(param0, op::Constant())),
                              op::Shape("f32[128]")),
                        op::Reshape()),
                    op::Shape("f32[64]")));
}
TEST_P(SpmdPartitioningTest, IotaAlongNonTileDimension) {
  // iota_dimension (1) differs from the tiled dimension (2), so each
  // partition emits a plain local iota over its 46-element shard.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  ROOT %iota = s32[16,80,91] iota(), iota_dimension=1,
    sharding={devices=[1,1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  auto local_iota = AllOf(op::Iota(), op::Shape("s32[16,80,46]"));
  EXPECT_THAT(module->entry_computation()->root_instruction(), local_iota);
}
TEST_P(SpmdPartitioningTest, IotaAlongTileDimension) {
  // iota_dimension coincides with the tiled dimension, so each partition
  // offsets its local iota by a broadcast per-partition base value.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  ROOT %iota = s32[16,80,91] iota(), iota_dimension=2,
    sharding={devices=[1,1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  auto offset_iota = AllOf(op::Add(op::Iota(), op::Broadcast()),
                           op::Shape("s32[16,80,46]"));
  EXPECT_THAT(module->entry_computation()->root_instruction(), offset_iota);
}
TEST_P(SpmdPartitioningTest, U32IotaAlongTileDimension) {
  // Same as IotaAlongTileDimension but with an unsigned element type.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  ROOT %iota = u32[16,80,91] iota(), iota_dimension=2,
    sharding={devices=[1,1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  auto offset_iota = AllOf(op::Add(op::Iota(), op::Broadcast()),
                           op::Shape("u32[16,80,46]"));
  EXPECT_THAT(module->entry_computation()->root_instruction(), offset_iota);
}
TEST_P(SpmdPartitioningTest, Conditional) {
  // Conditional whose operands mix maximal, replicated, and tiled shardings.
  // Both branch computations must be rewritten to operate on the [2,5]
  // shards, and each branch's parameter is resharded as needed.
  absl::string_view hlo_string = R"(
HloModule module
Negate {
  x = f32[4,5] parameter(0), sharding={devices=[2,1]0,1}
  ROOT negate = f32[4,5] negate(x), sharding={devices=[2,1]0,1}
}
Identity {
  y = f32[4,5] parameter(0), sharding={devices=[2,1]0,1}
  ROOT copy = f32[4,5] copy(y), sharding={devices=[2,1]0,1}
}
ENTRY entry {
  %param.0 = pred[] parameter(0)
  %param.0.copy = pred[] copy(%param.0), sharding={maximal device=0}
  %param.1 = f32[4,5] parameter(1)
  %param.1.copy = f32[4,5] copy(%param.1), sharding={replicated}
  %param.2 = f32[4,5] parameter(2)
  %param.2.copy = f32[4,5] copy(%param.2), sharding={devices=[2,1]0,1}
  ROOT cond = f32[4,5] conditional(%param.0.copy, %param.1.copy, %param.2.copy),
    true_computation=Negate, false_computation=Identity,
    sharding={devices=[2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  auto param0 = AllOf(op::Copy(op::Copy(op::Parameter()), op::Shape("pred[]")));
  auto param1 = AllOf(op::Copy(op::Parameter()), op::Shape("f32[4,5]"));
  auto param2 = AllOf(op::Copy(op::DynamicSlice(op::Parameter(), op::Reshape(),
                                                op::Constant())),
                      op::Shape("f32[2,5]"));
  const auto root = module->entry_computation()->root_instruction();
  // The predicate reaches the conditional through an all-reduce (it was
  // pinned to device 0 by the maximal sharding).
  EXPECT_THAT(root, AllOf(op::Conditional(op::AllReduce(), param1, param2),
                          op::Shape("f32[2,5]")));
  // True branch receives the replicated operand and slices its own shard.
  auto then_branch_root = root->branch_computation(0)->root_instruction();
  EXPECT_THAT(then_branch_root,
              AllOf(op::Negate(op::DynamicSlice(op::Parameter(), op::Reshape(),
                                                op::Constant())),
                    op::Shape("f32[2,5]")));
  // False branch's operand is already tiled; it is used directly.
  auto else_branch_root = root->branch_computation(1)->root_instruction();
  EXPECT_THAT(else_branch_root,
              AllOf(op::Copy(op::Parameter()), op::Shape("f32[2,5]")));
}
TEST_P(SpmdPartitioningTest, ConditionalManual) {
  // Everything is manually sharded, so the conditional and its operands pass
  // through partitioning unchanged at full shape.
  absl::string_view hlo_string = R"(
HloModule module
Negate {
  x = f32[4,5] parameter(0), sharding={manual}
  ROOT negate = f32[4,5] negate(x), sharding={manual}
}
Identity {
  y = f32[4,5] parameter(0), sharding={manual}
  ROOT copy = f32[4,5] copy(y), sharding={manual}
}
ENTRY entry {
  %param.0 = pred[] parameter(0), sharding={manual}
  %param.1 = f32[4,5] parameter(1), sharding={manual}
  %param.2 = f32[4,5] parameter(2), sharding={manual}
  ROOT cond = f32[4,5] conditional(%param.0, %param.1, %param.2),
    true_computation=Negate, false_computation=Identity, sharding={manual}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  auto pred_arg = AllOf(op::Parameter(0), op::Shape("pred[]"));
  auto true_arg = AllOf(op::Parameter(1), op::Shape("f32[4,5]"));
  auto false_arg = AllOf(op::Parameter(2), op::Shape("f32[4,5]"));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Conditional(pred_arg, true_arg, false_arg),
                    op::Shape("f32[4,5]")));
}
TEST_P(SpmdPartitioningTest, ConditionalPartialManual) {
  // 2-way manual subgroup plus 2-way tiling: the tiled dim is halved while
  // the manual subgroup is left untouched.
  absl::string_view hlo_string = R"(
HloModule module
Negate {
  x = f32[4] parameter(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual}}
  ROOT negate = f32[4] negate(x), sharding={devices=[2,2]<=[4] last_tile_dims={manual}}
}
Identity {
  y = f32[4] parameter(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual}}
  ROOT copy = f32[4] copy(y), sharding={devices=[2,2]<=[4] last_tile_dims={manual}}
}
ENTRY entry {
  %param.0 = pred[] parameter(0), sharding={devices=[2,2]<=[4] last_tile_dims={replicated, manual}}
  %param.1 = f32[4] parameter(1), sharding={devices=[2,2]<=[4] last_tile_dims={manual}}
  %param.2 = f32[4] parameter(2), sharding={devices=[2,2]<=[4] last_tile_dims={manual}}
  ROOT cond = f32[4] conditional(%param.0, %param.1, %param.2),
    true_computation=Negate, false_computation=Identity, sharding={devices=[2,2]<=[4] last_tile_dims={manual}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  auto pred_arg = AllOf(op::Parameter(0), op::Shape("pred[]"));
  auto true_arg = AllOf(op::Parameter(1), op::Shape("f32[2]"));
  auto false_arg = AllOf(op::Parameter(2), op::Shape("f32[2]"));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Conditional(pred_arg, true_arg, false_arg),
                    op::Shape("f32[2]")));
}
TEST_P(SpmdPartitioningTest, WhileManual) {
  // A fully manual while loop should survive partitioning untouched.
  absl::string_view hlo_string = R"(
HloModule module
LoopCond {
  x = s32[] parameter(0), sharding={manual}
  const = s32[] constant(5), sharding={manual}
  ROOT lt = pred[] compare(x, const), direction=LT, sharding={manual}
}
Inc {
  x = s32[] parameter(0), sharding={manual}
  const = s32[] constant(1), sharding={manual}
  ROOT add = s32[] add(x, const), sharding={manual}
}
ENTRY entry {
  zero = s32[] parameter(0), sharding={manual}
  ROOT while = s32[] while(zero), body=Inc, condition=LoopCond,
    sharding={manual}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  auto init = AllOf(op::Parameter(0), op::Shape("s32[]"));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::While(init), op::Shape("s32[]")));
}
TEST_P(SpmdPartitioningTest, WhilePartialManual) {
  // Scalar loop state with a manual subgroup: nothing to tile, so the while
  // loop passes through unchanged.
  absl::string_view hlo_string = R"(
HloModule module
LoopCond {
  x = s32[] parameter(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
  const = s32[] constant(5), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
  ROOT lt = pred[] compare(x, const), direction=LT, sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
}
Inc {
  x = s32[] parameter(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
  const = s32[] constant(1), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
  ROOT add = s32[] add(x, const), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
}
ENTRY entry {
  zero = s32[] parameter(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
  ROOT while = s32[] while(zero), body=Inc, condition=LoopCond, sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  auto init = AllOf(op::Parameter(0), op::Shape("s32[]"));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::While(init), op::Shape("s32[]")));
}
TEST_P(SpmdPartitioningTest, TestWhileFrontendAttributes) {
  // Frontend attributes attached to the while must survive partitioning.
  absl::string_view hlo_string = R"(
HloModule module
LoopCond {
  x = s32[] parameter(0), sharding={manual}
  const = s32[] constant(5), sharding={manual}
  ROOT lt = pred[] compare(x, const), direction=LT, sharding={manual}
}
Inc {
  x = s32[] parameter(0), sharding={manual}
  const = s32[] constant(1), sharding={manual}
  ROOT add = s32[] add(x, const), sharding={manual}
}
ENTRY entry {
  zero = s32[] parameter(0), sharding={manual}
  ROOT while = s32[] while(zero), body=Inc, condition=LoopCond,
    sharding={manual}, frontend_attributes={_xla_other_attribute="xyz"}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  // The attribute map must still carry the original key/value pair.
  EXPECT_EQ(root->frontend_attributes().map().at("_xla_other_attribute"),
            "xyz");
  auto init = AllOf(op::Parameter(0), op::Shape("s32[]"));
  EXPECT_THAT(root, AllOf(op::While(init), op::Shape("s32[]")));
}
TEST_P(SpmdPartitioningTest, SelectAndScatter_RetinaNet) {
  // select-and-scatter with stride 2 on the two middle dims and both data and
  // source tiled 8 ways on dim 1. The strides line up with the sharding, so
  // each partition runs select-and-scatter locally with no extra padding.
  absl::string_view hlo_string = R"(
HloModule module
ge {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT compare = pred[] compare(a, b), direction=GE
}
sum {
  c = f32[] parameter(0)
  d = f32[] parameter(1)
  ROOT add = f32[] add(c, d)
}
ENTRY entry {
  %param.0 = f32[32,128,384,64] parameter(0)
  %param.0.copy = f32[32,128,384,64] copy(%param.0),
    sharding={devices=[1,8,1,1]<=[8]}
  %param.1 = f32[32,64,192,64] parameter(1)
  %param.1.copy = f32[32,64,192,64] copy(%param.1),
    sharding={devices=[1,8,1,1]<=[8]}
  constant.1 = f32[] constant(0), sharding={replicated}
  ROOT select-and-scatter = f32[32,128,384,64] select-and-scatter(param.0.copy,
    %param.1.copy, constant.1), window={size=1x1x1x1 stride=1x2x2x1},
    select=ge, scatter=sum, sharding={devices=[1,8,1,1]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  // Local shards of source and data, each dynamic-sliced along dim 1.
  auto source = AllOf(
      op::Shape("f32[32,8,192,64]"),
      op::Copy(op::DynamicSlice(op::Parameter(1), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())));
  auto data = AllOf(
      op::Shape("f32[32,16,384,64]"),
      op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())));
  EXPECT_THAT(root, op::SelectAndScatter(data, source, op::Constant()));
  // Dim 0 of the partitioned window must not have acquired any padding.
  EXPECT_EQ(root->window().dimensions(0).padding_low(), 0);
  EXPECT_EQ(root->window().dimensions(0).padding_high(), 0);
}
TEST_P(SpmdPartitioningTest, TiledDot) {
  // Both operands are sharded on the contracting dimension; the replicated
  // output therefore requires an all-reduce of the partial products.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[128,64] parameter(0)
  %lhs.copy = f32[128,64] copy(%lhs), sharding={devices=[1,2]0,1}
  %rhs = f32[64,256] parameter(1)
  %rhs.copy = f32[64,256] copy(%rhs), sharding={devices=[2,1]0,1}
  ROOT %conv = f32[128,256] convolution(%lhs.copy, %rhs.copy),
    dim_labels=bf_io->bf, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 2,
                           false));
  VLOG(1) << module->ToString();
  auto lhs_shard = AllOf(op::Copy(op::DynamicSlice(
                             op::Parameter(), op::Constant(), op::Reshape())),
                         op::Shape("f32[128,32]"));
  auto rhs_shard = AllOf(op::Copy(op::DynamicSlice(
                             op::Parameter(), op::Reshape(), op::Constant())),
                         op::Shape("f32[32,256]"));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::AllReduce(op::Convolution(lhs_shard, rhs_shard)),
                    op::Shape("f32[128,256]")));
}
TEST_P(SpmdPartitioningTest, TiledDotOutputTiled) {
  // Same contracting-dim sharding as TiledDot, but the output is itself
  // tiled: the all-reduced full result is resharded with a dynamic-slice.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[128,64] parameter(0)
  %lhs.copy = f32[128,64] copy(%lhs), sharding={devices=[1,2]0,1}
  %rhs = f32[64,256] parameter(1)
  %rhs.copy = f32[64,256] copy(%rhs), sharding={devices=[2,1]0,1}
  ROOT %conv = f32[128,256] convolution(%lhs.copy, %rhs.copy),
    dim_labels=bf_io->bf, sharding={devices=[1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(op::Copy(op::DynamicSlice(
                             op::Parameter(), op::Constant(), op::Reshape())),
                         op::Shape("f32[128,32]"));
  const auto rhs = AllOf(op::Copy(op::DynamicSlice(
                             op::Parameter(), op::Reshape(), op::Constant())),
                         op::Shape("f32[32,256]"));
  // all-reduce combines the partial products; the dynamic-slice then takes
  // each partition's f32[128,128] piece of the output.
  EXPECT_THAT(root, AllOf(op::DynamicSlice(
                              AllOf(op::AllReduce(op::Convolution(lhs, rhs)),
                                    op::Shape("f32[128,256]")),
                              op::Constant(), op::Reshape()),
                          op::Shape("f32[128,128]")));
}
TEST_P(SpmdPartitioningTest, BatchPartitionedConvolution) {
  // Sharding on the batch dimension only: the rhs stays replicated and each
  // partition convolves its own batch shard.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[128,256,256] parameter(0)
  %lhs.copy = f32[128,256,256] copy(%lhs), sharding={devices=[1,2,1]0,1}
  %rhs = f32[256,8,1] parameter(1)
  %rhs.copy = f32[256,8,1] copy(%rhs), sharding={replicated}
  ROOT %conv = f32[128,256,8] convolution(%lhs.copy, %rhs.copy),
    window={size=1}, dim_labels=0bf_io0->0bf, sharding={devices=[1,2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  auto lhs_shard =
      AllOf(op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(),
                                      op::Reshape(), op::Constant())),
            op::Shape("f32[128,128,256]"));
  auto rhs_full = AllOf(op::Copy(op::Parameter(1)), op::Shape("f32[256,8,1]"));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Convolution(lhs_shard, rhs_full),
                    op::Shape("f32[128,128,8]")));
}
TEST_P(SpmdPartitioningTest, DotOutputFeaturePartitioned) {
  // rhs is tiled on its non-contracting dim, matching the output tiling, so
  // each partition computes its slice of the output with a local dot.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[24,64] parameter(0)
  %lhs.copy = f32[24,64] copy(%lhs), sharding={replicated}
  %rhs = f32[39296,64] parameter(1)
  %rhs.copy = f32[39296,64] copy(%rhs), sharding={devices=[2,1]0,1}
  ROOT %dot = f32[24,39296] dot(%lhs.copy, %rhs.copy),
    lhs_batch_dims={}, rhs_batch_dims={},
    lhs_contracting_dims={1}, rhs_contracting_dims={1},
    sharding={devices=[1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  auto lhs_full = AllOf(op::Copy(op::Parameter(0)), op::Shape("f32[24,64]"));
  auto rhs_shard = AllOf(op::Copy(op::DynamicSlice(
                             op::Parameter(1), op::Reshape(), op::Constant())),
                         op::Shape("f32[19648,64]"));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Dot(lhs_full, rhs_shard),
                    op::Shape("f32[24,19648]")));
}
TEST_P(SpmdPartitioningTest, WindowedEinsumTwoContractingDimsLhsReshard) {
  // Windowed einsum (threshold 0 forces it) for a dot with two contracting
  // dims; the lhs is resharded (reshape/all-to-all/transpose) to line up with
  // the rhs before the while loop accumulates one rhs window per iteration.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %p0 = f32[2048,2,3264]{2,1,0} parameter(0), sharding={devices=[1,1,2]0,1}
  %p1 = f32[2,3264,2176]{2,1,0} parameter(1), sharding={devices=[2,1,1]0,1}
  ROOT %dot.224 = f32[2048,2176]{1,0} dot(f32[2048,2,3264]{2,1,0} %p0, f32[2,3264,2176]{2,1,0} %p1), lhs_contracting_dims={1,2}, rhs_contracting_dims={0,1}, sharding={devices=[1,2]0,1}
})";
  // NOTE(review): the bool/int arguments select windowed-einsum options of
  // this PartitionComputation overload; confirm names against the helper.
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 2,
                           true,
                           false,
                           false,
                           false,
                           0));
  VLOG(1) << module->ToString();
  // lhs reshard to match rhs partitioning: reshape -> all-to-all -> transpose.
  const auto arg0 = AllOf(
      op::Reshape(op::Transpose(op::AllToAll(op::Reshape(op::Parameter(0))))),
      op::Shape("f32[2048,1,3264]"));
  const auto arg1 = AllOf(op::Parameter(1), op::Shape("f32[1,3264,2176]"));
  // While state: (lhs, rhs, accumulator, aux buffer, iteration counter).
  const auto while_op =
      AllOf(op::While(op::Tuple(arg0, arg1, op::Broadcast(), op::Broadcast(),
                                op::Constant())),
            op::Shape("(f32[2048,1,3264]{2,1,0}, f32[1,3264,2176]{2,1,0},"
                      " f32[2048,1088]{1,0}, f32[2048,1088]{1,0}, u32[])"));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root, AllOf(op::GetTupleElement(while_op), op::Shape("f32[2048,1088]")));
  const auto while_loop = root->operand(0);
  const auto next_i =
      op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
  // Body: dot the lhs against this iteration's rhs window, accumulate, and
  // advance the counter; the conditional handles the last-iteration case.
  auto lhs = AllOf(op::GetTupleElement(op::Parameter(0)),
                   op::Shape("f32[2048,1,3264]"));
  auto rhs = AllOf(op::DynamicSlice(), op::Shape("f32[1,3264,1088]"));
  auto dot_op = op::Dot(lhs, rhs);
  auto add_op = op::Add(op::GetTupleElement(op::Parameter(0)), dot_op);
  auto cond_op =
      op::Conditional(op::Compare(next_i, op::Constant()), add_op, add_op);
  EXPECT_THAT(while_loop->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(op::Parameter(0)),
                        op::GetTupleElement(op::Parameter(0)), cond_op,
                        op::GetTupleElement(op::Parameter(0)), next_i));
}
TEST_P(SpmdPartitioningTest, WindowedEinsumTwoContractingDimsRhsReshard) {
  // Mirror of the LhsReshard test: here the rhs is the operand that gets
  // resharded (reshape/all-to-all/transpose) before the windowed-einsum loop.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %p0 = f32[4096,2,3264]{2,1,0} parameter(0), sharding={devices=[1,1,2]0,1}
  %p1 = f32[2,3264,2176]{2,1,0} parameter(1), sharding={devices=[2,1,1]0,1}
  ROOT %dot.224 = f32[4096,2176]{1,0} dot(f32[4096,2,3264]{2,1,0} %p0, f32[2,3264,2176]{2,1,0} %p1), lhs_contracting_dims={1,2}, rhs_contracting_dims={0,1}, sharding={devices=[1,2]0,1}
})";
  // NOTE(review): the bool/int arguments select windowed-einsum options of
  // this PartitionComputation overload; confirm names against the helper.
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 2,
                           true,
                           false,
                           false,
                           false,
                           0));
  VLOG(1) << module->ToString();
  const auto arg0 = AllOf(op::Parameter(0), op::Shape("f32[4096,2,1632]"));
  // rhs reshard: reshape -> all-to-all -> transpose to match lhs tiling.
  const auto arg1 = AllOf(
      op::Reshape(op::Transpose(op::AllToAll(op::Reshape(op::Parameter(1))))),
      op::Shape("f32[2,1632,2176]"));
  // While state: (lhs, rhs, accumulator, aux buffer, iteration counter).
  const auto while_op =
      AllOf(op::While(op::Tuple(arg0, arg1, op::Broadcast(), op::Broadcast(),
                                op::Constant())),
            op::Shape("(f32[4096,2,1632]{2,1,0}, f32[2,1632,2176]{2,1,0},"
                      " f32[4096,1088]{1,0}, f32[4096,1088]{1,0}, u32[])"));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root, AllOf(op::GetTupleElement(while_op), op::Shape("f32[4096,1088]")));
  const auto while_loop = root->operand(0);
  const auto next_i =
      op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
  // Body accumulates one dot per iteration over a dynamic-sliced rhs window.
  auto lhs = AllOf(op::GetTupleElement(op::Parameter(0)),
                   op::Shape("f32[4096,2,1632]"));
  auto rhs = AllOf(op::DynamicSlice(), op::Shape("f32[2,1632,1088]"));
  auto dot_op = op::Dot(lhs, rhs);
  auto add_op = op::Add(op::GetTupleElement(op::Parameter(0)), dot_op);
  auto cond_op =
      op::Conditional(op::Compare(next_i, op::Constant()), add_op, add_op);
  EXPECT_THAT(while_loop->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(op::Parameter(0)),
                        op::GetTupleElement(op::Parameter(0)), cond_op,
                        op::GetTupleElement(op::Parameter(0)), next_i));
}
TEST_P(SpmdPartitioningTest, ChooseWindowedEinsumOverIncreasedMemUsageOption) {
  // With the "choose faster windowed einsum" option enabled, the partitioner
  // should still pick the windowed-einsum loop here even though the faster
  // option can trade memory for speed.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %p0 = bf16[512,4,512]{2,1,0} parameter(0), sharding={devices=[16,1,4]<=[64]}
  %p1 = bf16[512,4,512]{2,1,0} parameter(1), sharding={devices=[16,1,4]<=[64]}
  %multiply.611 = bf16[512,4,512]{2,1,0} multiply(bf16[512,4,512]{2,1,0} %p0, bf16[512,4,512]{2,1,0} %p1), sharding={devices=[16,1,4]<=[64]}
  %p2 = bf16[1,2048,768]{2,1,0} parameter(2), sharding={devices=[1,4,16]<=[16,4]T(1,0)}
  %reshape.1074 = bf16[4,512,768]{2,1,0} reshape(bf16[1,2048,768]{2,1,0} %p2), sharding={devices=[4,1,16]<=[16,4]T(1,0)}
  ROOT %dot.128 = bf16[512,768]{1,0} dot(bf16[512,4,512]{2,1,0} %multiply.611, bf16[4,512,768]{2,1,0} %reshape.1074), lhs_contracting_dims={1,2}, rhs_contracting_dims={0,1}, sharding={devices=[16,4]<=[64]}
})";
  // NOTE(review): the bool/int arguments select windowed-einsum options of
  // this PartitionComputation overload; confirm names against the helper.
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 64,
                           true,
                           true,
                           false,
                           false,
                           0));
  VLOG(1) << module->ToString();
  const auto arg0 = AllOf(op::Reshape(), op::Shape("bf16[32,1,512]{2,1,0}"));
  const auto arg1 = AllOf(op::AllReduce(), op::Shape("bf16[1,512,768]{2,1,0}"));
  // While state: (lhs, rhs, accumulator, aux buffer, iteration counter).
  const auto while_op =
      AllOf(op::While(op::Tuple(arg0, arg1, op::Broadcast(), op::Broadcast(),
                                op::Constant())),
            op::Shape("(bf16[32,1,512]{2,1,0}, bf16[1,512,768]{2,1,0},"
                      " bf16[32,192]{1,0}, bf16[32,192]{1,0}, u32[])"));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::GetTupleElement(while_op),
                          op::Shape("bf16[32,192]{1,0}")));
  const auto while_loop = root->operand(0);
  const auto next_i =
      op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
  // Body accumulates one dot per iteration over a dynamic-sliced rhs window.
  auto lhs = AllOf(op::GetTupleElement(op::Parameter(0)),
                   op::Shape("bf16[32,1,512]{2,1,0}"));
  auto rhs = AllOf(op::DynamicSlice(), op::Shape("bf16[1,512,192]{2,1,0}"));
  auto dot_op = op::Dot(lhs, rhs);
  auto add_op = op::Add(op::GetTupleElement(op::Parameter(0)), dot_op);
  auto cond_op =
      op::Conditional(op::Compare(next_i, op::Constant()), add_op, add_op);
  EXPECT_THAT(while_loop->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(op::Parameter(0)),
                        op::GetTupleElement(op::Parameter(0)), cond_op,
                        op::GetTupleElement(op::Parameter(0)), next_i));
}
TEST_P(SpmdPartitioningTest, WindowedEinsumKeepBatchDimensionsSorted) {
  // Regression test: when the windowed-einsum rewrite creates dots inside the
  // while body, their batch dimension numbers must come out sorted.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  p0 = bf16[64,1025,4096]{2,1,0} parameter(0), sharding={devices=[8,1,1,8]<=[64] last_tile_dim_replicate}
  p1 = bf16[1,4096,16384]{2,1,0} parameter(1), sharding={devices=[1,8,8]<=[64]}
  reshape.9434 = bf16[64,1025,32,128]{3,2,1,0} reshape(p0), sharding={devices=[8,1,1,1,8]<=[64] last_tile_dim_replicate}
  reshape.9438 = bf16[32,128,16384]{2,1,0} reshape(p1), sharding={devices=[8,1,8]<=[64]}
  ROOT dot.1104 = bf16[32,64,1025,16384]{3,2,1,0} dot(reshape.9434, reshape.9438), lhs_batch_dims={2}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, sharding={devices=[1,8,1,8]<=[64]}
})";
  // NOTE(review): the bool/int arguments select windowed-einsum options of
  // this PartitionComputation overload; confirm names against the helper.
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 64,
                           true,
                           true,
                           true,
                           true,
                           0));
  VLOG(1) << module->ToString();
  // The partitioned module must still pass HLO verification.
  TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/false,
                           /*allow_mixed_precision=*/false)
                   .Run(module.get())
                   .status());
  const HloInstruction* while_inst =
      module->entry_computation()->root_instruction()->operand(0);
  // Use gtest assertions rather than CHECK_EQ so a mismatch is reported as a
  // test failure instead of aborting the whole test process, and track that
  // the loop actually found a dot so the test cannot pass vacuously.
  bool found_dot = false;
  for (HloInstruction* inst : while_inst->while_body()->instructions()) {
    if (inst->opcode() == HloOpcode::kDot) {
      found_dot = true;
      auto lhs_batch_dims =
          inst->dot_dimension_numbers().lhs_batch_dimensions();
      ASSERT_EQ(lhs_batch_dims.size(), 2);
      EXPECT_EQ(lhs_batch_dims[0], 2);
      EXPECT_EQ(lhs_batch_dims[1], 3);
      auto rhs_batch_dims =
          inst->dot_dimension_numbers().rhs_batch_dimensions();
      ASSERT_EQ(rhs_batch_dims.size(), 2);
      EXPECT_EQ(rhs_batch_dims[0], 0);
      EXPECT_EQ(rhs_batch_dims[1], 1);
    }
  }
  EXPECT_TRUE(found_dot) << "expected a dot in the windowed-einsum while body";
}
TEST_P(SpmdPartitioningTest, DotPartialDeviceOrder) {
  // Partially-tiled operands whose device orders differ: the operands can be
  // used as-is and only the contracting-dim partials need an all-reduce.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[16,256,4096] parameter(0), sharding={devices=[1,1,2,2]1,3,0,2 last_tile_dim_replicate}
  %rhs = f32[4096,2048] parameter(1), sharding={devices=[2,2]3,1,2,0}
  ROOT %dot = f32[16,256,2048] dot(%lhs, %rhs),
    lhs_batch_dims={}, rhs_batch_dims={},
    lhs_contracting_dims={2}, rhs_contracting_dims={0},
    sharding={devices=[1,1,2,2]2,3,0,1 last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  auto lhs_shard = AllOf(op::Parameter(0), op::Shape("f32[16,256,2048]"));
  auto rhs_shard = AllOf(op::Parameter(1), op::Shape("f32[2048,1024]"));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::AllReduce(op::Dot(lhs_shard, rhs_shard)),
                    op::Shape("f32[16,256,1024]")));
}
// Batch-matmul einsum where both operands (and the output) are tiled along
// the shared batch dimension: each device computes an independent local dot
// with no cross-device communication.
TEST_P(SpmdPartitioningTest, EinsumBatchPartitioned) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64] parameter(0)
  %lhs.copy = f32[32,24,64] copy(%lhs), sharding={devices=[2,1,1]0,1}
  %rhs = f32[32,39296,64] parameter(1)
  %rhs.copy = f32[32,39296,64] copy(%rhs), sharding={devices=[2,1,1]0,1}
  ROOT %dot = f32[32,24,39296] dot(%lhs.copy, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={2},
    sharding={devices=[2,1,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, /*num_devices=*/2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  // Each operand is dynamic-sliced along the batch dimension to its local
  // shard before the (communication-free) local dot.
  const auto lhs =
      AllOf(op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(),
                                      op::Constant(), op::Constant())),
            op::Shape("f32[16,24,64]"));
  const auto rhs =
      AllOf(op::Copy(op::DynamicSlice(op::Parameter(1), op::Reshape(),
                                      op::Constant(), op::Constant())),
            op::Shape("f32[16,39296,64]"));
  EXPECT_THAT(root, AllOf(op::Dot(lhs, rhs), op::Shape("f32[16,24,39296]")));
}
// Only the LHS and the output are batch-partitioned; the replicated RHS
// should be dynamic-sliced along the batch dimension to match, keeping the
// dot fully local.
TEST_P(SpmdPartitioningTest, EinsumLHSandOutputBatchPartitioned) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64] parameter(0)
  %lhs.copy = f32[32,24,64] copy(%lhs), sharding={devices=[2,1,1]0,1}
  %rhs = f32[32,39296,64] parameter(1)
  %rhs.copy = f32[32,39296,64] copy(%rhs), sharding={replicated}
  ROOT %dot = f32[32,24,39296] dot(%lhs.copy, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={2},
    sharding={devices=[2,1,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, /*num_devices=*/2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs =
      AllOf(op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(),
                                      op::Constant(), op::Constant())),
            op::Shape("f32[16,24,64]"));
  // The RHS stays replicated at full shape; the partitioner slices it at the
  // dot to align with the LHS batch shard.
  const auto rhs =
      AllOf(op::Copy(op::Parameter(1)), op::Shape("f32[32,39296,64]"));
  EXPECT_THAT(root, AllOf(op::Dot(lhs, op::DynamicSlice(rhs, op::Reshape(),
                                                        op::Constant(),
                                                        op::Constant())),
                          op::Shape("f32[16,24,39296]")));
}
// RHS and output are batch-partitioned but the LHS is tiled on a
// non-contracting dimension: the LHS must be resharded (via AllToAll) onto
// the batch dimension before the local dot.
TEST_P(SpmdPartitioningTest, EinsumRHSandOutputBatchPartitioned) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64] parameter(0)
  %lhs.copy = f32[32,24,64] copy(%lhs), sharding={devices=[1,2,1]0,1}
  %rhs = f32[32,39296,64] parameter(1)
  %rhs.copy = f32[32,39296,64] copy(%rhs), sharding={devices=[2,1,1]0,1}
  ROOT %dot = f32[32,24,39296] dot(%lhs.copy, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={2},
    sharding={devices=[2,1,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, /*num_devices=*/2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs =
      AllOf(op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(),
                                      op::Reshape(), op::Constant())),
            op::Shape("f32[32,12,64]"));
  const auto rhs =
      AllOf(op::Copy(op::DynamicSlice(op::Parameter(1), op::Reshape(),
                                      op::Constant(), op::Constant())),
            op::Shape("f32[16,39296,64]"));
  // Dim-24 sharding is moved onto the batch dim with a
  // reshape/AllToAll/transpose/reshape reshard sequence.
  const auto lhs_reshard =
      op::Reshape(op::Transpose(op::AllToAll(op::Reshape(lhs))));
  EXPECT_THAT(root,
              AllOf(op::Dot(lhs_reshard, rhs), op::Shape("f32[16,24,39296]")));
}
// Only the output is batch-partitioned; both replicated operands are
// dynamic-sliced along the batch dimension so each device computes just its
// output shard.
TEST_P(SpmdPartitioningTest, EinsumOutputBatchPartitioned) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64] parameter(0)
  %lhs.copy = f32[32,24,64] copy(%lhs), sharding={replicated}
  %rhs = f32[32,39296,64] parameter(1)
  %rhs.copy = f32[32,39296,64] copy(%rhs), sharding={replicated}
  ROOT %dot = f32[32,24,39296] dot(%lhs.copy, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={2},
    sharding={devices=[2,1,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, /*num_devices=*/2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  // Note: the slices are applied after the copies (operands themselves are
  // replicated), unlike the operand-sharded variants of this test.
  const auto lhs_slice =
      AllOf(op::DynamicSlice(op::Copy(op::Parameter(0)), op::Reshape(),
                             op::Constant(), op::Constant()),
            op::Shape("f32[16,24,64]"));
  const auto rhs_slice =
      AllOf(op::DynamicSlice(op::Copy(op::Parameter(1)), op::Reshape(),
                             op::Constant(), op::Constant()),
            op::Shape("f32[16,39296,64]"));
  EXPECT_THAT(root, AllOf(op::Dot(lhs_slice, rhs_slice),
                          op::Shape("f32[16,24,39296]")));
}
// Both contracting dimensions are tiled (2x2 over 4 devices): the local dot
// produces partial sums that must be combined with one AllReduce per
// partitioned contracting dimension.
TEST_P(SpmdPartitioningTest, EinsumContractingDimsPartitioned) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64,128] parameter(0)
  %lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,1,2,2]<=[4]}
  %rhs = f32[32,39296,64,128] parameter(1)
  %rhs.copy = f32[32,39296,64,128] copy(%rhs), sharding={devices=[1,1,2,2]<=[4]}
  ROOT %dot = f32[32,24,39296] dot(%lhs.copy, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
    sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, /*num_devices=*/4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(),
                                op::Constant(), op::Reshape(), op::Reshape())),
      op::Shape("f32[32,24,32,64]"));
  const auto rhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(1), op::Constant(),
                                op::Constant(), op::Reshape(), op::Reshape())),
      op::Shape("f32[32,39296,32,64]"));
  // Two nested AllReduces: one per sharded contracting dimension.
  EXPECT_THAT(root, AllOf(op::AllReduce(op::AllReduce(op::Dot(lhs, rhs))),
                          op::Shape("f32[32,24,39296]")));
}
// Contracting dimension partitioned on 4 devices while the output is only
// 2-way tiled (partially replicated): after the reduce-scatter-style
// AllReduce/DynamicSlice the result is re-replicated over the remaining
// dimension.
TEST_P(SpmdPartitioningTest,
       EinsumContractingDimsPartitionedResultPartiallySliced) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,64] parameter(0), sharding={devices=[1,4]<=[4]}
  %rhs = f32[64,128] parameter(1), sharding={devices=[4,1]<=[4]}
  ROOT %dot = f32[32,128] dot(%lhs, %rhs),
    lhs_contracting_dims={1}, rhs_contracting_dims={0},
    sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, /*num_devices=*/4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(op::Parameter(0), op::Shape("f32[32,16]"));
  const auto rhs = AllOf(op::Parameter(1), op::Shape("f32[16,128]"));
  // Inner AllReduce combines partial contractions; DynamicSlice takes the
  // local output shard; outer AllReduce realizes the partial replication.
  EXPECT_THAT(root, AllOf(op::AllReduce(op::DynamicSlice(
                              op::AllReduce(op::Dot(lhs, rhs)), _, _)),
                          op::Shape("f32[16,128]")));
}
// LHS tiled on two non-contracting dimensions (matching the output tiling):
// the replicated RHS is used as-is and the dot stays communication-free.
TEST_P(SpmdPartitioningTest, EinsumLHSNonContractingDimsPartitioned) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64,128] parameter(0)
  %lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,2]<=[4]}
  %rhs = f32[32,39296,64] parameter(1)
  %rhs.copy = f32[32,39296,64] copy(%rhs), sharding={replicated}
  ROOT %dot = f32[32,24,128,39296] dot(%lhs.copy, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={2},
    sharding={devices=[1,2,2,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, /*num_devices=*/4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
                                op::Constant(), op::Reshape())),
      op::Shape("f32[32,12,64,64]"));
  const auto rhs =
      AllOf(op::Copy(op::Parameter(1)), op::Shape("f32[32,39296,64]"));
  EXPECT_THAT(root, AllOf(op::Dot(lhs, rhs), op::Shape("f32[32,12,64,39296]")));
}
// Mirror of the LHS case: RHS tiled on two non-contracting dimensions that
// match the output tiling, LHS replicated; local dot, no communication.
TEST_P(SpmdPartitioningTest, EinsumRHSNonContractingDimsPartitioned) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64] parameter(0)
  %lhs.copy = f32[32,24,64] copy(%lhs), sharding={replicated}
  %rhs = f32[32,39296,64,128] parameter(1)
  %rhs.copy = f32[32,39296,64,128] copy(%rhs), sharding={devices=[1,2,1,2]<=[4]}
  ROOT %dot = f32[32,24,39296,128] dot(%lhs.copy, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={2},
    sharding={devices=[1,1,2,2]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, /*num_devices=*/4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs =
      AllOf(op::Copy(op::Parameter(0)), op::Shape("f32[32,24,64]"));
  const auto rhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(1), op::Constant(), op::Reshape(),
                                op::Constant(), op::Reshape())),
      op::Shape("f32[32,19648,64,64]"));
  EXPECT_THAT(root, AllOf(op::Dot(lhs, rhs), op::Shape("f32[32,24,19648,64]")));
}
// Output sharded only on an LHS non-contracting dimension while both
// operands are replicated: the partitioner slices the LHS to match the
// output shard and leaves the RHS whole.
TEST_P(SpmdPartitioningTest, EinsumOutputLHSNonContractingDimPartitioned) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64,128] parameter(0)
  %lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={replicated}
  %rhs = f32[32,39296,64,128] parameter(1)
  %rhs.copy = f32[32,39296,64,128] copy(%rhs), sharding={replicated}
  ROOT %dot = f32[32,24,39296] dot(%lhs.copy, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
    sharding={devices=[1,2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, /*num_devices=*/2))
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs =
      AllOf(op::Copy(op::Parameter(0)), op::Shape("f32[32,24,64,128]"));
  const auto rhs =
      AllOf(op::Copy(op::Parameter(1)), op::Shape("f32[32,39296,64,128]"));
  // Only the LHS is sliced (on the dim-24 axis) to produce the output shard.
  EXPECT_THAT(
      root,
      AllOf(op::Dot(AllOf(op::DynamicSlice(lhs, op::Constant(), op::Reshape(),
                                           op::Constant(), op::Constant()),
                          op::Shape("f32[32,12,64,128]")),
                    rhs),
            op::Shape("f32[32,12,39296]")));
}
// Output sharded only on an RHS non-contracting dimension while both
// operands are replicated: the partitioner slices the RHS to match the
// output shard and leaves the LHS whole.
TEST_P(SpmdPartitioningTest, EinsumOutputRHSNonContractingDimPartitioned) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64,128] parameter(0)
  %lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={replicated}
  %rhs = f32[32,39296,64,128] parameter(1)
  %rhs.copy = f32[32,39296,64,128] copy(%rhs), sharding={replicated}
  ROOT %dot = f32[32,24,39296] dot(%lhs.copy, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
    sharding={devices=[1,1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, /*num_devices=*/2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs =
      AllOf(op::Copy(op::Parameter(0)), op::Shape("f32[32,24,64,128]"));
  const auto rhs =
      AllOf(op::Copy(op::Parameter(1)), op::Shape("f32[32,39296,64,128]"));
  // Only the RHS is sliced (on the dim-39296 axis) to produce the output
  // shard.
  EXPECT_THAT(root,
              AllOf(op::Dot(lhs, AllOf(op::DynamicSlice(
                                           rhs, op::Constant(), op::Reshape(),
                                           op::Constant(), op::Constant()),
                                       op::Shape("f32[32,19648,64,128]"))),
                    op::Shape("f32[32,24,19648]")));
}
// Windowed einsum (default, non-unrolled form): contracting dims are
// partitioned on the inputs but the output is sharded on a non-contracting
// dim, so the dot is lowered to a while loop that rotates LHS slices with a
// CollectivePermute and accumulates partial outputs.
TEST_P(SpmdPartitioningTest,
       EinsumRHSWindowedInContractingOutNonContractingPartitioned) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[320,25,64,128] parameter(0)
  %lhs.copy = f32[320,25,64,128] copy(%lhs), sharding={devices=[1,1,4,1]<=[4]}
  %rhs = f32[320,39296,64,128] parameter(1)
  %rhs.copy = f32[320,39296,64,128] copy(%rhs),
    sharding={devices=[1,1,4,1]<=[4]}
  ROOT %dot = f32[320,25,39296] dot(%lhs.copy, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
    sharding={devices=[1,4,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, /*num_devices=*/4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(),
                                op::Constant(), op::Reshape(), op::Constant())),
      op::Shape("f32[320,25,16,128]"));
  const auto rhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(1), op::Constant(),
                                op::Constant(), op::Reshape(), op::Constant())),
      op::Shape("f32[320,39296,16,128]"));
  // Loop state: (lhs, rhs, accumulated output, unused buffer, iteration).
  EXPECT_THAT(
      root,
      AllOf(op::GetTupleElement(op::While(op::Tuple(
                lhs, rhs, op::Broadcast(), op::Broadcast(), op::Constant()))),
            op::Shape("f32[320,7,39296]")));
  const auto while_loop = root->operand(0);
  // Loop runs until the iteration counter reaches the partition count.
  EXPECT_THAT(
      while_loop->while_condition()->root_instruction(),
      op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
  const auto next_i =
      op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
  // Each iteration slices the (padded) LHS window for the current shard.
  auto ds =
      AllOf(op::DynamicSlice(
                op::Pad(op::GetTupleElement(op::Parameter(0)), op::Constant()),
                op::Constant(), op::Reshape(), op::Constant(), op::Constant()),
            op::Shape("f32[320,7,16,128]"));
  auto partial_output =
      AllOf(op::Add(op::GetTupleElement(op::Parameter(0)),
                    op::Dot(ds, op::GetTupleElement(op::Parameter(0)))),
            op::Shape("f32[320,7,39296]"));
  // The CollectivePermute is guarded by a conditional so it is skipped on
  // the final iteration.
  auto window = op::Conditional(op::Compare(next_i, op::Constant()),
                                partial_output, partial_output);
  EXPECT_THAT(while_loop->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(op::Parameter(0)),
                        op::GetTupleElement(op::Parameter(0)), window,
                        op::GetTupleElement(op::Parameter(0)), next_i));
  auto cp_conditional =
      while_loop->while_body()->root_instruction()->operand(2);
  EXPECT_THAT(cp_conditional->true_computation()->root_instruction(),
              op::CollectivePermute(op::Parameter(0)));
  EXPECT_THAT(cp_conditional->false_computation()->root_instruction(),
              op::Parameter(0));
}
// Same einsum as above but with unroll_windowed_einsum enabled: the loop
// body computes two partial outputs per iteration (double-buffered) and the
// final result is the sum of the two accumulators after the loop.
TEST_P(SpmdPartitioningTest,
       UnrolledEinsumRHSWindowedInContractingOutNonContractingPartitioned) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[320,25,64,128] parameter(0)
  %lhs.copy = f32[320,25,64,128] copy(%lhs), sharding={devices=[1,1,4,1]<=[4]}
  %rhs = f32[320,39296,64,128] parameter(1)
  %rhs.copy = f32[320,39296,64,128] copy(%rhs),
    sharding={devices=[1,1,4,1]<=[4]}
  ROOT %dot = f32[320,25,39296] dot(%lhs.copy, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
    sharding={devices=[1,4,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, /*num_devices=*/4,
                           /*conv_halo_exchange_always_on_lhs=*/true,
                           /*choose_faster_windowed_einsum=*/false,
                           /*unroll_windowed_einsum=*/true));
  VLOG(1) << module->ToString();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(),
                                op::Constant(), op::Reshape(), op::Constant())),
      op::Shape("f32[320,25,16,128]"));
  const auto rhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(1), op::Constant(),
                                op::Constant(), op::Reshape(), op::Constant())),
      op::Shape("f32[320,39296,16,128]"));
  const auto while_op = AllOf(
      op::While(op::Tuple(lhs, rhs, op::Broadcast(), op::Broadcast(),
                          op::Constant())),
      op::Shape("(f32[320,25,16,128], f32[320,39296,16,128], f32[320,7,39296],"
                " f32[320,7,39296], u32[])"));
  const auto root = module->entry_computation()->root_instruction();
  // Two accumulators are combined after the loop.
  EXPECT_THAT(
      root, AllOf(op::Add(op::CollectivePermute(op::GetTupleElement(while_op)),
                          op::GetTupleElement(while_op)),
                  op::Shape("f32[320,7,39296]")));
  const auto while_loop = root->operand(1)->operand(0);
  EXPECT_THAT(
      while_loop->while_condition()->root_instruction(),
      op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
  const auto next_i =
      op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
  auto ds =
      AllOf(op::DynamicSlice(
                op::Pad(op::GetTupleElement(op::Parameter(0)), op::Constant()),
                op::Constant(), op::Reshape(), op::Constant(), op::Constant()),
            op::Shape("f32[320,7,16,128]"));
  // Unrolled body: one accumulator receives the permuted value before the
  // add, the other is permuted after accumulating.
  auto partial_output = AllOf(
      op::Add(op::CollectivePermute(op::GetTupleElement(op::Parameter(0))),
              op::Dot(ds, op::GetTupleElement(op::Parameter(0)))),
      op::Shape("f32[320,7,39296]"));
  auto partial_output2 =
      AllOf(op::CollectivePermute(
                op::Add(op::GetTupleElement(op::Parameter(0)),
                        op::Dot(ds, op::GetTupleElement(op::Parameter(0))))),
            op::Shape("f32[320,7,39296]"));
  EXPECT_THAT(while_loop->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(op::Parameter(0)),
                        op::GetTupleElement(op::Parameter(0)), partial_output,
                        partial_output2, next_i));
}
// Same einsum with bidirectional_windowed_einsum enabled: partial dots flow
// through CollectivePermutes in both directions and the final result sums
// the loop output with a permuted copy.
TEST_P(
    SpmdPartitioningTest,
    BidirectionalEinsumRHSWindowedInContractingOutNonContractingPartitioned) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[320,25,64,128] parameter(0)
  %lhs.copy = f32[320,25,64,128] copy(%lhs), sharding={devices=[1,1,4,1]<=[4]}
  %rhs = f32[320,39296,64,128] parameter(1)
  %rhs.copy = f32[320,39296,64,128] copy(%rhs),
    sharding={devices=[1,1,4,1]<=[4]}
  ROOT %dot = f32[320,25,39296] dot(%lhs.copy, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
    sharding={devices=[1,4,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, /*num_devices=*/4,
                           /*conv_halo_exchange_always_on_lhs=*/true,
                           /*choose_faster_windowed_einsum=*/false,
                           /*unroll_windowed_einsum=*/false,
                           /*bidirectional_windowed_einsum=*/true));
  VLOG(1) << module->ToString();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(),
                                op::Constant(), op::Reshape(), op::Constant())),
      op::Shape("f32[320,25,16,128]"));
  const auto rhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(1), op::Constant(),
                                op::Constant(), op::Reshape(), op::Constant())),
      op::Shape("f32[320,39296,16,128]"));
  const auto while_op = AllOf(
      op::While(op::Tuple(lhs, rhs, op::Broadcast(), op::Broadcast(),
                          op::Constant())),
      op::Shape("(f32[320,25,16,128], f32[320,39296,16,128], f32[320,7,39296],"
                " f32[320,7,39296], u32[])"));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root, AllOf(op::Add(op::GetTupleElement(while_op),
                          op::CollectivePermute(op::GetTupleElement(while_op))),
                  op::Shape("f32[320,7,39296]")));
  const auto while_loop = root->operand(0)->operand(0);
  EXPECT_THAT(
      while_loop->while_condition()->root_instruction(),
      op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
  // Bidirectional form advances the induction variable by two per trip.
  const auto next_i =
      op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
              op::Constant());
  const auto partial_dot_pattern =
      AllOf(op::Reshape(op::Slice(
                op::Dot(op::Maximum(), op::GetTupleElement(op::Parameter(0))))),
            op::Shape("f32[320,7,39296]"));
  const auto partial_output_pattern = AllOf(
      op::Add(op::CollectivePermute(op::Add(
                  op::CollectivePermute(op::GetTupleElement(op::Parameter(0))),
                  partial_dot_pattern)),
              partial_dot_pattern),
      op::Shape("f32[320,7,39296]"));
  EXPECT_THAT(
      while_loop->while_body()->root_instruction(),
      op::Tuple(op::GetTupleElement(op::Parameter(0)),
                op::GetTupleElement(op::Parameter(0)), partial_output_pattern,
                partial_output_pattern, next_i));
}
// Regression-style test: the windowed-einsum LHS originates from a
// broadcast rather than a parameter. Only checks that partitioning
// succeeds; no structural expectations on the output.
TEST_P(SpmdPartitioningTest,
       EinsumRHSWindowedInContractingOutNonContractingFromBroadcast) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %constant.1 = f32[] constant(2)
  %broadcast = f32[32,25,64,128] broadcast(%constant.1), dimensions={},
    sharding={devices=[1,1,4,1]<=[4]}
  %add = f32[32,25,64,128] add(%broadcast, %broadcast),
    sharding={devices=[1,1,4,1]<=[4]}
  %rhs = f32[32,39296,64,128] parameter(0)
  %rhs.copy = f32[32,39296,64,128] copy(%rhs),
    sharding={devices=[1,1,4,1]<=[4]}
  ROOT %dot = f32[32,25,39296] dot(%add, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
    sharding={devices=[1,4,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string,
                                                            /*num_devices=*/4));
  VLOG(1) << module->ToString();
}
// LHS-windowed variant: the LHS contracting dim is 4-way sharded (with a
// separate 2-way batch shard) and the output is sharded on an RHS
// non-contracting dim, so the partitioner emits a while loop that rotates
// RHS slices and accumulates partial outputs.
TEST_P(SpmdPartitioningTest,
       EinsumLHSWindowedInContractingOutNonContractingPartitioned) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[16,1024,16384] parameter(0)
  %lhs.copy = f32[16,1024,16384] copy(%lhs), sharding={devices=[2,1,4]<=[8]}
  %rhs = f32[16384,67,128] parameter(1)
  %rhs.copy = f32[16384,67,128] copy(%rhs),
    sharding={devices=[4,1,1,2]<=[2,4]T(1,0) last_tile_dim_replicate}
  ROOT %dot = f32[16,1024,67,128] dot(%lhs.copy, %rhs.copy),
    lhs_batch_dims={}, rhs_batch_dims={},
    lhs_contracting_dims={2}, rhs_contracting_dims={0},
    sharding={devices=[2,1,4,1]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, /*num_devices=*/8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs =
      AllOf(op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(),
                                      op::Constant(), op::Reshape())),
            op::Shape("f32[8,1024,4096]"));
  const auto rhs =
      AllOf(op::Copy(op::DynamicSlice(op::Parameter(1), op::Reshape(),
                                      op::Constant(), op::Constant())),
            op::Shape("f32[4096,67,128]"));
  EXPECT_THAT(
      root,
      AllOf(op::GetTupleElement(op::While(op::Tuple(
                lhs, rhs, op::Broadcast(), op::Broadcast(), op::Constant()))),
            op::Shape("f32[8,1024,17,128]")));
  const auto while_loop = root->operand(0);
  EXPECT_THAT(
      while_loop->while_condition()->root_instruction(),
      op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
  const auto next_i =
      op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
  // Per-iteration window sliced from the padded RHS.
  auto ds =
      AllOf(op::DynamicSlice(
                op::Pad(op::GetTupleElement(op::Parameter(0)), op::Constant()),
                op::Constant(), op::Reshape(), op::Constant()),
            op::Shape("f32[4096,17,128]"));
  auto partial_output =
      AllOf(op::Add(op::GetTupleElement(op::Parameter(0)),
                    op::Dot(op::GetTupleElement(op::Parameter(0)), ds)),
            op::Shape("f32[8,1024,17,128]"));
  // CollectivePermute skipped on the last iteration via a conditional.
  auto window = op::Conditional(op::Compare(next_i, op::Constant()),
                                partial_output, partial_output);
  EXPECT_THAT(while_loop->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(op::Parameter(0)),
                        op::GetTupleElement(op::Parameter(0)), window,
                        op::GetTupleElement(op::Parameter(0)), next_i));
  auto cp_conditional =
      while_loop->while_body()->root_instruction()->operand(2);
  EXPECT_THAT(cp_conditional->true_computation()->root_instruction(),
              op::CollectivePermute(op::Parameter(0)));
  EXPECT_THAT(cp_conditional->false_computation()->root_instruction(),
              op::Parameter(0));
}
// Unrolled version of the LHS-windowed einsum: the loop carries two partial
// accumulators per iteration and the entry computation sums them (one
// routed through a CollectivePermute) after the loop.
TEST_P(SpmdPartitioningTest,
       UnrollEinsumLHSWindowedInContractingOutNonContractingPartitioned) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[16,1024,16384] parameter(0)
  %lhs.copy = f32[16,1024,16384] copy(%lhs), sharding={devices=[2,1,4]<=[8]}
  %rhs = f32[16384,67,128] parameter(1)
  %rhs.copy = f32[16384,67,128] copy(%rhs),
    sharding={devices=[4,1,1,2]<=[2,4]T(1,0) last_tile_dim_replicate}
  ROOT %dot = f32[16,1024,67,128] dot(%lhs.copy, %rhs.copy),
    lhs_batch_dims={}, rhs_batch_dims={},
    lhs_contracting_dims={2}, rhs_contracting_dims={0},
    sharding={devices=[2,1,4,1]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, /*num_devices=*/8,
                           /*conv_halo_exchange_always_on_lhs=*/true,
                           /*choose_faster_windowed_einsum=*/false,
                           /*unroll_windowed_einsum=*/true));
  VLOG(1) << module->ToString();
  const auto lhs =
      AllOf(op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(),
                                      op::Constant(), op::Reshape())),
            op::Shape("f32[8,1024,4096]"));
  const auto rhs =
      AllOf(op::Copy(op::DynamicSlice(op::Parameter(1), op::Reshape(),
                                      op::Constant(), op::Constant())),
            op::Shape("f32[4096,67,128]"));
  const auto while_op =
      AllOf(op::While(op::Tuple(lhs, rhs, op::Broadcast(), op::Broadcast(),
                                op::Constant())),
            op::Shape("(f32[8,1024,4096], f32[4096,67,128], f32[8,1024,17,128],"
                      " f32[8,1024,17,128], u32[])"));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root, AllOf(op::Add(op::CollectivePermute(op::GetTupleElement(while_op)),
                          op::GetTupleElement(while_op)),
                  op::Shape("f32[8,1024,17,128]")));
  const auto while_loop = root->operand(1)->operand(0);
  EXPECT_THAT(
      while_loop->while_condition()->root_instruction(),
      op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
  const auto next_i =
      op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
  auto ds =
      AllOf(op::DynamicSlice(
                op::Pad(op::GetTupleElement(op::Parameter(0)), op::Constant()),
                op::Constant(), op::Reshape(), op::Constant()),
            op::Shape("f32[4096,17,128]"));
  // One accumulator is permuted before the add, the other after it.
  auto partial_output = AllOf(
      op::Add(op::CollectivePermute(op::GetTupleElement(op::Parameter(0))),
              op::Dot(op::GetTupleElement(op::Parameter(0)), ds)),
      op::Shape("f32[8,1024,17,128]"));
  auto partial_output2 =
      AllOf(op::CollectivePermute(
                op::Add(op::GetTupleElement(op::Parameter(0)),
                        op::Dot(op::GetTupleElement(op::Parameter(0)), ds))),
            op::Shape("f32[8,1024,17,128]"));
  EXPECT_THAT(while_loop->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(op::Parameter(0)),
                        op::GetTupleElement(op::Parameter(0)), partial_output,
                        partial_output2, next_i));
}
// Bidirectional version of the LHS-windowed einsum: partial dots flow
// through CollectivePermutes in both directions and the induction variable
// advances by two per loop trip.
TEST_P(
    SpmdPartitioningTest,
    BidirectionalEinsumLHSWindowedInContractingOutNonContractingPartitioned) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[16,1024,16384] parameter(0)
  %lhs.copy = f32[16,1024,16384] copy(%lhs), sharding={devices=[2,1,4]<=[8]}
  %rhs = f32[16384,67,128] parameter(1)
  %rhs.copy = f32[16384,67,128] copy(%rhs),
    sharding={devices=[4,1,1,2]<=[2,4]T(1,0) last_tile_dim_replicate}
  ROOT %dot = f32[16,1024,67,128] dot(%lhs.copy, %rhs.copy),
    lhs_batch_dims={}, rhs_batch_dims={},
    lhs_contracting_dims={2}, rhs_contracting_dims={0},
    sharding={devices=[2,1,4,1]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, /*num_devices=*/8,
                           /*conv_halo_exchange_always_on_lhs=*/true,
                           /*choose_faster_windowed_einsum=*/false,
                           /*unroll_windowed_einsum=*/false,
                           /*bidirectional_windowed_einsum=*/true));
  VLOG(1) << module->ToString();
  const auto lhs =
      AllOf(op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(),
                                      op::Constant(), op::Reshape())),
            op::Shape("f32[8,1024,4096]"));
  const auto rhs =
      AllOf(op::Copy(op::DynamicSlice(op::Parameter(1), op::Reshape(),
                                      op::Constant(), op::Constant())),
            op::Shape("f32[4096,67,128]"));
  const auto while_op =
      AllOf(op::While(op::Tuple(lhs, rhs, op::Broadcast(), op::Broadcast(),
                                op::Constant())),
            op::Shape("(f32[8,1024,4096], f32[4096,67,128], f32[8,1024,17,128],"
                      " f32[8,1024,17,128], u32[])"));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root, AllOf(op::Add(op::GetTupleElement(while_op),
                          op::CollectivePermute(op::GetTupleElement(while_op))),
                  op::Shape("f32[8,1024,17,128]")));
  const auto while_loop = root->operand(0)->operand(0);
  EXPECT_THAT(
      while_loop->while_condition()->root_instruction(),
      op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
  // Two steps of the induction variable per trip.
  const auto next_i =
      op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
              op::Constant());
  const auto partial_dot_pattern =
      AllOf(op::Reshape(op::Slice(
                op::Dot(op::GetTupleElement(op::Parameter(0)), op::Maximum()))),
            op::Shape("f32[8,1024,17,128]"));
  const auto partial_output_pattern = AllOf(
      op::Add(op::CollectivePermute(op::Add(
                  op::CollectivePermute(op::GetTupleElement(op::Parameter(0))),
                  partial_dot_pattern)),
              partial_dot_pattern),
      op::Shape("f32[8,1024,17,128]"));
  EXPECT_THAT(
      while_loop->while_body()->root_instruction(),
      op::Tuple(op::GetTupleElement(op::Parameter(0)),
                op::GetTupleElement(op::Parameter(0)), partial_output_pattern,
                partial_output_pattern, next_i));
}
// Variant of the LHS-windowed einsum where the RHS non-contracting side is
// two dimensions (2x33) and the output is sharded across both of them; the
// per-iteration window therefore slices on two dims.
TEST_P(SpmdPartitioningTest,
       EinsumLHSWindowedInContractingOutNonContractingPartitioned2) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[16,1024,16384] parameter(0)
  %lhs.copy = f32[16,1024,16384] copy(%lhs), sharding={devices=[2,1,4]<=[8]}
  %rhs = f32[16384,2,33,128] parameter(1)
  %rhs.copy = f32[16384,2,33,128] copy(%rhs),
    sharding={devices=[4,1,1,1,2]<=[2,4]T(1,0) last_tile_dim_replicate}
  ROOT %dot = f32[16,1024,2,33,128] dot(%lhs.copy, %rhs.copy),
    lhs_batch_dims={}, rhs_batch_dims={},
    lhs_contracting_dims={2}, rhs_contracting_dims={0},
    sharding={devices=[2,1,2,2,1]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, /*num_devices=*/8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs =
      AllOf(op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(),
                                      op::Constant(), op::Reshape())),
            op::Shape("f32[8,1024,4096]"));
  const auto rhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(1), op::Reshape(), op::Constant(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[4096,2,33,128]"));
  EXPECT_THAT(
      root,
      AllOf(op::GetTupleElement(op::While(op::Tuple(
                lhs, rhs, op::Broadcast(), op::Broadcast(), op::Constant()))),
            op::Shape("f32[8,1024,1,17,128]")));
  const auto while_loop = root->operand(0);
  EXPECT_THAT(
      while_loop->while_condition()->root_instruction(),
      op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
  const auto next_i =
      op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
  // Window slice covers both sharded non-contracting dims (2 -> 1, 33 -> 17).
  auto ds =
      AllOf(op::DynamicSlice(
                op::Pad(op::GetTupleElement(op::Parameter(0)), op::Constant()),
                op::Constant(), op::Reshape(), op::Reshape(), op::Constant()),
            op::Shape("f32[4096,1,17,128]"));
  auto partial_output =
      AllOf(op::Add(op::GetTupleElement(op::Parameter(0)),
                    op::Dot(op::GetTupleElement(op::Parameter(0)), ds)),
            op::Shape("f32[8,1024,1,17,128]"));
  auto window = op::Conditional(op::Compare(next_i, op::Constant()),
                                partial_output, partial_output);
  EXPECT_THAT(while_loop->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(op::Parameter(0)),
                        op::GetTupleElement(op::Parameter(0)), window,
                        op::GetTupleElement(op::Parameter(0)), next_i));
  auto cp_conditional =
      while_loop->while_body()->root_instruction()->operand(2);
  EXPECT_THAT(cp_conditional->true_computation()->root_instruction(),
              op::CollectivePermute(op::Parameter(0)));
  EXPECT_THAT(cp_conditional->false_computation()->root_instruction(),
              op::Parameter(0));
}
// Two dots share the same windowed RHS: the all-gather of the RHS
// (AllReduce-of-DynamicUpdateSlice form) must be emitted once and reused,
// not duplicated per dot.
TEST_P(SpmdPartitioningTest, EinsumRHSWindowedNonContractingNoDoubleAG) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64,128] parameter(0)
  %lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
  %lhs2 = f32[32,24,64,128] parameter(2)
  %lhs2.copy = f32[32,24,64,128] copy(%lhs2), sharding={devices=[1,2,1,1]0,1}
  %rhs = f32[32,39295,64,128] parameter(1)
  %rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
  %dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
    sharding={devices=[1,2,1]0,1}
  %dot2 = f32[32,24,39295] dot(%lhs2.copy, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
    sharding={devices=[1,2,1]0,1}
  ROOT %t = tuple(%dot, %dot2)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string,
                                                            /*num_devices=*/2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  // Both tuple elements match the same pattern; matcher structural equality
  // implies the shared all-gather is not duplicated.
  const auto tuple_element = op::AllReduce(op::DynamicUpdateSlice(
      _, op::Dot(_, op::AllReduce(op::DynamicUpdateSlice())), _, _, _));
  EXPECT_THAT(root, op::Tuple(tuple_element, tuple_element));
}
// Two dots share the RHS but with different LHS/output shardings: only one
// of them can use the windowed (while-loop) einsum; the other falls back to
// a plain dot on a sliced RHS.
TEST_P(SpmdPartitioningTest, EinsumRHSWindowedNonContractingNoSharedSharding) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64,128] parameter(0)
  %lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
  %lhs2 = f32[32,24,64,128] parameter(2)
  %lhs2.copy = f32[32,24,64,128] copy(%lhs2), sharding={devices=[1,1,2,1]0,1}
  %rhs = f32[32,39295,64,128] parameter(1)
  %rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
  %dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
    sharding={devices=[1,2,1]0,1}
  %dot2 = f32[32,24,39295] dot(%lhs2.copy, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
    sharding={devices=[2,1,1]0,1}
  ROOT %t = tuple(%dot, %dot2)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string,
                                                            /*num_devices=*/2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  // First dot: windowed while loop. Second dot: plain dot on a slice.
  EXPECT_THAT(
      root,
      op::Tuple(op::AllReduce(op::DynamicUpdateSlice(
                    _, op::Slice(op::GetTupleElement(op::While(_))), _, _, _)),
                op::AllReduce(op::DynamicUpdateSlice(
                    _, op::Dot(_, op::Slice(_)), _, _, _))));
}
// Same no-shared-sharding scenario with unroll_windowed_einsum enabled:
// the windowed dot's loop body performs two dots per trip (second on the
// collective-permuted RHS) and double-buffers the RHS with two
// CollectivePermutes.
TEST_P(SpmdPartitioningTest,
       UnrollEinsumRHSWindowedNonContractingNoSharedSharding) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64,128] parameter(0)
  %lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
  %lhs2 = f32[32,24,64,128] parameter(2)
  %lhs2.copy = f32[32,24,64,128] copy(%lhs2), sharding={devices=[1,1,2,1]0,1}
  %rhs = f32[32,39295,64,128] parameter(1)
  %rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
  %dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
    sharding={devices=[1,2,1]0,1}
  %dot2 = f32[32,24,39295] dot(%lhs2.copy, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
    sharding={devices=[2,1,1]0,1}
  ROOT %t = tuple(%dot, %dot2)
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, /*num_devices=*/2,
                           /*conv_halo_exchange_always_on_lhs=*/true,
                           /*choose_faster_windowed_einsum=*/false,
                           /*unroll_windowed_einsum=*/true));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root,
      op::Tuple(op::AllReduce(op::DynamicUpdateSlice(
                    _, op::Slice(op::GetTupleElement(op::While(_))), _, _, _)),
                op::AllReduce(op::DynamicUpdateSlice(
                    _, op::Dot(_, op::Slice(_)), _, _, _))));
  const auto while_loop =
      root->operand(0)->operand(0)->operand(1)->operand(0)->operand(0);
  EXPECT_THAT(
      while_loop->while_condition()->root_instruction(),
      op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
  // Unrolled body: induction variable advances twice per trip.
  const auto next_i =
      op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
              op::Constant());
  // NOTE(review): output shape is f32[32,12,39296] (vs the dot's 39295) —
  // presumably the windowed dimension is padded to a multiple of the
  // partition count.
  auto intermediate_output = AllOf(
      op::DynamicUpdateSlice(op::GetTupleElement(op::Parameter(0)),
                             op::Dot(op::GetTupleElement(op::Parameter(0)),
                                     op::GetTupleElement(op::Parameter(0))),
                             op::Constant(), op::Constant(), op::Reshape()),
      op::Shape("f32[32,12,39296]"));
  auto output = AllOf(
      op::DynamicUpdateSlice(
          intermediate_output,
          op::Dot(op::GetTupleElement(op::Parameter(0)),
                  op::CollectivePermute(op::GetTupleElement(op::Parameter(0)))),
          op::Constant(), op::Constant(), op::Reshape()),
      op::Shape("f32[32,12,39296]"));
  EXPECT_THAT(while_loop->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(op::Parameter(0)),
                        op::CollectivePermute(op::CollectivePermute(
                            op::GetTupleElement(op::Parameter(0)))),
                        output, op::GetTupleElement(op::Parameter(0)), next_i));
}
// Same two-dot, no-shared-sharding scenario, on 4 devices with bidirectional
// windowed einsum: only the first dot becomes a windowed while loop.  RHS
// shards travel in both directions, so each iteration applies four partial
// dot updates and rotates two in-flight buffers.
TEST_P(SpmdPartitioningTest,
       BidirectionalEinsumRHSWindowedNonContractingNoSharedSharding) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,4,1,1]<=[4]}
%lhs2 = f32[32,24,64,128] parameter(2)
%lhs2.copy = f32[32,24,64,128] copy(%lhs2), sharding={devices=[1,1,4,1]<=[4]}
%rhs = f32[32,39295,64,128] parameter(1)
%rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,4,1,1]<=[4]}
%dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,4,1]<=[4]}
%dot2 = f32[32,24,39295] dot(%lhs2.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[4,1,1]<=[4]}
ROOT %t = tuple(%dot, %dot2)
})";
  // Positional bool names inferred from the Unroll/Bidirectional test
  // pattern — confirm against the PartitionComputation signature.
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 4,
                           /*conv_halo_exchange_always_on_lhs=*/true,
                           /*choose_faster_windowed_einsum=*/false,
                           /*unroll_windowed_einsum=*/false,
                           /*bidirectional_windowed_einsum=*/true));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root,
      op::Tuple(op::AllReduce(op::DynamicUpdateSlice(
                    _, op::Slice(op::GetTupleElement(op::While(_))), _, _, _)),
                op::AllReduce(op::DynamicUpdateSlice(
                    _, op::Dot(_, op::Slice(_)), _, _, _))));
  const auto while_loop =
      root->operand(0)->operand(0)->operand(1)->operand(0)->operand(0);
  EXPECT_THAT(
      while_loop->while_condition()->root_instruction(),
      op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
  const auto next_i =
      op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
              op::Constant());
  // Each partial dot consumes the concatenation of the two in-flight RHS
  // shards and keeps only its slice of the product.
  const auto partial_dot_pattern =
      AllOf(op::Reshape(op::Slice(op::Dot(op::GetTupleElement(op::Parameter(0)),
                                          op::Concatenate()))),
            op::Shape("f32[32,6,9824]"));
  // Four chained dynamic-update-slices: one per device partition.
  auto intermediate_output1 =
      AllOf(op::DynamicUpdateSlice(op::GetTupleElement(op::Parameter(0)),
                                   partial_dot_pattern, op::Constant(),
                                   op::Constant(), op::Reshape()),
            op::Shape("f32[32,6,39296]"));
  auto intermediate_output2 = AllOf(
      op::DynamicUpdateSlice(intermediate_output1, partial_dot_pattern,
                             op::Constant(), op::Constant(), op::Reshape()),
      op::Shape("f32[32,6,39296]"));
  auto intermediate_output3 = AllOf(
      op::DynamicUpdateSlice(intermediate_output2, partial_dot_pattern,
                             op::Constant(), op::Constant(), op::Reshape()),
      op::Shape("f32[32,6,39296]"));
  auto partial_output = AllOf(
      op::DynamicUpdateSlice(intermediate_output3, partial_dot_pattern,
                             op::Constant(), op::Constant(), op::Reshape()),
      op::Shape("f32[32,6,39296]"));
  // Both direction buffers advance by a double collective-permute per trip.
  EXPECT_THAT(while_loop->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(op::Parameter(0)),
                        op::CollectivePermute(op::CollectivePermute(
                            op::GetTupleElement(op::Parameter(0)))),
                        partial_output,
                        op::CollectivePermute(op::CollectivePermute(
                            op::GetTupleElement(op::Parameter(0)))),
                        next_i));
}
// Baseline (non-unrolled) windowed einsum with the RHS non-contracting
// dimension partitioned: one partial dot per iteration, and the RHS window is
// advanced by a conditional collective-permute that skips the rotation on the
// last iteration.  The padded result (39296) is sliced back to 39295.
TEST_P(SpmdPartitioningTest, EinsumRHSWindowedNonContracting) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[32,39295,64,128] parameter(1)
%rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
ROOT %dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[32,12,64,128]"));
  // RHS is padded 39295 -> 39296 so it divides evenly across 2 devices.
  const auto rhs =
      AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(1), op::Constant()),
                                      op::Constant(), op::Reshape(),
                                      op::Constant(), op::Constant())),
            op::Shape("f32[32,19648,64,128]"));
  EXPECT_THAT(root,
              AllOf(op::Slice(AllOf(op::GetTupleElement(op::While(op::Tuple(
                                        lhs, rhs, op::Broadcast(),
                                        op::Broadcast(), op::Constant()))),
                                    op::Shape("f32[32,12,39296]"))),
                    op::Shape("f32[32,12,39295]")));
  const auto while_loop = root->operand(0)->operand(0);
  EXPECT_THAT(
      while_loop->while_condition()->root_instruction(),
      op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
  const auto next_i =
      op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
  // The RHS window rotates via a conditional: collective-permute only while
  // more iterations remain.
  auto window = op::Conditional(op::Compare(next_i, op::Constant()),
                                op::GetTupleElement(op::Parameter(0)),
                                op::GetTupleElement(op::Parameter(0)));
  auto partial_output = op::Dot(op::GetTupleElement(op::Parameter(0)),
                                op::GetTupleElement(op::Parameter(0)));
  EXPECT_THAT(
      while_loop->while_body()->root_instruction(),
      op::Tuple(op::GetTupleElement(op::Parameter(0)), window,
                op::DynamicUpdateSlice(op::GetTupleElement(op::Parameter(0)),
                                       partial_output, op::Constant(),
                                       op::Constant(), op::Reshape()),
                op::GetTupleElement(op::Parameter(0)), next_i));
  auto cp_conditional =
      while_loop->while_body()->root_instruction()->operand(1);
  EXPECT_THAT(cp_conditional->true_computation()->root_instruction(),
              op::CollectivePermute(op::Parameter(0)));
  EXPECT_THAT(cp_conditional->false_computation()->root_instruction(),
              op::Parameter(0));
}
// Unrolled variant of EinsumRHSWindowedNonContracting: the loop body performs
// two partial dots per trip (resident shard + collective-permuted shard), so
// the conditional window rotation is replaced by an unconditional double
// collective-permute and the induction variable advances by two adds.
TEST_P(SpmdPartitioningTest, UnrollEinsumRHSWindowedNonContracting) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[32,39295,64,128] parameter(1)
%rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
ROOT %dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,2,1]0,1}
})";
  // Positional bool names inferred from the Unroll/Bidirectional test
  // pattern — confirm against the PartitionComputation signature.
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 2,
                           /*conv_halo_exchange_always_on_lhs=*/true,
                           /*choose_faster_windowed_einsum=*/false,
                           /*unroll_windowed_einsum=*/true));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[32,12,64,128]"));
  const auto rhs =
      AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(1), op::Constant()),
                                      op::Constant(), op::Reshape(),
                                      op::Constant(), op::Constant())),
            op::Shape("f32[32,19648,64,128]"));
  EXPECT_THAT(root,
              AllOf(op::Slice(AllOf(op::GetTupleElement(op::While(op::Tuple(
                                        lhs, rhs, op::Broadcast(),
                                        op::Broadcast(), op::Constant()))),
                                    op::Shape("f32[32,12,39296]"))),
                    op::Shape("f32[32,12,39295]")));
  const auto while_loop = root->operand(0)->operand(0);
  EXPECT_THAT(
      while_loop->while_condition()->root_instruction(),
      op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
  const auto next_i =
      op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
              op::Constant());
  auto intermediate_output = AllOf(
      op::DynamicUpdateSlice(op::GetTupleElement(op::Parameter(0)),
                             op::Dot(op::GetTupleElement(op::Parameter(0)),
                                     op::GetTupleElement(op::Parameter(0))),
                             op::Constant(), op::Constant(), op::Reshape()),
      op::Shape("f32[32,12,39296]"));
  auto output = AllOf(
      op::DynamicUpdateSlice(
          intermediate_output,
          op::Dot(op::GetTupleElement(op::Parameter(0)),
                  op::CollectivePermute(op::GetTupleElement(op::Parameter(0)))),
          op::Constant(), op::Constant(), op::Reshape()),
      op::Shape("f32[32,12,39296]"));
  EXPECT_THAT(while_loop->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(op::Parameter(0)),
                        op::CollectivePermute(op::CollectivePermute(
                            op::GetTupleElement(op::Parameter(0)))),
                        output, op::GetTupleElement(op::Parameter(0)), next_i));
}
// Bidirectional variant on 4 devices: the while loop carries two in-flight
// RHS buffers (one seeded with a collective-permute of the local shard), and
// each iteration issues four partial dots against concatenated shards.
TEST_P(SpmdPartitioningTest, BidirectionalEinsumRHSWindowedNonContracting) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,4,1,1]<=[4]}
%rhs = f32[32,39295,64,128] parameter(1)
%rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,4,1,1]<=[4]}
ROOT %dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,4,1]<=[4]}
})";
  // Positional bool names inferred from the Unroll/Bidirectional test
  // pattern — confirm against the PartitionComputation signature.
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 4,
                           /*conv_halo_exchange_always_on_lhs=*/true,
                           /*choose_faster_windowed_einsum=*/false,
                           /*unroll_windowed_einsum=*/false,
                           /*bidirectional_windowed_einsum=*/true));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[32,6,64,128]"));
  // Local RHS shard gains a leading size-1 buffer dimension for the
  // bidirectional pipeline.
  const auto rhs =
      AllOf(op::Reshape(op::Copy(op::DynamicSlice(
                op::Pad(op::Parameter(1), op::Constant()), op::Constant(),
                op::Reshape(), op::Constant(), op::Constant()))),
            op::Shape("f32[32,1,9824,64,128]"));
  EXPECT_THAT(
      root,
      AllOf(op::Slice(AllOf(op::GetTupleElement(op::While(op::Tuple(
                                lhs, rhs, op::Broadcast(),
                                op::CollectivePermute(rhs), op::Constant()))),
                            op::Shape("f32[32,6,39296]"))),
            op::Shape("f32[32,6,39295]")));
  const auto while_loop = root->operand(0)->operand(0);
  EXPECT_THAT(
      while_loop->while_condition()->root_instruction(),
      op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
  const auto next_i =
      op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
              op::Constant());
  const auto partial_dot_pattern =
      AllOf(op::Reshape(op::Slice(op::Dot(op::GetTupleElement(op::Parameter(0)),
                                          op::Concatenate()))),
            op::Shape("f32[32,6,9824]"));
  auto intermediate_output1 =
      AllOf(op::DynamicUpdateSlice(op::GetTupleElement(op::Parameter(0)),
                                   partial_dot_pattern, op::Constant(),
                                   op::Constant(), op::Reshape()),
            op::Shape("f32[32,6,39296]"));
  auto intermediate_output2 = AllOf(
      op::DynamicUpdateSlice(intermediate_output1, partial_dot_pattern,
                             op::Constant(), op::Constant(), op::Reshape()),
      op::Shape("f32[32,6,39296]"));
  auto intermediate_output3 = AllOf(
      op::DynamicUpdateSlice(intermediate_output2, partial_dot_pattern,
                             op::Constant(), op::Constant(), op::Reshape()),
      op::Shape("f32[32,6,39296]"));
  auto partial_output = AllOf(
      op::DynamicUpdateSlice(intermediate_output3, partial_dot_pattern,
                             op::Constant(), op::Constant(), op::Reshape()),
      op::Shape("f32[32,6,39296]"));
  EXPECT_THAT(while_loop->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(op::Parameter(0)),
                        op::CollectivePermute(op::CollectivePermute(
                            op::GetTupleElement(op::Parameter(0)))),
                        partial_output,
                        op::CollectivePermute(op::CollectivePermute(
                            op::GetTupleElement(op::Parameter(0)))),
                        next_i));
}
// Windowed einsum over a partitioned contracting dimension (63 padded to 64):
// the padded RHS shard is masked to zero with select(compare, ...), and each
// iteration accumulates its partial dot with an add instead of a
// dynamic-update-slice, since every iteration covers the full output.
TEST_P(SpmdPartitioningTest, EinsumRHSWindowedContracting) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,63,128] parameter(0)
%lhs.copy = f32[32,24,63,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[32,39296,63,128] parameter(1)
%rhs.copy = f32[32,39296,63,128] copy(%rhs), sharding={devices=[1,1,2,1]0,1}
ROOT %dot = f32[32,24,39296] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[32,12,63,128]"));
  const auto rhs =
      AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(1), op::Constant()),
                                      op::Constant(), op::Constant(),
                                      op::Reshape(), op::Constant())),
            op::Shape("f32[32,39296,32,128]"));
  // Zero out the elements introduced by padding the contracting dimension.
  auto masked_rhs =
      op::Select(op::Compare(), rhs, op::Broadcast(op::Constant()));
  EXPECT_THAT(root, AllOf(op::GetTupleElement(op::While(
                              op::Tuple(lhs, masked_rhs, op::Broadcast(),
                                        op::Broadcast(), op::Constant()))),
                          op::Shape("f32[32,12,39296]")));
  const auto while_loop = root->operand(0);
  EXPECT_THAT(
      while_loop->while_condition()->root_instruction(),
      op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
  const auto next_i =
      op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
  auto window = op::Conditional(op::Compare(next_i, op::Constant()),
                                op::GetTupleElement(op::Parameter(0)),
                                op::GetTupleElement(op::Parameter(0)));
  // The LHS slice for this iteration's contracting window is carved out of
  // the padded LHS.
  auto partial_output = op::Dot(
      op::DynamicSlice(
          op::Pad(op::GetTupleElement(op::Parameter(0)), op::Constant()),
          op::Constant(), op::Constant(), op::Reshape(), op::Constant()),
      op::GetTupleElement(op::Parameter(0)));
  EXPECT_THAT(
      while_loop->while_body()->root_instruction(),
      op::Tuple(op::GetTupleElement(op::Parameter(0)), window,
                op::Add(op::GetTupleElement(op::Parameter(0)), partial_output),
                op::GetTupleElement(op::Parameter(0)), next_i));
  auto cp_conditional =
      while_loop->while_body()->root_instruction()->operand(1);
  EXPECT_THAT(cp_conditional->true_computation()->root_instruction(),
              op::CollectivePermute(op::Parameter(0)));
  EXPECT_THAT(cp_conditional->false_computation()->root_instruction(),
              op::Parameter(0));
}
// Unrolled variant of EinsumRHSWindowedContracting: two add-accumulated
// partial dots per trip (resident shard + collective-permuted shard) and an
// unconditional double collective-permute advancing the window.
TEST_P(SpmdPartitioningTest, UnrollEinsumRHSWindowedContracting) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,63,128] parameter(0)
%lhs.copy = f32[32,24,63,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[32,39296,63,128] parameter(1)
%rhs.copy = f32[32,39296,63,128] copy(%rhs), sharding={devices=[1,1,2,1]0,1}
ROOT %dot = f32[32,24,39296] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,2,1]0,1}
})";
  // Positional bool names inferred from the Unroll/Bidirectional test
  // pattern — confirm against the PartitionComputation signature.
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 2,
                           /*conv_halo_exchange_always_on_lhs=*/true,
                           /*choose_faster_windowed_einsum=*/false,
                           /*unroll_windowed_einsum=*/true));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[32,12,63,128]"));
  const auto rhs =
      AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(1), op::Constant()),
                                      op::Constant(), op::Constant(),
                                      op::Reshape(), op::Constant())),
            op::Shape("f32[32,39296,32,128]"));
  auto masked_rhs =
      op::Select(op::Compare(), rhs, op::Broadcast(op::Constant()));
  EXPECT_THAT(root, AllOf(op::GetTupleElement(op::While(
                              op::Tuple(lhs, masked_rhs, op::Broadcast(),
                                        op::Broadcast(), op::Constant()))),
                          op::Shape("f32[32,12,39296]")));
  const auto while_loop = root->operand(0);
  EXPECT_THAT(
      while_loop->while_condition()->root_instruction(),
      op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
  const auto next_i =
      op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
              op::Constant());
  auto intermediate_output =
      AllOf(op::Add(op::GetTupleElement(op::Parameter(0)),
                    op::Dot(op::DynamicSlice(
                                op::Pad(op::GetTupleElement(op::Parameter(0)),
                                        op::Constant()),
                                op::Constant(), op::Constant(), op::Reshape(),
                                op::Constant()),
                            op::GetTupleElement(op::Parameter(0)))),
            op::Shape("f32[32,12,39296]"));
  auto output = AllOf(
      op::Add(
          intermediate_output,
          op::Dot(
              op::DynamicSlice(op::Pad(op::GetTupleElement(op::Parameter(0)),
                                       op::Constant()),
                               op::Constant(), op::Constant(), op::Reshape(),
                               op::Constant()),
              op::CollectivePermute(op::GetTupleElement(op::Parameter(0))))),
      op::Shape("f32[32,12,39296]"));
  EXPECT_THAT(while_loop->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(op::Parameter(0)),
                        op::CollectivePermute(op::CollectivePermute(
                            op::GetTupleElement(op::Parameter(0)))),
                        output, op::GetTupleElement(op::Parameter(0)), next_i));
}
// Bidirectional contracting-dimension variant on 4 devices: the masked RHS
// shard is reshaped into a pipeline buffer, both direction buffers rotate via
// double collective-permutes, and the body add-accumulates two partial dots
// against concatenated in-flight shards.
TEST_P(SpmdPartitioningTest, BidirectionalEinsumRHSWindowedContracting) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,63,128] parameter(0)
%lhs.copy = f32[32,24,63,128] copy(%lhs), sharding={devices=[1,4,1,1]<=[4]}
%rhs = f32[32,39296,63,128] parameter(1)
%rhs.copy = f32[32,39296,63,128] copy(%rhs), sharding={devices=[1,1,4,1]<=[4]}
ROOT %dot = f32[32,24,39296] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,4,1]<=[4]}
})";
  // Positional bool names inferred from the Unroll/Bidirectional test
  // pattern — confirm against the PartitionComputation signature.
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 4,
                           /*conv_halo_exchange_always_on_lhs=*/true,
                           /*choose_faster_windowed_einsum=*/false,
                           /*unroll_windowed_einsum=*/false,
                           /*bidirectional_windowed_einsum=*/true));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[32,6,63,128]"));
  const auto rhs =
      AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(1), op::Constant()),
                                      op::Constant(), op::Constant(),
                                      op::Reshape(), op::Constant())),
            op::Shape("f32[32,39296,16,128]"));
  // Mask the contracting-dimension padding, then reshape into the pipeline
  // buffer layout.
  auto masked_rhs = op::Reshape(
      op::Select(op::Compare(), rhs, op::Broadcast(op::Constant())));
  EXPECT_THAT(root,
              AllOf(op::GetTupleElement(op::While(op::Tuple(
                        lhs, masked_rhs, op::Broadcast(),
                        op::CollectivePermute(masked_rhs), op::Constant()))),
                    op::Shape("f32[32,6,39296]")));
  const auto while_loop = root->operand(0);
  EXPECT_THAT(
      while_loop->while_condition()->root_instruction(),
      op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
  const auto next_i =
      op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
              op::Constant());
  auto partial_output =
      AllOf(op::Add(op::Add(op::GetTupleElement(op::Parameter(0)),
                            op::Dot(op::Maximum(), op::Concatenate())),
                    op::Dot(op::Maximum(), op::Concatenate())),
            op::Shape("f32[32,6,39296]"));
  EXPECT_THAT(while_loop->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(op::Parameter(0)),
                        op::CollectivePermute(op::CollectivePermute(
                            op::GetTupleElement(op::Parameter(0)))),
                        partial_output,
                        op::CollectivePermute(op::CollectivePermute(
                            op::GetTupleElement(op::Parameter(0)))),
                        next_i));
}
// Verifies that the dot's consumers are NOT sunk into the windowed-einsum
// loop when the multiply feeds additional dependent nodes (reduce ->
// all-reduce -> broadcast -> subtract): the multiply and both reduces are
// expected to stay outside the while loop, applied to its sliced output.
TEST_P(SpmdPartitioningTest,
       EinsumWindowedNonContractingDimensionsNoCodeMotionWithDependentNodes) {
  absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[32,39295,64,128] parameter(1)
%rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
%dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,2,1]0,1}
%constant = f32[] constant(0)
%constant.1 = f32[] constant(2)
%constant.2 = f32[] constant(4)
%broadcast = f32[32,24,39295] broadcast(%constant.1), dimensions={},
sharding={devices=[1,2,1]0,1}
%multiply = f32[32,24,39295] multiply(%dot, %broadcast),
sharding={devices=[1,2,1]0,1}
%reduce = f32[32,24] reduce(%multiply, %constant), dimensions={2},
to_apply=sum, sharding={devices=[1,2]0,1}
%all-reduce = f32[32,24] all-reduce(%reduce),
to_apply=sum, sharding={devices=[1,2]0,1}
%broadcast.1 = f32[32,24,39295] broadcast(%all-reduce), dimensions={0,1},
sharding={devices=[1,2,1]0,1}
%subtract = f32[32,24,39295] subtract(%multiply, %broadcast.1),
sharding={devices=[1,2,1]0,1}
ROOT %reduce.1 = f32[32,24] reduce(%subtract, %constant.2), dimensions={2},
to_apply=sum, sharding={devices=[1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[32,12,64,128]"));
  const auto rhs =
      AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(1), op::Constant()),
                                      op::Constant(), op::Reshape(),
                                      op::Constant(), op::Constant())),
            op::Shape("f32[32,19648,64,128]"));
  // The multiply/reduce chain consumes the loop's sliced output instead of
  // being folded into the loop body.
  const auto while_output =
      AllOf(op::Slice(op::GetTupleElement(op::While(op::Tuple(
                lhs, rhs, op::Broadcast(), op::Broadcast(), op::Constant())))),
            op::Shape("f32[32,12,39295]"));
  const auto multiply =
      AllOf(op::Multiply(while_output, op::Broadcast(op::Constant())),
            op::Shape("f32[32,12,39295]"));
  EXPECT_THAT(
      root,
      AllOf(op::Reduce(
                op::Subtract(multiply, op::Broadcast(op::AllReduce(op::Reduce(
                                           multiply, op::Constant())))),
                op::Constant()),
            op::Shape("f32[32,12]")));
  const auto while_loop =
      root->operand(0)->operand(0)->operand(0)->operand(0)->operand(0);
  EXPECT_THAT(
      while_loop->while_condition()->root_instruction(),
      op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
  const auto next_i =
      op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
  // The loop body contains only the plain windowed dot update.
  auto output = op::DynamicUpdateSlice(
      op::GetTupleElement(op::Parameter(0)),
      op::Dot(op::GetTupleElement(op::Parameter(0)),
              op::GetTupleElement(op::Parameter(0))),
      op::Constant(), op::Constant(), op::Reshape(op::DynamicSlice()));
  EXPECT_THAT(while_loop->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(op::Parameter(0)),
                        op::Conditional(op::Compare(next_i, op::Constant()),
                                        op::GetTupleElement(op::Parameter(0)),
                                        op::GetTupleElement(op::Parameter(0))),
                        output, op::GetTupleElement(op::Parameter(0)), next_i));
}
// When the windowed dot is consumed only by a multiply and a reduce over the
// windowed (RHS non-contracting) dimension, the reduction IS sunk into the
// while loop: the loop carries a subtuple whose third element accumulates the
// masked partial reduce of each iteration.
TEST_P(SpmdPartitioningTest, EinsumRHSWindowedNonContractingReduce1) {
  absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[32,39295,64,128] parameter(1)
%rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
%dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,2,1]0,1}
%constant = f32[] constant(0)
%constant.1 = f32[] constant(2)
%broadcast = f32[32,24,39295] broadcast(%constant.1), dimensions={},
sharding={devices=[1,2,1]0,1}
%multiply = f32[32,24,39295] multiply(%dot, %broadcast),
sharding={devices=[1,2,1]0,1}
ROOT %reduce = f32[32,24] reduce(%multiply, %constant), dimensions={2},
to_apply=sum, sharding={devices=[1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[32,12,64,128]"));
  const auto rhs =
      AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(1), op::Constant()),
                                      op::Constant(), op::Reshape(),
                                      op::Constant(), op::Constant())),
            op::Shape("f32[32,19648,64,128]"));
  // (reduce init, multiply operand, reduce accumulator) carried by the loop.
  auto input_subtuple =
      op::Tuple(op::Constant(), op::Constant(), op::Broadcast(op::Constant()));
  EXPECT_THAT(
      root,
      AllOf(op::GetTupleElement(op::GetTupleElement(op::While(op::Tuple(
                lhs, rhs, input_subtuple, op::Broadcast(), op::Constant())))),
            op::Shape("f32[32,12]")));
  const auto while_loop = root->operand(0)->operand(0);
  EXPECT_THAT(
      while_loop->while_condition()->root_instruction(),
      op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
  const auto next_i =
      op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
  // Each iteration reduces the masked (select) partial product and adds it
  // into the sliced accumulator.
  auto output_tuple = op::Tuple(
      op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
      op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
      op::Add(op::Reduce(
                  op::Select(op::Compare(),
                             op::Multiply(
                                 op::Dot(op::GetTupleElement(op::Parameter(0)),
                                         op::GetTupleElement(op::Parameter(0))),
                                 op::DynamicSlice()),
                             op::Broadcast()),
                  op::GetTupleElement(op::GetTupleElement(op::Parameter(0)))),
              op::DynamicSlice(
                  op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
                  op::Constant(), op::Constant())));
  EXPECT_THAT(
      while_loop->while_body()->root_instruction(),
      op::Tuple(op::GetTupleElement(op::Parameter(0)),
                op::Conditional(op::Compare(next_i, op::Constant()),
                                op::GetTupleElement(op::Parameter(0)),
                                op::GetTupleElement(op::Parameter(0))),
                output_tuple, op::GetTupleElement(op::Parameter(0)), next_i));
}
// Unrolled variant of EinsumRHSWindowedNonContractingReduce1: two
// reduce-accumulate steps per trip — one on the collective-permuted shard
// (intermediate) and one on the resident shard — with an unconditional double
// collective-permute advancing the RHS window.
TEST_P(SpmdPartitioningTest, UnrollEinsumRHSWindowedNonContractingReduce1) {
  absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[32,39295,64,128] parameter(1)
%rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
%dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,2,1]0,1}
%constant = f32[] constant(0)
%constant.1 = f32[] constant(2)
%broadcast = f32[32,24,39295] broadcast(%constant.1), dimensions={},
sharding={devices=[1,2,1]0,1}
%multiply = f32[32,24,39295] multiply(%dot, %broadcast),
sharding={devices=[1,2,1]0,1}
ROOT %reduce = f32[32,24] reduce(%multiply, %constant), dimensions={2},
to_apply=sum, sharding={devices=[1,2]0,1}
})";
  // Positional bool names inferred from the Unroll/Bidirectional test
  // pattern — confirm against the PartitionComputation signature.
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 2,
                           /*conv_halo_exchange_always_on_lhs=*/true,
                           /*choose_faster_windowed_einsum=*/false,
                           /*unroll_windowed_einsum=*/true));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[32,12,64,128]"));
  const auto rhs =
      AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(1), op::Constant()),
                                      op::Constant(), op::Reshape(),
                                      op::Constant(), op::Constant())),
            op::Shape("f32[32,19648,64,128]"));
  auto input_subtuple =
      op::Tuple(op::Constant(), op::Constant(), op::Broadcast(op::Constant()));
  EXPECT_THAT(
      root,
      AllOf(op::GetTupleElement(op::GetTupleElement(op::While(op::Tuple(
                lhs, rhs, input_subtuple, op::Broadcast(), op::Constant())))),
            op::Shape("f32[32,12]")));
  const auto while_loop = root->operand(0)->operand(0);
  EXPECT_THAT(
      while_loop->while_condition()->root_instruction(),
      op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
  const auto next_i =
      op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
              op::Constant());
  auto intermediate_output = AllOf(
      op::Add(
          op::Reduce(
              op::Select(op::Compare(),
                         op::Multiply(
                             op::Dot(op::GetTupleElement(op::Parameter(0)),
                                     op::CollectivePermute(op::GetTupleElement(
                                         op::Parameter(0)))),
                             op::DynamicSlice()),
                         op::Broadcast()),
              op::GetTupleElement(op::GetTupleElement(op::Parameter(0)))),
          op::DynamicSlice(
              op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
              op::Constant(), op::Constant())),
      op::Shape("f32[32,12]"));
  auto output_tuple = op::Tuple(
      op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
      op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
      op::Add(op::Reduce(
                  op::Select(op::Compare(),
                             op::Multiply(
                                 op::Dot(op::GetTupleElement(op::Parameter(0)),
                                         op::GetTupleElement(op::Parameter(0))),
                                 op::DynamicSlice()),
                             op::Broadcast()),
                  op::GetTupleElement(op::GetTupleElement(op::Parameter(0)))),
              op::DynamicSlice(intermediate_output, op::Constant(),
                               op::Constant())));
  EXPECT_THAT(
      while_loop->while_body()->root_instruction(),
      op::Tuple(op::GetTupleElement(op::Parameter(0)),
                op::CollectivePermute(op::CollectivePermute(
                    op::GetTupleElement(op::Parameter(0)))),
                output_tuple, op::GetTupleElement(op::Parameter(0)), next_i));
}
// Bidirectional variant of the fused-reduce case on 4 devices: four partial
// reduce-accumulate steps per trip, each reducing the masked product of a
// sliced dot against the concatenated in-flight RHS shards; both pipeline
// buffers rotate via double collective-permutes.
TEST_P(SpmdPartitioningTest,
       BidirectionalEinsumRHSWindowedNonContractingReduce1) {
  absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,4,1,1]<=[4]}
%rhs = f32[32,39295,64,128] parameter(1)
%rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,4,1,1]<=[4]}
%dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,4,1]<=[4]}
%constant = f32[] constant(0)
%constant.1 = f32[] constant(2)
%broadcast = f32[32,24,39295] broadcast(%constant.1), dimensions={},
sharding={devices=[1,4,1]<=[4]}
%multiply = f32[32,24,39295] multiply(%dot, %broadcast),
sharding={devices=[1,4,1]<=[4]}
ROOT %reduce = f32[32,24] reduce(%multiply, %constant), dimensions={2},
to_apply=sum, sharding={devices=[1,4]<=[4]}
})";
  // Positional bool names inferred from the Unroll/Bidirectional test
  // pattern — confirm against the PartitionComputation signature.
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 4,
                           /*conv_halo_exchange_always_on_lhs=*/true,
                           /*choose_faster_windowed_einsum=*/false,
                           /*unroll_windowed_einsum=*/false,
                           /*bidirectional_windowed_einsum=*/true));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[32,6,64,128]"));
  const auto rhs =
      AllOf(op::Reshape(op::Copy(op::DynamicSlice(
                op::Pad(op::Parameter(1), op::Constant()), op::Constant(),
                op::Reshape(), op::Constant(), op::Constant()))),
            op::Shape("f32[32,1,9824,64,128]"));
  auto input_subtuple =
      op::Tuple(op::Constant(), op::Constant(), op::Broadcast(op::Constant()));
  EXPECT_THAT(root,
              AllOf(op::GetTupleElement(op::GetTupleElement(op::While(
                        op::Tuple(lhs, rhs, input_subtuple,
                                  op::CollectivePermute(), op::Constant())))),
                    op::Shape("f32[32,6]")));
  const auto while_loop = root->operand(0)->operand(0);
  EXPECT_THAT(
      while_loop->while_condition()->root_instruction(),
      op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
  const auto next_i =
      op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
              op::Constant());
  auto partial_reduce_pattern = AllOf(
      op::Reduce(
          op::Select(op::Compare(),
                     op::Multiply(op::Reshape(op::Slice(op::Dot(
                                      op::GetTupleElement(op::Parameter(0)),
                                      op::Concatenate()))),
                                  op::DynamicSlice()),
                     op::Broadcast()),
          op::GetTupleElement(op::GetTupleElement(op::Parameter(0)))),
      op::Shape("f32[32,6]"));
  auto intermediate_output1 = AllOf(
      op::Add(partial_reduce_pattern,
              op::DynamicSlice(
                  op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
                  op::Constant(), op::Constant())),
      op::Shape("f32[32,6]"));
  auto intermediate_output2 =
      AllOf(op::Add(partial_reduce_pattern,
                    op::DynamicSlice(intermediate_output1, op::Constant(),
                                     op::Constant())),
            op::Shape("f32[32,6]"));
  auto intermediate_output3 =
      AllOf(op::Add(partial_reduce_pattern,
                    op::DynamicSlice(intermediate_output2, op::Constant(),
                                     op::Constant())),
            op::Shape("f32[32,6]"));
  auto output_tuple =
      op::Tuple(op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
                op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
                op::Add(partial_reduce_pattern,
                        op::DynamicSlice(intermediate_output3, op::Constant(),
                                         op::Constant())));
  EXPECT_THAT(while_loop->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(op::Parameter(0)),
                        op::CollectivePermute(op::CollectivePermute(
                            op::GetTupleElement(op::Parameter(0)))),
                        output_tuple,
                        op::CollectivePermute(op::CollectivePermute(
                            op::GetTupleElement(op::Parameter(0)))),
                        next_i));
}
// Windowed-einsum dot whose RHS non-contracting dimension is windowed,
// feeding a reduce over the LHS non-contracting dimension; this variant is a
// compilation-only sanity check on 2 devices (the Unroll/Bidirectional
// variants below validate the partitioned pattern in detail).
TEST_P(SpmdPartitioningTest, EinsumRHSWindowedNonContractingReduce2) {
  absl::string_view hlo_string = R"(
HloModule module
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add = f32[] add(a, b)
}
ENTRY entry {
  %lhs = f32[32,24,64,128] parameter(0)
  %lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
  %rhs = f32[32,39295,64,128] parameter(1)
  %rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
  %dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
    sharding={devices=[1,2,1]0,1}
  %constant = f32[] constant(0)
  %constant.1 = f32[] constant(2)
  %broadcast = f32[32,24,39295] broadcast(%constant.1), dimensions={},
    sharding={devices=[1,2,1]0,1}
  %multiply = f32[32,24,39295] multiply(%dot, %broadcast),
    sharding={devices=[1,2,1]0,1}
  ROOT %reduce = f32[32,39295] reduce(%multiply, %constant), dimensions={1},
    to_apply=sum, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_string, /*num_devices=*/2));
  VLOG(1) << partitioned_module->ToString();
}
// Same computation as EinsumRHSWindowedNonContractingReduce2, but with the
// windowed-einsum while loop unrolled.  Verifies the exact partitioned
// pattern: the RHS shard is rotated between devices via double
// collective-permutes, and each unrolled step's reduce result is written
// into the padded accumulation buffer with dynamic-update-slice.
TEST_P(SpmdPartitioningTest, UnrollEinsumRHSWindowedNonContractingReduce2) {
  absl::string_view hlo_string = R"(
HloModule module
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add = f32[] add(a, b)
}
ENTRY entry {
  %lhs = f32[32,24,64,128] parameter(0)
  %lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
  %rhs = f32[32,39295,64,128] parameter(1)
  %rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
  %dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
    sharding={devices=[1,2,1]0,1}
  %constant = f32[] constant(0)
  %constant.1 = f32[] constant(2)
  %broadcast = f32[32,24,39295] broadcast(%constant.1), dimensions={},
    sharding={devices=[1,2,1]0,1}
  %multiply = f32[32,24,39295] multiply(%dot, %broadcast),
    sharding={devices=[1,2,1]0,1}
  ROOT %reduce = f32[32,39295] reduce(%multiply, %constant), dimensions={1},
    to_apply=sum, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, /*num_devices=*/2,
                           /*conv_halo_exchange_always_on_lhs=*/true,
                           /*choose_faster_windowed_einsum=*/false,
                           /*unroll_windowed_einsum=*/true));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  // LHS: the non-contracting dimension (24) is split in half per device.
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[32,6,64,128]"));
  // RHS: dim 39295 is padded (to 39296) before being sharded.
  const auto rhs =
      AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(1), op::Constant()),
                                      op::Constant(), op::Reshape(),
                                      op::Constant(), op::Constant())),
            op::Shape("f32[32,19648,64,128]"));
  auto input_subtuple =
      op::Tuple(op::Constant(), op::Constant(), op::Broadcast(op::Constant()));
  // Final result: the loop's accumulator is sliced back to the unpadded
  // size and all-reduced to produce the replicated reduce output.
  EXPECT_THAT(
      root,
      AllOf(op::AllReduce(op::Slice(op::GetTupleElement(op::GetTupleElement(
                op::While(op::Tuple(lhs, rhs, input_subtuple, op::Broadcast(),
                                    op::Constant())))))),
            op::Shape("f32[32,39295]")));
  const auto while_loop = root->operand(0)->operand(0)->operand(0)->operand(0);
  // Loop condition compares the induction variable against the trip count.
  EXPECT_THAT(
      while_loop->while_condition()->root_instruction(),
      op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
  // Unrolled body: the induction variable is advanced twice (once per
  // unrolled step).
  const auto next_i =
      op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
              op::Constant());
  // First unrolled step: dot against the collective-permuted RHS shard,
  // reduce, then write the partial result into the accumulation buffer.
  auto intermediate_output = AllOf(
      op::DynamicUpdateSlice(
          op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
          op::Reduce(
              op::Multiply(op::Dot(op::GetTupleElement(op::Parameter(0)),
                                   op::CollectivePermute(
                                       op::GetTupleElement(op::Parameter(0)))),
                           op::DynamicSlice()),
              op::GetTupleElement(op::GetTupleElement(op::Parameter(0)))),
          op::Constant(), op::Reshape()),
      op::Shape("f32[32,39296]"));
  // Second unrolled step accumulates on top of the first step's output.
  auto output_tuple = op::Tuple(
      op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
      op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
      op::DynamicUpdateSlice(
          intermediate_output,
          op::Reduce(
              op::Multiply(op::Dot(op::GetTupleElement(op::Parameter(0)),
                                   op::GetTupleElement(op::Parameter(0))),
                           op::DynamicSlice()),
              op::GetTupleElement(op::GetTupleElement(op::Parameter(0)))),
          op::Constant(), op::Reshape()));
  EXPECT_THAT(
      while_loop->while_body()->root_instruction(),
      op::Tuple(op::GetTupleElement(op::Parameter(0)),
                op::CollectivePermute(op::CollectivePermute(
                    op::GetTupleElement(op::Parameter(0)))),
                output_tuple, op::GetTupleElement(op::Parameter(0)), next_i));
}
// Bidirectional variant of EinsumRHSWindowedNonContractingReduce2 on 4
// devices: two buffered RHS shards circulate via collective-permutes and
// each iteration performs several partial reduces, one per buffered shard.
TEST_P(SpmdPartitioningTest,
       BidirectionalEinsumRHSWindowedNonContractingReduce2) {
  absl::string_view hlo_string = R"(
HloModule module
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add = f32[] add(a, b)
}
ENTRY entry {
  %lhs = f32[32,24,64,128] parameter(0)
  %lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,4,1,1]<=[4]}
  %rhs = f32[32,39295,64,128] parameter(1)
  %rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,4,1,1]<=[4]}
  %dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
    sharding={devices=[1,4,1]<=[4]}
  %constant = f32[] constant(0)
  %constant.1 = f32[] constant(2)
  %broadcast = f32[32,24,39295] broadcast(%constant.1), dimensions={},
    sharding={devices=[1,4,1]<=[4]}
  %multiply = f32[32,24,39295] multiply(%dot, %broadcast),
    sharding={devices=[1,4,1]<=[4]}
  ROOT %reduce = f32[32,39295] reduce(%multiply, %constant), dimensions={1},
    to_apply=sum, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, /*num_devices=*/4,
                           /*conv_halo_exchange_always_on_lhs=*/true,
                           /*choose_faster_windowed_einsum=*/false,
                           /*unroll_windowed_einsum=*/false,
                           /*bidirectional_windowed_einsum=*/true));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  // LHS non-contracting dim 24 is split 4 ways (6 per device).
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
                                op::Constant(), op::Constant())),
      op::Shape("f32[32,6,64,128]"));
  // RHS is padded to 39296, split 4 ways, and reshaped with a leading unit
  // dimension used by the bidirectional buffering.
  const auto rhs =
      AllOf(op::Reshape(op::Copy(op::DynamicSlice(
                op::Pad(op::Parameter(1), op::Constant()), op::Constant(),
                op::Reshape(), op::Constant(), op::Constant()))),
            op::Shape("f32[32,1,9824,64,128]"));
  auto input_subtuple =
      op::Tuple(op::Constant(), op::Constant(), op::Broadcast(op::Constant()));
  // The loop carries both the RHS shard and its collective-permuted copy.
  EXPECT_THAT(
      root, AllOf(op::AllReduce(op::Slice(op::GetTupleElement(
                      op::GetTupleElement(op::While(op::Tuple(
                          lhs, rhs, input_subtuple, op::CollectivePermute(rhs),
                          op::Constant())))))),
                  op::Shape("f32[32,39295]")));
  const auto while_loop = root->operand(0)->operand(0)->operand(0)->operand(0);
  EXPECT_THAT(
      while_loop->while_condition()->root_instruction(),
      op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
  const auto next_i =
      op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
              op::Constant());
  // One partial reduce per buffered shard: dot against the concatenated
  // shards, multiply by the broadcast operand, then reduce.
  auto partial_reduce_pattern = AllOf(
      op::Reduce(op::Multiply(op::Reshape(op::Slice(
                                  op::Dot(op::GetTupleElement(op::Parameter(0)),
                                          op::Concatenate()))),
                              op::DynamicSlice(op::Broadcast(), op::Constant(),
                                               op::Constant(), op::Reshape())),
                 op::GetTupleElement(op::GetTupleElement(op::Parameter(0)))),
      op::Shape("f32[32,9824]"));
  // Four partial results are written into the padded accumulation buffer
  // one after another.
  auto intermediate_output1 =
      AllOf(op::DynamicUpdateSlice(
                op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
                partial_reduce_pattern, op::Constant(), op::Reshape()),
            op::Shape("f32[32,39296]"));
  auto intermediate_output2 =
      AllOf(op::DynamicUpdateSlice(intermediate_output1, partial_reduce_pattern,
                                   op::Constant(), op::Reshape()),
            op::Shape("f32[32,39296]"));
  auto intermediate_output3 =
      AllOf(op::DynamicUpdateSlice(intermediate_output2, partial_reduce_pattern,
                                   op::Constant(), op::Reshape()),
            op::Shape("f32[32,39296]"));
  auto output_tuple = op::Tuple(
      op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
      op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
      op::DynamicUpdateSlice(intermediate_output3, partial_reduce_pattern,
                             op::Constant(), op::Reshape()));
  // Both buffered RHS shards are advanced via double collective-permutes
  // every iteration.
  EXPECT_THAT(while_loop->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(op::Parameter(0)),
                        op::CollectivePermute(op::CollectivePermute(
                            op::GetTupleElement(op::Parameter(0)))),
                        output_tuple,
                        op::CollectivePermute(op::CollectivePermute(
                            op::GetTupleElement(op::Parameter(0)))),
                        next_i));
}
// Windowed einsum whose LHS is produced from a broadcast and whose
// contracting dimension is sharded; compilation-only sanity check on 2
// devices (the Unroll/Bidirectional variants check the emitted pattern).
TEST_P(SpmdPartitioningTest, EinsumRHSWindowedContractingFromBroadcast) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %rhs = f32[32,39296,63,128] parameter(0)
  %rhs.copy = f32[32,39296,63,128] copy(%rhs), sharding={devices=[1,1,2,1]0,1}
  %constant.1 = f32[] constant(2)
  %broadcast = f32[32,24,63,128] broadcast(%constant.1), dimensions={},
    sharding={devices=[1,2,1,1]0,1}
  %add = f32[32,24,63,128] add(%broadcast, %broadcast),
    sharding={devices=[1,2,1,1]0,1}
  ROOT %dot = f32[32,24,39296] dot(%add, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
    sharding={devices=[1,2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_string, /*num_devices=*/2));
  VLOG(1) << partitioned_module->ToString();
}
// Unrolled windowed einsum where the sharded-contracting-dim LHS comes from
// a broadcast: the loop rebuilds the needed LHS window from the carried
// scalar instead of materializing the broadcast, and the RHS is masked
// where the contracting dimension was padded.
TEST_P(SpmdPartitioningTest, UnrollEinsumRHSWindowedContractingFromBroadcast) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %rhs = f32[32,39296,63,128] parameter(0)
  %rhs.copy = f32[32,39296,63,128] copy(%rhs), sharding={devices=[1,1,2,1]0,1}
  %constant.1 = f32[] constant(2)
  %broadcast = f32[32,24,63,128] broadcast(%constant.1), dimensions={},
    sharding={devices=[1,2,1,1]0,1}
  %add = f32[32,24,63,128] add(%broadcast, %broadcast),
    sharding={devices=[1,2,1,1]0,1}
  ROOT %dot = f32[32,24,39296] dot(%add, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
    sharding={devices=[1,2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, /*num_devices=*/2,
                           /*conv_halo_exchange_always_on_lhs=*/true,
                           /*choose_faster_windowed_einsum=*/false,
                           /*unroll_windowed_einsum=*/true));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  // The broadcast LHS is carried only as its scalar operand in a tuple.
  const auto lhs = op::Tuple(op::Constant());
  // Contracting dim 63 is padded (to 64) and split in half per device.
  const auto rhs =
      AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(0), op::Constant()),
                                      op::Constant(), op::Constant(),
                                      op::Reshape(), op::Constant())),
            op::Shape("f32[32,39296,32,128]"));
  // Padded RHS elements are zeroed so they don't contribute to the dot.
  auto masked_rhs =
      op::Select(op::Compare(), rhs, op::Broadcast(op::Constant()));
  EXPECT_THAT(root, AllOf(op::GetTupleElement(op::While(
                              op::Tuple(lhs, masked_rhs, op::Broadcast(),
                                        op::Broadcast(), op::Constant()))),
                          op::Shape("f32[32,12,39296]")));
  const auto while_loop = root->operand(0);
  EXPECT_THAT(
      while_loop->while_condition()->root_instruction(),
      op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
  // Unrolled body: induction variable is advanced twice per iteration.
  const auto next_i =
      op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
              op::Constant());
  // The LHS window is reconstructed from the carried scalar each step.
  auto padded_broadcast_sum = op::Pad(
      op::Add(op::Broadcast(
                  op::GetTupleElement(op::GetTupleElement(op::Parameter(0)))),
              op::Broadcast(
                  op::GetTupleElement(op::GetTupleElement(op::Parameter(0))))),
      op::Constant());
  // First unrolled step accumulates a dot against the local RHS shard...
  auto intermediate_output =
      AllOf(op::Add(op::GetTupleElement(op::Parameter(0)),
                    op::Dot(op::DynamicSlice(padded_broadcast_sum,
                                             op::Constant(), op::Constant(),
                                             op::Reshape(), op::Constant()),
                            op::GetTupleElement(op::Parameter(0)))),
            op::Shape("f32[32,12,39296]"));
  // ...and the second step uses the collective-permuted RHS shard.
  auto output = AllOf(
      op::Add(
          intermediate_output,
          op::Dot(
              op::DynamicSlice(padded_broadcast_sum, op::Constant(),
                               op::Constant(), op::Reshape(), op::Constant()),
              op::CollectivePermute(op::GetTupleElement(op::Parameter(0))))),
      op::Shape("f32[32,12,39296]"));
  EXPECT_THAT(while_loop->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(op::Parameter(0)),
                        op::CollectivePermute(op::CollectivePermute(
                            op::GetTupleElement(op::Parameter(0)))),
                        output, op::GetTupleElement(op::Parameter(0)), next_i));
}
// Bidirectional version of the broadcast-origin contracting-dim windowed
// einsum on 4 devices: two masked RHS shard buffers circulate via
// collective-permutes and the body consumes concatenations of them.
TEST_P(SpmdPartitioningTest,
       BidirectionalEinsumRHSWindowedContractingFromBroadcast) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %rhs = f32[32,39296,63,128] parameter(0)
  %rhs.copy = f32[32,39296,63,128] copy(%rhs), sharding={devices=[1,1,4,1]<=[4]}
  %constant.1 = f32[] constant(2)
  %broadcast = f32[32,24,63,128] broadcast(%constant.1), dimensions={},
    sharding={devices=[1,4,1,1]<=[4]}
  %add = f32[32,24,63,128] add(%broadcast, %broadcast),
    sharding={devices=[1,4,1,1]<=[4]}
  ROOT %dot = f32[32,24,39296] dot(%add, %rhs.copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
    sharding={devices=[1,4,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, /*num_devices=*/4,
                           /*conv_halo_exchange_always_on_lhs=*/true,
                           /*choose_faster_windowed_einsum=*/false,
                           /*unroll_windowed_einsum=*/false,
                           /*bidirectional_windowed_einsum=*/true));
  VLOG(1) << module->ToString();
  // NOTE(review): input_subtuple is never referenced below — it appears to
  // be leftover from a sibling test and is dead code here.
  auto input_subtuple =
      op::Tuple(op::Constant(), op::Constant(), op::Broadcast(op::Constant()));
  const auto root = module->entry_computation()->root_instruction();
  // The broadcast LHS is carried only as its scalar operand in a tuple.
  const auto lhs = op::Tuple(op::Constant());
  // Contracting dim 63 is padded (to 64) and split 4 ways (16 per device).
  const auto rhs =
      AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(0), op::Constant()),
                                      op::Constant(), op::Constant(),
                                      op::Reshape(), op::Constant())),
            op::Shape("f32[32,39296,16,128]"));
  // Padded elements are masked to zero before entering the loop.
  auto masked_rhs = op::Reshape(
      op::Select(op::Compare(), rhs, op::Broadcast(op::Constant())));
  EXPECT_THAT(root,
              AllOf(op::GetTupleElement(op::While(op::Tuple(
                        lhs, masked_rhs, op::Broadcast(),
                        op::CollectivePermute(masked_rhs), op::Constant()))),
                    op::Shape("f32[32,6,39296]")));
  const auto while_loop = root->operand(0);
  EXPECT_THAT(
      while_loop->while_condition()->root_instruction(),
      op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
  const auto next_i =
      op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
              op::Constant());
  // Two dots per iteration — one per direction's concatenated shards —
  // both accumulated onto the running partial result.
  auto output =
      AllOf(op::Add(op::Add(op::GetTupleElement(op::Parameter(0)),
                            op::Dot(op::Maximum(), op::Concatenate())),
                    op::Dot(op::Maximum(), op::Concatenate())),
            op::Shape("f32[32,6,39296]"));
  EXPECT_THAT(while_loop->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(op::Parameter(0)),
                        op::CollectivePermute(op::CollectivePermute(
                            op::GetTupleElement(op::Parameter(0)))),
                        output,
                        op::CollectivePermute(op::CollectivePermute(
                            op::GetTupleElement(op::Parameter(0)))),
                        next_i));
}
// Convolution-as-einsum where the RHS output dimension is partitioned on
// two mesh dimensions: the RHS must first be re-assembled (all-reduce of
// dynamic-update-slices) along one of them before the local convolution.
TEST_P(SpmdPartitioningTest, EinsumNonContractingDimPartitionOnTwoDims) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = bf16[8,1024,2,1536] parameter(0)
  %lhs.copy = bf16[8,1024,2,1536] copy(lhs),
    sharding={devices=[4,1,2,1]<=[8]}
  %rhs = bf16[2,1536,512,1] parameter(1)
  %rhs.copy = bf16[2,1536,512,1] copy(rhs),
    sharding={devices=[2,1,2,1,2]0,4,2,6,1,5,3,7 last_tile_dim_replicate}
  ROOT %convolution = bf16[8,1024,512,1] convolution(lhs.copy, rhs.copy),
    window={size=1x2}, dim_labels=0b1f_1io0->0bf1,
    sharding={devices=[4,1,2,1]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, /*num_devices=*/8));
  VLOG(1) << module->ToString();
  auto* root = module->entry_computation()->root_instruction();
  const auto sliced_lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(), op::Constant(),
                                op::Reshape(), op::Constant())),
      op::Shape("bf16[2,1024,1,1536]"));
  const auto sliced_rhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(1), op::Reshape(), op::Constant(),
                                op::Reshape(), op::Constant())),
      op::Shape("bf16[1,1536,256,1]"));
  // RHS shards are stitched back together along dim 2 via all-reduce.
  const auto rereplicated_rhs =
      AllOf(op::AllReduce(op::DynamicUpdateSlice(
                op::Broadcast(), sliced_rhs, op::Constant(), op::Constant(),
                op::Reshape(), op::Constant())),
            op::Shape("bf16[1,1536,512,1]"));
  EXPECT_THAT(
      root,
      AllOf(op::DynamicSlice(
                op::AllReduce(op::Convolution(sliced_lhs, rereplicated_rhs)),
                op::Constant(), op::Constant(), op::Reshape(), op::Constant()),
            op::Shape("bf16[2,1024,256,1]")));
}
// Same as EinsumNonContractingDimPartitionOnTwoDims but with a transposed
// iota tile assignment on the RHS; the partitioned pattern is identical.
TEST_P(SpmdPartitioningTest, EinsumNonContractingDimPartitionOnTwoDims2) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = bf16[8,1024,2,1536] parameter(0)
  %lhs.copy = bf16[8,1024,2,1536] copy(lhs),
    sharding={devices=[4,1,2,1]<=[8]}
  %rhs = bf16[2,1536,512,1] parameter(1)
  %rhs.copy = bf16[2,1536,512,1] copy(rhs),
    sharding={devices=[2,1,2,1,2]<=[4,2]T(1,0) last_tile_dim_replicate}
  ROOT %convolution = bf16[8,1024,512,1] convolution(lhs.copy, rhs.copy),
    window={size=1x2}, dim_labels=0b1f_1io0->0bf1,
    sharding={devices=[4,1,2,1]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, /*num_devices=*/8));
  VLOG(1) << module->ToString();
  auto* root = module->entry_computation()->root_instruction();
  const auto sliced_lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(), op::Constant(),
                                op::Reshape(), op::Constant())),
      op::Shape("bf16[2,1024,1,1536]"));
  const auto sliced_rhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(1), op::Reshape(), op::Constant(),
                                op::Reshape(), op::Constant())),
      op::Shape("bf16[1,1536,256,1]"));
  // RHS shards are stitched back together along dim 2 via all-reduce.
  const auto rereplicated_rhs =
      AllOf(op::AllReduce(op::DynamicUpdateSlice(
                op::Broadcast(), sliced_rhs, op::Constant(), op::Constant(),
                op::Reshape(), op::Constant())),
            op::Shape("bf16[1,1536,512,1]"));
  EXPECT_THAT(
      root,
      AllOf(op::DynamicSlice(
                op::AllReduce(op::Convolution(sliced_lhs, rereplicated_rhs)),
                op::Constant(), op::Constant(), op::Reshape(), op::Constant()),
            op::Shape("bf16[2,1024,256,1]")));
}
// A fully replicated rng: the partitioner guards the rng behind a
// partition-id comparison and broadcasts the one generated shard via
// select + all-reduce, so every replica observes identical random values.
TEST_P(SpmdPartitioningTest, ReplicatedRng) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = s32[] parameter(0)
  %lhs.copy = s32[] copy(%lhs), sharding={replicated}
  %rhs = s32[] parameter(1)
  %rhs.copy = s32[] copy(%rhs), sharding={replicated}
  ROOT %rng = s32[4]{0} rng(%lhs.copy, %rhs.copy),
    distribution=rng_uniform, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, /*num_devices=*/2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  // The rng's operands are not matched explicitly (bare op::Rng()); the
  // lhs/rhs operand matchers previously defined here were unused dead code
  // and have been removed.
  EXPECT_THAT(
      root,
      AllOf(op::AllReduce(op::Select(
                op::Broadcast(op::Compare(op::PartitionId(), op::Constant())),
                op::Rng(), op::Broadcast(op::Constant()))),
            op::Shape("s32[4]")));
}
// Under manual sharding the rng is left untouched: it keeps its original
// operands and runs as-is on each device.
TEST_P(SpmdPartitioningTest, ManualRng) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = s32[] parameter(0), sharding={manual}
  %rhs = s32[] parameter(1), sharding={manual}
  ROOT %rng = s32[4]{0} rng(%lhs, %rhs),
    distribution=rng_uniform, sharding={manual}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_string, /*num_devices=*/2));
  VLOG(1) << partitioned_module->ToString();
  auto* root = partitioned_module->entry_computation()->root_instruction();
  const auto unchanged_rng =
      AllOf(op::Rng(op::Parameter(0), op::Parameter(1)), op::Shape("s32[4]"));
  EXPECT_THAT(root, unchanged_rng);
}
// rng with a replicated first seed, a maximal (device 1) second seed, and a
// tiled output: the device-local seed is broadcast to all partitions
// (select + all-reduce) before each partition generates its local shard.
TEST_P(SpmdPartitioningTest, PartitionedRng) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = s32[] parameter(0)
  %lhs.copy = s32[] copy(%lhs), sharding={replicated}
  %rhs = s32[] parameter(1)
  %rhs.copy = s32[] copy(%rhs), sharding={maximal device=1}
  ROOT %rng = s32[4]{0} rng(%lhs.copy, %rhs.copy),
    distribution=rng_uniform, sharding={devices=[2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_string, /*num_devices=*/2));
  VLOG(1) << partitioned_module->ToString();
  auto* root = partitioned_module->entry_computation()->root_instruction();
  const auto replicated_seed =
      AllOf(op::Copy(op::Parameter(0)), op::Shape("s32[]"));
  const auto device_local_seed =
      AllOf(op::Copy(op::Copy(op::Parameter(1))), op::Shape("s32[]"));
  EXPECT_THAT(root,
              AllOf(op::Rng(replicated_seed,
                            op::AllReduce(op::Select(
                                op::Broadcast(op::Compare()), device_local_seed,
                                op::Broadcast(op::Constant())))),
                    op::Shape("s32[2]")));
}
// rng whose output is tiled 2 ways and replicated over groups of 4: the rng
// runs gated on the in-group partition id (looked up from a constant
// table), and the result is shared within the group via select+all-reduce.
TEST_P(SpmdPartitioningTest, PartialReplicatedRng) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = s32[] parameter(0), sharding={replicated}
  %rhs = s32[] parameter(1), sharding={replicated}
  ROOT %rng = s32[8]{0} rng(%lhs, %rhs),
    distribution=rng_uniform,
    sharding={devices=[2,4]<=[8] last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_string, /*num_devices=*/8));
  VLOG(1) << partitioned_module->ToString();
  auto* root = partitioned_module->entry_computation()->root_instruction();
  const auto low_seed = AllOf(op::Parameter(0), op::Shape("s32[]"));
  const auto high_seed = AllOf(op::Parameter(1), op::Shape("s32[]"));
  // In-group partition id, derived from the global partition id through a
  // constant lookup table.
  const auto group_partition_id =
      AllOf(op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId())),
            op::Shape("u32[]"));
  EXPECT_THAT(
      root,
      AllOf(op::AllReduce(op::Select(
                op::Broadcast(op::Compare(group_partition_id, op::Constant())),
                op::Rng(low_seed, high_seed), op::Broadcast(op::Constant()))),
            op::Shape("s32[4]")));
}
// partition-id under manual sharding is kept verbatim by the partitioner.
TEST_P(SpmdPartitioningTest, ManualPartitionId) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  ROOT %lhs = u32[] partition-id(), sharding={manual}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_string, /*num_devices=*/8));
  VLOG(1) << partitioned_module->ToString();
  EXPECT_THAT(partitioned_module->entry_computation()->root_instruction(),
              op::PartitionId());
}
// Dynamic-slice whose sliced dimension is not the partitioned one: each
// partition slices its local shard with the original dynamic index, while
// the trivial index on the partitioned dimension becomes a constant.
TEST_P(SpmdPartitioningTest, DynamicSliceAlongNonPartitionedDimension) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %input = s32[128,64] parameter(0), sharding={devices=[2,1]0,1}
  %index = s32[] parameter(1)
  %trivial_index = s32[] parameter(2)
  ROOT %dynamic-slice = s32[128,2] dynamic-slice(%input, %trivial_index, %index),
    dynamic_slice_sizes={128,2}, sharding={devices=[2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_string, /*num_devices=*/2));
  VLOG(1) << partitioned_module->ToString();
  auto* root = partitioned_module->entry_computation()->root_instruction();
  const auto local_input = AllOf(op::Parameter(0), op::Shape("s32[64,64]"));
  EXPECT_THAT(root, AllOf(op::DynamicSlice(local_input, op::Constant(),
                                           op::Parameter(1)),
                          op::Shape("s32[64,2]")));
}
// Dynamic-update-slice along a non-partitioned dimension: the update is
// sliced per-partition like the input and applied locally with the
// original dynamic index on the free dimension.
TEST_P(SpmdPartitioningTest, DynamicUpdateSliceAlongNonPartitionedDimension) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %input = s32[128,64] parameter(0), sharding={devices=[2,1]0,1}
  %index = s32[] parameter(1)
  %update = s32[128,2] parameter(2)
  %trivial_index = s32[] parameter(3)
  %update.copy = s32[128,2] copy(%update), sharding={devices=[2,1]0,1}
  ROOT %dynamic-update-slice = s32[128,64]
    dynamic-update-slice(%input, %update.copy, %trivial_index, %index),
    sharding={devices=[2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_string, /*num_devices=*/2));
  VLOG(1) << partitioned_module->ToString();
  auto* root = partitioned_module->entry_computation()->root_instruction();
  const auto local_input = AllOf(op::Parameter(0), op::Shape("s32[64,64]"));
  const auto local_update =
      AllOf(op::Copy(op::DynamicSlice(op::Parameter(2), op::Reshape(),
                                      op::Constant())),
            op::Shape("s32[64,2]"));
  EXPECT_THAT(root,
              AllOf(op::DynamicUpdateSlice(local_input, local_update,
                                           op::Constant(), op::Parameter(1)),
                    op::Shape("s32[64,64]")));
}
// Dynamic-update-slice where the updated dimension is the partitioned one
// and the update offset is a constant (60): each partition must mask the
// update so only the shards intersecting the update window apply it.
TEST_P(SpmdPartitioningTest, DynamicUpdateSliceAlongPartitionedDimension) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %input = s32[128,64] parameter(0), sharding={devices=[1,2]0,1}
  %index = s32[] parameter(1)
  %constant = s32[] constant(60)
  %update = s32[128,2] parameter(2), sharding={devices=[1,2]0,1}
  ROOT %dynamic-update-slice = s32[128,64]
    dynamic-update-slice(%input, %update, %index, %constant),
    sharding={devices=[1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, /*num_devices=*/2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto input = AllOf(op::Parameter(0), op::Shape("s32[128,32]"));
  // The sharded update is first made replicated: each partition writes its
  // shard into a zero buffer, then the buffers are all-reduced.
  auto update = AllOf(
      op::AllReduce(op::DynamicUpdateSlice(op::Broadcast(), op::Parameter(2),
                                           op::Constant(), op::Reshape())),
      op::Shape("s32[128,2]"));
  // The select keeps the original data on partitions whose shard does not
  // overlap the update window.
  EXPECT_THAT(root,
              AllOf(op::Select(op::Broadcast(),
                               op::DynamicUpdateSlice(
                                   input, update, op::Constant(), op::Select()),
                               input),
                    op::Shape("s32[128,32]")));
}
// Dynamic-update-slice of a single row of a dimension sharded 8 ways: the
// one-row update is replicated via a masked select + all-reduce, and a
// final select applies it only on the partition owning the target row.
TEST_P(SpmdPartitioningTest, DynamicUpdateSliceAlongPartitionedDimension2) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %input = s32[8,790,2] parameter(0),
    sharding={devices=[8,1,1]<=[8]}
  %index = s32[] parameter(1)
  %constant = s32[] constant(0)
  %update = s32[1,790,2] parameter(2),
    sharding={devices=[8,1,1]<=[8]}
  ROOT %dynamic-update-slice = s32[8,790,2]
    dynamic-update-slice(%input, %update, %index, %constant, %constant),
    sharding={devices=[8,1,1]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, /*num_devices=*/8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto input = AllOf(op::Parameter(0), op::Shape("s32[1,790,2]"));
  // Replicate the single-row update across all partitions.
  auto update = AllOf(op::AllReduce(op::Select(
                          op::Broadcast(), op::Parameter(2), op::Broadcast())),
                      op::Shape("s32[1,790,2]"));
  EXPECT_THAT(
      root,
      AllOf(op::Select(op::Broadcast(),
                       op::DynamicUpdateSlice(input, update, op::Select(),
                                              op::Constant(), op::Constant()),
                       input),
            op::Shape("s32[1,790,2]")));
}
// Dynamic-update-slice with the input sharded along both the sliced
// dimension (columns, constant offset 60) and a non-sliced dimension
// (rows); combines the per-shard update slicing of the non-sliced dim with
// the masked-select handling of the sliced dim.
TEST_P(SpmdPartitioningTest, DynamicUpdateSlicePartitionSliceAndNonSliceDims) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %input = s32[128,64] parameter(0)
  %input.copy = s32[128,64] copy(%input), sharding={devices=[2,2]<=[4]}
  %constant.0 = s32[] constant(0)
  %constant.1 = s32[] constant(60)
  %update = s32[128,2] parameter(1)
  %update.copy = s32[128,2] copy(%update), sharding={devices=[2,2]<=[4]}
  ROOT %dynamic-update-slice = s32[128,64]
    dynamic-update-slice(%input.copy, %update.copy, %constant.0, %constant.1),
    sharding={devices=[2,2]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, /*num_devices=*/4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto input = AllOf(op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(),
                                               op::Reshape())),
                     op::Shape("s32[64,32]"));
  // The update is replicated along the sliced (column) dimension only; the
  // row dimension stays sharded.
  auto update = AllOf(op::AllReduce(op::DynamicUpdateSlice(
                          op::Broadcast(),
                          op::Copy(op::DynamicSlice(
                              op::Parameter(1), op::Reshape(), op::Reshape())),
                          op::Constant(), op::Reshape())),
                      op::Shape("s32[64,2]"));
  // Only column-shards intersecting the update window apply the update.
  EXPECT_THAT(root,
              AllOf(op::Select(op::Broadcast(),
                               op::DynamicUpdateSlice(
                                   input, update, op::Constant(), op::Select()),
                               input),
                    op::Shape("s32[64,32]")));
}
// Gather with replicated operands but a tiled output: the full gather runs
// on every partition and the result is padded then dynamic-sliced down to
// the partition's shard.
TEST_P(SpmdPartitioningTest, UnpartitionedGather) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %input = f32[2,9] parameter(0), sharding={replicated}
  %indices = s32[3] parameter(1), sharding={replicated}
  ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1},
    collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
    slice_sizes={1,9}, sharding={devices=[1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_string, /*num_devices=*/2));
  VLOG(1) << partitioned_module->ToString();
  HloInstruction* root =
      partitioned_module->entry_computation()->root_instruction();
  const auto full_gather = op::Gather(op::Parameter(0), op::Parameter(1));
  EXPECT_THAT(root, AllOf(op::DynamicSlice(op::Pad(full_gather, _), _, _),
                          op::Shape("f32[3,5]")));
}
// Operand-passthrough gather: the operand's sharding on an offset dimension
// passes straight through to the gather output, so the gather stays local.
TEST_P(SpmdPartitioningTest, PassthroughGather) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %input = f32[2,9] parameter(0), sharding={devices=[1,2]0,1}
  %indices = s32[3] parameter(1), sharding={replicated}
  ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1},
    collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
    slice_sizes={1,9}, sharding={devices=[1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_string, /*num_devices=*/2));
  VLOG(1) << partitioned_module->ToString();
  auto* root = partitioned_module->entry_computation()->root_instruction();
  const auto local_gather = AllOf(
      op::Gather(op::Parameter(0), op::Parameter(1)), op::Shape("f32[3,5]"));
  EXPECT_THAT(root, local_gather);
}
// Operand-passthrough gather with partially replicated sharding: the
// partial replication does not prevent the gather from staying local.
TEST_P(SpmdPartitioningTest, PassthroughGather_PartialReplicate) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %input = f32[2,9] parameter(0),
    sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
  %indices = s32[3] parameter(1), sharding={replicated}
  ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1},
    collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
    slice_sizes={1,9}, sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_string, /*num_devices=*/4));
  VLOG(1) << partitioned_module->ToString();
  auto* root = partitioned_module->entry_computation()->root_instruction();
  const auto local_gather = AllOf(
      op::Gather(op::Parameter(0), op::Parameter(1)), op::Shape("f32[3,5]"));
  EXPECT_THAT(root, local_gather);
}
// Index-passthrough gather: with a replicated operand and tiled indices,
// the indices' sharding passes through to the output batch dimensions and
// the gather stays local.
TEST_P(SpmdPartitioningTest, IndexPassthroughGather) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %input = f32[2,9,8] parameter(0), sharding={replicated}
  %indices = s32[4,2,4] parameter(1), sharding={devices=[2,1,2]<=[4]}
  ROOT %gather = f32[8,4,4] gather(%input, %indices), offset_dims={0},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=1,
    slice_sizes={1,1,8}, sharding={devices=[1,2,2]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_string, /*num_devices=*/4));
  VLOG(1) << partitioned_module->ToString();
  auto* root = partitioned_module->entry_computation()->root_instruction();
  const auto local_gather = AllOf(
      op::Gather(op::Parameter(0), op::Parameter(1)), op::Shape("f32[8,2,2]"));
  EXPECT_THAT(root, local_gather);
}
// Index-passthrough gather with partially replicated indices: the sharding
// on the index batch dimensions still passes through to the output.
TEST_P(SpmdPartitioningTest, IndexPassthroughGather_PartialReplicate) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %input = f32[2,9,8] parameter(0), sharding={replicated}
  %indices = s32[4,2,4] parameter(1),
    sharding={devices=[2,1,2,2]<=[8] last_tile_dim_replicate}
  ROOT %gather = f32[8,4,4] gather(%input, %indices), offset_dims={0},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=1,
    slice_sizes={1,1,8},
    sharding={devices=[1,2,2,2]<=[8] last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_string, /*num_devices=*/8));
  VLOG(1) << partitioned_module->ToString();
  auto* root = partitioned_module->entry_computation()->root_instruction();
  const auto local_gather = AllOf(
      op::Gather(op::Parameter(0), op::Parameter(1)), op::Shape("f32[8,2,2]"));
  EXPECT_THAT(root, local_gather);
}
// Gather where both the operand (on an offset dim) and the indices (on a
// batch dim) are sharded on compatible mesh dims: both shardings pass
// through and the gather stays entirely local.
TEST_P(SpmdPartitioningTest, IndexAndOperandPassthroughGather) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %input = f32[7,12] parameter(0),
    sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
  %indices = s32[16,2] parameter(1),
    sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}
  ROOT %gather = f32[16,1,12] gather(%input, %indices),
    offset_dims={1,2}, collapsed_slice_dims={}, start_index_map={0,1},
    index_vector_dim=1, slice_sizes={1,12},
    sharding={devices=[2,1,2]0,2,1,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_string, /*num_devices=*/4));
  VLOG(1) << partitioned_module->ToString();
  auto* root = partitioned_module->entry_computation()->root_instruction();
  const auto local_gather =
      AllOf(op::Gather(op::Parameter(0), op::Parameter(1)),
            op::Shape("f32[8,1,6]"));
  EXPECT_THAT(root, local_gather);
}
// Index-passthrough gather where the index vector dimension itself is
// partitioned: the indices are first made replicated along that dimension
// (all-reduce), the gather runs locally, and the result is
// collective-permuted into the requested output sharding.
TEST_P(SpmdPartitioningTest, IndexPassthroughGatherPartitionedIndexVectorDim) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %input = f32[2,9,8] parameter(0), sharding={replicated}
  %indices = s32[4,2,4] parameter(1), sharding={devices=[2,2,2]<=[8]}
  ROOT %gather = f32[8,4,4] gather(%input, %indices), offset_dims={0},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=1,
    slice_sizes={1,1,8},
    sharding={devices=[1,2,2,2]<=[8] last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_string, /*num_devices=*/8));
  auto* root = partitioned_module->entry_computation()->root_instruction();
  const auto replicated_operand =
      AllOf(op::Shape("f32[2,9,8]"), op::Parameter(0));
  const auto assembled_indices =
      AllOf(op::Shape("s32[2,2,2]"), op::AllReduce());
  const auto local_gather =
      AllOf(op::Shape("f32[8,2,2]"),
            op::Gather(replicated_operand, assembled_indices));
  VLOG(1) << partitioned_module->ToString();
  EXPECT_THAT(root, op::CollectivePermute(local_gather));
}
// Gather with explicit operand/start_indices batching dimensions whose
// shardings agree across operand and indices: the gather runs locally on
// each partition's shards.
TEST_P(SpmdPartitioningTest, GatherExplicitBatchDims) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %input = f32[10,3,14,4] parameter(0), sharding={devices=[2,1,2,1]<=[2,2]T(1,0)}
  %indices = s32[14,10,6,2] parameter(1), sharding={devices=[2,2,1,1]<=[4]}
  ROOT %gather = f32[14,10,6,2] gather(%input, %indices), offset_dims={3},
    collapsed_slice_dims={1}, operand_batching_dims={0,2},
    start_indices_batching_dims={1,0}, start_index_map={1,3},
    index_vector_dim=3, slice_sizes={1,1,1,2}, sharding={devices=[2,2,1,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_string, /*num_devices=*/4));
  VLOG(1) << partitioned_module->ToString();
  auto* root = partitioned_module->entry_computation()->root_instruction();
  const auto local_input = AllOf(op::Shape("f32[5,3,7,4]"), op::Parameter(0));
  const auto local_indices =
      AllOf(op::Shape("s32[7,5,6,2]"), op::Parameter(1));
  EXPECT_THAT(root, AllOf(op::Shape("f32[7,5,6,2]"),
                          op::Gather(local_input, local_indices)));
}
// Gather combining an explicit batch dim with an operand-passthrough offset
// dim (input dim 3 flows into output dim 3). Both stay partitioned across
// the 4 devices, so the gather runs entirely shard-locally.
TEST_P(SpmdPartitioningTest, GatherExplicitBatchAndOperandPassthroughDims) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[10,3,14,4] parameter(0), sharding={devices=[2,1,1,2]<=[4]}
%indices = s32[14,10,6,2] parameter(1), sharding={devices=[1,2,1,1,2]<=[4] last_tile_dim_replicate}
ROOT %gather = f32[14,10,6,4] gather(%input, %indices), offset_dims={3},
collapsed_slice_dims={1}, operand_batching_dims={0,2},
start_indices_batching_dims={1,0}, start_index_map={1,3},
index_vector_dim=3, slice_sizes={1,1,1,4}, sharding={devices=[1,2,1,2]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  auto sharded_input = AllOf(op::Shape("f32[5,3,14,2]"), op::Parameter(0));
  auto sharded_indices = AllOf(op::Shape("s32[14,5,6,2]"), op::Parameter(1));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Shape("f32[14,5,6,2]"),
                    op::Gather(sharded_input, sharded_indices)));
}
// Gather combining an explicit batch dim with an index-passthrough dim
// (indices dim 2 flows straight into output dim 2). Both are tiled, so the
// gather partitions with shard-local operands and no communication.
TEST_P(SpmdPartitioningTest, GatherExplicitBatchAndIndexPassthroughDims) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[10,3,14,4] parameter(0), sharding={devices=[1,1,2,1,2]<=[4] last_tile_dim_replicate}
%indices = s32[14,10,6,2] parameter(1), sharding={devices=[2,1,2,1]<=[4]}
ROOT %gather = f32[14,10,6,2] gather(%input, %indices), offset_dims={3},
collapsed_slice_dims={1}, operand_batching_dims={0,2},
start_indices_batching_dims={1,0}, start_index_map={1,3},
index_vector_dim=3, slice_sizes={1,1,1,2}, sharding={devices=[2,1,2,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  auto sharded_input = AllOf(op::Shape("f32[10,3,7,4]"), op::Parameter(0));
  auto sharded_indices = AllOf(op::Shape("s32[7,10,3,2]"), op::Parameter(1));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Shape("f32[7,10,3,2]"),
                    op::Gather(sharded_input, sharded_indices)));
}
// Gather over an operand partitioned on a trivially-sliced dimension
// (dim 0 with slice size 1): each of the 2 partitions clamps the replicated
// indices into its local row range, gathers locally, masks rows whose
// original index fell outside the partition, and all-reduces the masked
// partial results into the full f32[2,3,9] output.
TEST_P(SpmdPartitioningTest, GatherPartitionedOnTrivialSliceDims) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[17,9] parameter(0), sharding={devices=[2,1]0,1}
%indices = s32[2,3] parameter(1), sharding={replicated}
ROOT %gather = f32[2,3,9] gather(%input, %indices), offset_dims={2},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=2,
slice_sizes={1,9}, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
// Start offset of this partition's rows, looked up by partition-id.
auto offset =
op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId()));
auto min = AllOf(op::Broadcast(offset), op::Shape("s32[2,3]"));
auto max = AllOf(op::Broadcast(op::Add(offset, op::Constant())),
op::Shape("s32[2,3]"));
// Indices are clamped into [min, max] and rebased to partition-local.
auto clamp = op::Clamp(min, op::Parameter(1), max);
auto gather = op::Gather(op::Parameter(0), op::Subtract(clamp, min));
// Rows whose original index was out of this partition's range are replaced
// by a constant before the all-reduce combines the partitions.
auto mask =
op::Or(op::Lt(op::Parameter(1), min), op::Gt(op::Parameter(1), max));
auto masked =
op::Select(op::Broadcast(mask), op::Broadcast(op::Constant()), gather);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::AllReduce(masked), op::Shape("f32[2,3,9]")));
}
// Same trivially-sliced-dim gather as above, but the operand uses partial
// replication ([2,1,2] with last_tile_dim_replicate) across 4 devices. The
// partitioned lowering is identical: clamp + local gather + mask + all-reduce.
TEST_P(SpmdPartitioningTest,
GatherPartitionedOnTrivialSliceDims_PartialReplicate) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[17,9] parameter(0),
sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
%indices = s32[2,3] parameter(1), sharding={replicated}
ROOT %gather = f32[2,3,9] gather(%input, %indices), offset_dims={2},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=2,
slice_sizes={1,9}, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
// Per-partition row offset derived from the partition-id.
auto offset =
op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId()))
;
auto min = AllOf(op::Broadcast(offset), op::Shape("s32[2,3]"));
auto max = AllOf(op::Broadcast(op::Add(offset, op::Constant())),
op::Shape("s32[2,3]"));
auto clamp = op::Clamp(min, op::Parameter(1), max);
auto gather = op::Gather(op::Parameter(0), op::Subtract(clamp, min));
// Out-of-range rows are masked to a constant before the all-reduce.
auto mask =
op::Or(op::Lt(op::Parameter(1), min), op::Gt(op::Parameter(1), max));
auto masked =
op::Select(op::Broadcast(mask), op::Broadcast(op::Constant()), gather);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::AllReduce(masked), op::Shape("f32[2,3,9]")));
}
// Scatter whose operands are all replicated while only the output is tiled
// [1,2]: the scatter itself runs replicated, and each of the 2 devices then
// pads and dynamic-slices the full result down to its local f32[2,5] shard.
TEST_P(SpmdPartitioningTest, UnpartitionedScatter) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0), sharding={replicated}
%indices = s32[3] parameter(1), sharding={replicated}
%updates = f32[3,9] parameter(2), sharding={replicated}
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1, sharding={devices=[1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
// Full scatter -> pad (9 is odd) -> per-device dynamic-slice of f32[2,5].
EXPECT_THAT(root,
AllOf(op::DynamicSlice(
op::Pad(op::Scatter(op::Parameter(0), op::Parameter(1),
op::Parameter(2)),
_),
_, _),
op::Shape("f32[2,5]")));
}
// Variadic (two-result) scatter whose inputs/updates carry mismatched
// shardings across 4 devices. Each partition scatters into f32[1,9] operand
// shards; the tuple elements are then reassembled via
// dynamic-update-slice + all-reduce, padded, and dynamic-sliced into the
// requested [1,4]-tiled f32[2,3] output shards.
TEST_P(SpmdPartitioningTest, VariadicScatter) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {
lhs.0 = f32[] parameter(0)
lhs.1 = f32[] parameter(1)
rhs.0 = f32[] parameter(2)
rhs.1 = f32[] parameter(3)
sum.0 = f32[] add(lhs.0, rhs.0)
sum.1 = f32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY entry {
%input.0 = f32[2,9] parameter(0), sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}
%input.1 = f32[2,9] parameter(1), sharding={devices=[1,2,2]0,2,1,3 last_tile_dim_replicate}
%indices = s32[3] parameter(2), sharding={replicated}
%updates.0 = f32[3,9] parameter(3), sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}
%updates.1 = f32[3,9] parameter(4), sharding={devices=[1,4]0,1,2,3}
ROOT %scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices, %updates.0, %updates.1),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1, sharding={devices=[1,4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
// The local scatter operates on f32[1,9] operand shards and full updates.
auto scatter = op::Scatter(op::Shape("f32[1,9]"), op::Shape("f32[1,9]"),
op::Shape("s32[3]"), op::Shape("f32[3,9]"),
op::Shape("f32[3,9]"));
// Each tuple element: DUS + all-reduce to rebuild [2,9], then pad and
// dynamic-slice down to the device-local f32[2,3] shard.
EXPECT_THAT(
root,
AllOf(op::Tuple(op::DynamicSlice(
op::Pad(op::AllReduce(op::DynamicUpdateSlice(
_, op::GetTupleElement(scatter), _, _)),
_),
_, _),
op::DynamicSlice(
op::Pad(op::AllReduce(op::DynamicUpdateSlice(
_, op::GetTupleElement(scatter), _, _)),
_),
_, _)),
op::Shape("(f32[2,3],f32[2,3])")));
}
// Variadic scatter where both scattered operands are the very same
// instruction (%input.0 appears twice). All inputs share a [4,1,1,2]
// partially-replicated tiling across 8 devices, so the partitioner keeps a
// single shard-local scatter producing (f32[2,16,32], f32[2,16,32]).
TEST_P(SpmdPartitioningTest, VariadicScatterSharedOperands) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {
lhs.0 = f32[] parameter(0)
lhs.1 = f32[] parameter(1)
rhs.0 = f32[] parameter(2)
rhs.1 = f32[] parameter(3)
sum.0 = f32[] add(lhs.0, rhs.0)
sum.1 = f32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY entry {
%input.0 = f32[8,16,32] parameter(0), sharding={devices=[4,1,1,2]<=[8] last_tile_dim_replicate}
%indices = s32[16,1] parameter(1), sharding={replicated}
%updates.0 = f32[8,16,16] parameter(2), sharding={devices=[4,1,1,2]<=[8] last_tile_dim_replicate}
%updates.1 = f32[8,16,16] parameter(3), sharding={devices=[4,1,1,2]<=[8] last_tile_dim_replicate}
ROOT %scatter = (f32[8,16,32], f32[8,16,32]) scatter(%input.0, %input.0, %indices, %updates.0, %updates.1),
to_apply=add,
update_window_dims={0,1},
inserted_window_dims={2},
scatter_dims_to_operand_dims={2},
index_vector_dim=1,
indices_are_sorted=true,
unique_indices=true,
sharding={{devices=[4,1,1,2]<=[8] last_tile_dim_replicate}, {devices=[4,1,1,2]<=[8] last_tile_dim_replicate}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Scatter(), op::Shape("(f32[2,16,32],f32[2,16,32])")));
}
// Scatter whose operand, updates, and output all share the same [1,2]
// tiling on the window dimension: the sharding passes straight through and
// each of the 2 devices scatters its local f32[2,5] slice directly on the
// unmodified parameters.
TEST_P(SpmdPartitioningTest, PassthroughScatter) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0), sharding={devices=[1,2]0,1}
%indices = s32[3] parameter(1), sharding={replicated}
%updates = f32[3,9] parameter(2), sharding={devices=[1,2]0,1}
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1, sharding={devices=[1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  auto local_scatter =
      op::Scatter(op::Parameter(0), op::Parameter(1), op::Parameter(2));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(local_scatter, op::Shape("f32[2,5]")));
}
// Variadic passthrough scatter: both operand/update pairs and both outputs
// share the [1,2] tiling, so each of the 2 devices scatters its local
// (f32[2,5], f32[2,5]) tuple shard with no communication.
TEST_P(SpmdPartitioningTest, PassthroughScatterVariadic) {
absl::string_view hlo_string = R"(
HloModule module
add_min_max {
lhs0 = f32[] parameter(0)
lhs1 = f32[] parameter(1)
rhs0 = f32[] parameter(2)
rhs1 = f32[] parameter(3)
min = minimum(rhs0, rhs1)
max = maximum(rhs0, rhs1)
min_sum = add(lhs0, min)
max_sum = add(lhs1, max)
ROOT tuple = tuple(min_sum, max_sum)
}
ENTRY entry {
%input0 = f32[2,9] parameter(0), sharding={devices=[1,2]0,1}
%input1 = f32[2,9] parameter(1), sharding={devices=[1,2]0,1}
%indices = s32[3] parameter(2), sharding={replicated}
%updates0 = f32[3,9] parameter(3), sharding={devices=[1,2]0,1}
%updates1 = f32[3,9] parameter(4), sharding={devices=[1,2]0,1}
ROOT %scatter = (f32[2,9], f32[2,9])
scatter(%input0, %input1, %indices, %updates0, %updates1),
to_apply=add_min_max, update_window_dims={1}, inserted_window_dims={0},
scatter_dims_to_operand_dims={0}, index_vector_dim=1,
sharding={{devices=[1,2]0,1},{devices=[1,2]0,1}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
// Scatter applies directly to the unmodified (already local) parameters.
EXPECT_THAT(root, AllOf(op::Scatter(op::Parameter(0), op::Parameter(1),
op::Parameter(2), op::Parameter(3),
op::Parameter(4)),
op::Shape("(f32[2,5], f32[2,5])")));
}
// Passthrough scatter with partial replication: operand, updates, and
// output all use [1,2,2] last_tile_dim_replicate across 4 devices, so the
// scatter still applies directly to the local f32[2,5] shards.
TEST_P(SpmdPartitioningTest, PassthroughScatter_PartialReplicate) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
%indices = s32[3] parameter(1), sharding={replicated}
%updates = f32[3,9] parameter(2),
sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Scatter(op::Parameter(0), op::Parameter(1),
op::Parameter(2)),
op::Shape("f32[2,5]")));
}
// Variadic passthrough scatter with partial replication across 4 devices:
// every input/update/output uses the same [1,2,2] last_tile_dim_replicate
// tiling, so each device scatters its (f32[2,5], f32[2,5]) shard locally.
TEST_P(SpmdPartitioningTest, PassthroughScatterVariadic_PartialReplicate) {
absl::string_view hlo_string = R"(
HloModule module
add_min_max {
lhs0 = f32[] parameter(0)
lhs1 = f32[] parameter(1)
rhs0 = f32[] parameter(2)
rhs1 = f32[] parameter(3)
min = minimum(rhs0, rhs1)
max = maximum(rhs0, rhs1)
min_sum = add(lhs0, min)
max_sum = add(lhs1, max)
ROOT tuple = tuple(min_sum, max_sum)
}
ENTRY entry {
%input0 = f32[2,9] parameter(0),
sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
%input1 = f32[2,9] parameter(1),
sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
%indices = s32[3] parameter(2), sharding={replicated}
%updates0 = f32[3,9] parameter(3),
sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
%updates1 = f32[3,9] parameter(4),
sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
ROOT %scatter = (f32[2,9], f32[2,9])
scatter(%input0, %input1, %indices, %updates0, %updates1),
to_apply=add_min_max, update_window_dims={1}, inserted_window_dims={0},
scatter_dims_to_operand_dims={0}, index_vector_dim=1,
sharding={{devices=[1,2,2]<=[4] last_tile_dim_replicate},
{devices=[1,2,2]<=[4] last_tile_dim_replicate}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4))
;
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Scatter(op::Parameter(0), op::Parameter(1),
op::Parameter(2), op::Parameter(3),
op::Parameter(4)),
op::Shape("(f32[2,5], f32[2,5])")));
}
// Index-passthrough scatter: the indices/updates are partitioned while the
// operand and output stay replicated. Each partition scatters into a masked
// copy of the operand (the original values are kept on only one partition,
// selected via partition-id, so the subsequent all-reduces don't
// double-count them) and the partial results are combined by all-reduce.
TEST_P(SpmdPartitioningTest, IndexPassthroughScatter) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9,8] parameter(0), sharding={replicated}
%indices = s32[4,2,4] parameter(1), sharding={devices=[2,1,2]<=[4]}
%updates = f32[4,4,8] parameter(2), sharding={devices=[2,2,1]<=[4]}
ROOT %scatter = f32[2,9,8] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={2},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(op::AllReduce(op::AllReduce(op::Scatter(
op::Select(op::Broadcast(op::Convert(op::PartitionId())),
op::Broadcast(op::Constant()), op::Parameter(0)),
op::Parameter(1), op::Parameter(2)))),
op::Shape("f32[2,9,8]")));
}
// Index-passthrough scatter with partially-replicated indices/updates over
// 8 devices. Same lowering as the fully-tiled case, except the operand mask
// predicate comes from a reshape (of the subgroup id) rather than directly
// from the partition-id.
TEST_P(SpmdPartitioningTest, IndexPassthroughScatter_PartialReplicate) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9,8] parameter(0), sharding={replicated}
%indices = s32[4,2,4] parameter(1),
sharding={devices=[2,1,2,2]<=[8] last_tile_dim_replicate}
%updates = f32[4,4,8] parameter(2),
sharding={devices=[2,2,1,2]<=[8] last_tile_dim_replicate}
ROOT %scatter = f32[2,9,8] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={2},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
// Operand is masked to a constant on all but one replica group member, so
// the enclosing all-reduces don't double-count the original input.
EXPECT_THAT(
root,
AllOf(op::AllReduce(op::AllReduce(op::Scatter(
op::Select(op::Broadcast(op::Convert(op::Reshape())),
op::Broadcast(op::Constant()), op::Parameter(0)),
op::Parameter(1), op::Parameter(2)))),
op::Shape("f32[2,9,8]")));
}
// Index-passthrough scatter whose index vector dimension is partitioned:
// the indices are recombined by all-reduce, the updates arrive via
// collective-permute, the operand is select-masked so only one partition
// keeps the original values, and three all-reduces combine the partials.
TEST_P(SpmdPartitioningTest, IndexPassthroughScatterPartitionedIndexVectorDim) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9,8] parameter(0), sharding={replicated}
%indices = s32[4,2,4] parameter(1), sharding={devices=[2,2,2]<=[8]}
%updates = f32[4,4,8] parameter(2),
sharding={devices=[2,2,1,2]<=[8] last_tile_dim_replicate}
ROOT %scatter = f32[2,9,8] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={2},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  auto masked_operand = AllOf(op::Shape("f32[2,9,8]"), op::Select());
  auto combined_indices = AllOf(op::Shape("s32[2,2,2]"), op::AllReduce());
  auto permuted_update =
      AllOf(op::Shape("f32[2,2,8]"), op::CollectivePermute());
  auto local_scatter =
      AllOf(op::Shape("f32[2,9,8]"),
            op::Scatter(masked_operand, combined_indices, permuted_update));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::AllReduce(op::AllReduce(op::AllReduce(local_scatter))));
}
// Index-passthrough scatter with a `minimum` combiner instead of addition.
// The lowering mirrors the add case: the operand is select-masked by
// partition-id to a constant (presumably the combiner's identity — confirm
// against the partitioner) and the partials are merged with all-reduces.
TEST_P(SpmdPartitioningTest, IndexPassthroughScatter_Min) {
absl::string_view hlo_string = R"(
HloModule module
min (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT min = f32[] minimum(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9,8] parameter(0), sharding={replicated}
%indices = s32[4,2,4] parameter(1), sharding={devices=[2,1,2]<=[4]}
%updates = f32[4,4,8] parameter(2), sharding={devices=[2,2,1]<=[4]}
ROOT %scatter = f32[2,9,8] scatter(%input, %indices, %updates),
to_apply=min,
update_window_dims={2},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(op::AllReduce(op::AllReduce(op::Scatter(
op::Select(op::Broadcast(op::Convert(op::PartitionId())),
op::Broadcast(op::Constant()), op::Parameter(0)),
op::Parameter(1), op::Parameter(2)))),
op::Shape("f32[2,9,8]")));
}
// Scatter with explicit batching dimensions (input_batching_dims /
// scatter_indices_batching_dims). All batch dims are tiled compatibly
// across the 4 devices, so the scatter runs shard-locally with no
// communication.
TEST_P(SpmdPartitioningTest, ScatterExplicitBatchDims) {
  absl::string_view hlo_string = R"(
HloModule module
min (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT min = f32[] minimum(lhs, rhs)
}
ENTRY entry {
%input = f32[10,6,14,4] parameter(0), sharding={devices=[2,1,2,1]<=[4]}
%indices = s32[14,10,6,2] parameter(1), sharding={devices=[2,2,1,1]<=[2,2]T(1,0)}
%updates = f32[14,10,6,2] parameter(2), sharding={devices=[2,2,1,1]<=[2,2]T(1,0)}
ROOT %scatter = f32[10,6,14,4] scatter(%input, %indices, %updates),
to_apply=min, update_window_dims={3}, inserted_window_dims={1},
scatter_dims_to_operand_dims={1,3}, input_batching_dims={0,2},
scatter_indices_batching_dims={1,0}, index_vector_dim=3, sharding={devices=[2,1,2,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  auto sharded_input = AllOf(op::Shape("f32[5,6,7,4]"), op::Parameter(0));
  auto sharded_indices = AllOf(op::Shape("s32[7,5,6,2]"), op::Parameter(1));
  auto sharded_updates = AllOf(op::Shape("f32[7,5,6,2]"), op::Parameter(2));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Shape("f32[5,6,7,4]"),
                    op::Scatter(sharded_input, sharded_indices,
                                sharded_updates)));
}
// Scatter combining an explicit batch dim with an operand-passthrough
// window dim (input dim 3). Both remain partitioned across the 4 devices,
// so the scatter runs entirely shard-locally.
TEST_P(SpmdPartitioningTest, ScatterExplicitBatchAndOperandPassthroughDims) {
  absl::string_view hlo_string = R"(
HloModule module
min (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT min = f32[] minimum(lhs, rhs)
}
ENTRY entry {
%input = f32[10,6,14,4] parameter(0), sharding={devices=[1,1,2,2]<=[4]}
%indices = s32[14,10,6,2] parameter(1), sharding={devices=[2,1,1,1,2]<=[4] last_tile_dim_replicate}
%updates = f32[14,10,6,4] parameter(2), sharding={devices=[2,1,1,2]<=[4]}
ROOT %scatter = f32[10,6,14,4] scatter(%input, %indices, %updates),
to_apply=min, update_window_dims={3}, inserted_window_dims={1},
scatter_dims_to_operand_dims={1,3}, input_batching_dims={0,2},
scatter_indices_batching_dims={1,0}, index_vector_dim=3, sharding={devices=[1,1,2,2]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  auto sharded_input = AllOf(op::Shape("f32[10,6,7,2]"), op::Parameter(0));
  auto sharded_indices = AllOf(op::Shape("s32[7,10,6,2]"), op::Parameter(1));
  auto sharded_updates = AllOf(op::Shape("f32[7,10,6,2]"), op::Parameter(2));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Shape("f32[10,6,7,2]"),
                    op::Scatter(sharded_input, sharded_indices,
                                sharded_updates)));
}
// Scatter combining an explicit batch dim with an index-passthrough dim.
// The operand is only partially tiled, so each index shard scatters into a
// select-masked copy of the local operand and the partial results are
// combined with an all-reduce.
TEST_P(SpmdPartitioningTest, ScatterExplicitBatchAndIndexPassthroughDims) {
absl::string_view hlo_string = R"(
HloModule module
min (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT min = f32[] minimum(lhs, rhs)
}
ENTRY entry {
%input = f32[10,6,14,4] parameter(0), sharding={devices=[1,1,2,1,2]<=[4] last_tile_dim_replicate}
%indices = s32[14,10,6,2] parameter(1), sharding={devices=[2,1,2,1]<=[4]}
%updates = f32[14,10,6,2] parameter(2), sharding={devices=[2,1,2,1]<=[4]}
ROOT %scatter = f32[10,6,14,4] scatter(%input, %indices, %updates),
to_apply=min, update_window_dims={3}, inserted_window_dims={1},
scatter_dims_to_operand_dims={1,3}, input_batching_dims={0,2},
scatter_indices_batching_dims={1,0}, index_vector_dim=3, sharding={devices=[1,1,2,1,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
// The local operand shard is masked (select) before the scatter so the
// all-reduce below does not double-count the original input values.
auto input =
AllOf(op::Shape("f32[10,6,7,4]"), op::Select(_, _, op::Parameter(0)));
auto indices = AllOf(op::Shape("s32[7,10,3,2]"), op::Parameter(1));
auto updates = AllOf(op::Shape("f32[7,10,3,2]"), op::Parameter(2));
auto scatter = AllOf(op::Shape("f32[10,6,7,4]"),
op::AllReduce(op::Scatter(input, indices, updates)));
EXPECT_THAT(module->entry_computation()->root_instruction(), scatter);
}
// Scatter whose operand is partitioned on a trivially-sliced dimension
// (dim 0, window slice of size 1): the replicated indices are rebased by
// subtracting each partition's start offset, and every partition scatters
// into its local f32[9,9] shard (17 rows padded and split over 2 devices).
TEST_P(SpmdPartitioningTest, ScatterPartitionedOnTrivialSliceDims) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[17,9] parameter(0), sharding={devices=[2,1]0,1}
%indices = s32[2,3] parameter(1), sharding={replicated}
%updates = f32[2,3,9] parameter(2), sharding={replicated}
ROOT %scatter = f32[17,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={2},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=2, sharding={devices=[2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
// This partition's row offset, looked up by partition-id.
auto offset =
op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId()));
auto indices = op::Subtract(
op::Parameter(1), AllOf(op::Broadcast(offset), op::Shape("s32[2,3]")));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Scatter(op::Parameter(0), indices, op::Parameter(2)),
op::Shape("f32[9,9]")));
}
// Variadic version of the trivially-sliced-dim scatter: both operands share
// the [2,1] tiling, so the same rebased indices drive one local variadic
// scatter producing a (f32[9,9], f32[9,9]) tuple shard.
TEST_P(SpmdPartitioningTest, ScatterPartitionedOnTrivialSliceDimsVariadic) {
absl::string_view hlo_string = R"(
HloModule module
add_min_max {
lhs0 = f32[] parameter(0)
lhs1 = f32[] parameter(1)
rhs0 = f32[] parameter(2)
rhs1 = f32[] parameter(3)
min = minimum(rhs0, rhs1)
max = maximum(rhs0, rhs1)
min_sum = add(lhs0, min)
max_sum = add(lhs1, max)
ROOT tuple = tuple(min_sum, max_sum)
}
ENTRY entry {
%input0 = f32[17,9] parameter(0), sharding={devices=[2,1]0,1}
%input1 = f32[17,9] parameter(1), sharding={devices=[2,1]0,1}
%indices = s32[2,3] parameter(2), sharding={replicated}
%updates0 = f32[2,3,9] parameter(3), sharding={replicated}
%updates1 = f32[2,3,9] parameter(4), sharding={replicated}
ROOT %scatter = (f32[17,9], f32[17,9])
scatter(%input0, %input1, %indices, %updates0, %updates1),
to_apply=add_min_max, update_window_dims={2}, inserted_window_dims={0},
scatter_dims_to_operand_dims={0}, index_vector_dim=2,
sharding={{devices=[2,1]0,1},{devices=[2,1]0,1}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
// Indices are rebased into partition-local row numbers.
auto offset =
op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId()));
auto indices = op::Subtract(
op::Parameter(2), AllOf(op::Broadcast(offset), op::Shape("s32[2,3]")));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Scatter(op::Parameter(0), op::Parameter(1), indices,
op::Parameter(3), op::Parameter(4)),
op::Shape("(f32[9,9], f32[9,9])")));
}
// Trivially-sliced-dim scatter with a partially-replicated operand
// ([2,1,2] last_tile_dim_replicate over 4 devices): the lowering is the
// same as the fully-tiled case — rebase indices, scatter locally.
TEST_P(SpmdPartitioningTest,
ScatterPartitionedOnTrivialSliceDims_PartialReplicate) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[17,9] parameter(0),
sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
%indices = s32[2,3] parameter(1), sharding={replicated}
%updates = f32[2,3,9] parameter(2), sharding={replicated}
ROOT %scatter = f32[17,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={2},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=2,
sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
// Per-partition row offset, derived from the partition-id.
auto offset =
op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId()));
auto indices = op::Subtract(
op::Parameter(1), AllOf(op::Broadcast(offset), op::Shape("s32[2,3]")));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Scatter(op::Parameter(0), indices, op::Parameter(2)),
op::Shape("f32[9,9]")));
}
// Variadic trivially-sliced-dim scatter with partially-replicated operands
// over 4 devices: indices are rebased once and a single local variadic
// scatter produces the (f32[9,9], f32[9,9]) tuple shard.
TEST_P(SpmdPartitioningTest,
ScatterPartitionedOnTrivialSliceDimsVariadic_PartialReplicate) {
absl::string_view hlo_string = R"(
HloModule module
add_min_max {
lhs0 = f32[] parameter(0)
lhs1 = f32[] parameter(1)
rhs0 = f32[] parameter(2)
rhs1 = f32[] parameter(3)
min = minimum(rhs0, rhs1)
max = maximum(rhs0, rhs1)
min_sum = add(lhs0, min)
max_sum = add(lhs1, max)
ROOT tuple = tuple(min_sum, max_sum)
}
ENTRY entry {
%input0 = f32[17,9] parameter(0),
sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
%input1 = f32[17,9] parameter(1),
sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
%indices = s32[2,3] parameter(2), sharding={replicated}
%updates0 = f32[2,3,9] parameter(3), sharding={replicated}
%updates1 = f32[2,3,9] parameter(4), sharding={replicated}
ROOT %scatter = (f32[17,9], f32[17,9])
scatter(%input0, %input1, %indices, %updates0, %updates1),
to_apply=add_min_max, update_window_dims={2}, inserted_window_dims={0},
scatter_dims_to_operand_dims={0}, index_vector_dim=2,
sharding={{devices=[2,1,2]<=[4] last_tile_dim_replicate},
{devices=[2,1,2]<=[4] last_tile_dim_replicate}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
// Rebase the replicated indices into partition-local row numbers.
auto offset =
op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId()));
auto indices = op::Subtract(
op::Parameter(2), AllOf(op::Broadcast(offset), op::Shape("s32[2,3]")));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Scatter(op::Parameter(0), op::Parameter(1), indices,
op::Parameter(3), op::Parameter(4)),
op::Shape("(f32[9,9], f32[9,9])")));
}
// Reverse along the non-partitioned dimension of a [2,1]-tiled constant:
// the constant is padded (3 rows over 2 devices), each device
// dynamic-slices its f32[2,3] shard, and the reverse applies locally.
TEST_P(SpmdPartitioningTest, TiledReversePassthrough) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
constant = f32[3,3]{1,0} constant({{1,1,1},{1,1,1},{1,1,1}}),
sharding={devices=[2,1]0,1}
ROOT reverse = f32[3,3]{1,0} reverse(constant), dimensions={1},
sharding={devices=[2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  auto local_shard = op::DynamicSlice(op::Pad(op::Constant(), op::Constant()),
                                      op::Reshape(), op::Constant());
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Shape("f32[2,3]{1,0}"), op::Reverse(local_shard)));
}
// Reversing dim 0 of a [2]-tiled tensor into the mirror-image device order
// ({devices=[2]1,0}) requires no data movement: each device simply
// reverses its local f32[2] shard in place.
TEST_P(SpmdPartitioningTest, TiledReversePassthroughViaReversedSharding) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = f32[4] parameter(0), sharding={devices=[2]0,1}
ROOT reverse = f32[4] reverse(param), dimensions={0},
sharding={devices=[2]1,0}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Shape("f32[2]"), op::Reverse(op::Parameter(0))));
}
// Reversing dim 0 while keeping the same device order ({devices=[2]0,1})
// means each device needs the other device's shard: the partitioner
// inserts a collective-permute before the local reverse.
TEST_P(SpmdPartitioningTest, TiledReverseSwapShards) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = f32[4] parameter(0), sharding={devices=[2]0,1}
ROOT reverse = f32[4] reverse(param), dimensions={0},
sharding={devices=[2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Shape("f32[2]"),
                    op::Reverse(op::CollectivePermute(op::Parameter(0)))));
}
// Reverse of an unevenly-sharded tensor (3 elements over 2 devices): the
// shard boundaries don't line up after reversal, so a halo exchange is
// needed — a collective-permuted slice from the neighbor is concatenated
// with a local slice before the shard-local reverse.
TEST_P(SpmdPartitioningTest, TiledReverseHaloExchange) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = f32[3] parameter(0), sharding={devices=[2]0,1}
ROOT reverse = f32[3] reverse(param), dimensions={0},
sharding={devices=[2]1,0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
// One halo element arrives via collective-permute; the rest is local.
auto halo_exchange_concat =
op::Concatenate(AllOf(op::Shape("f32[1]"),
op::CollectivePermute(op::Slice(op::Parameter(0)))),
op::Slice(op::Parameter(0)));
EXPECT_THAT(root,
AllOf(op::Shape("f32[2]"), op::Reverse(halo_exchange_concat)));
}
// Mixing SPMD-partitioned and manually-partitioned regions: the
// SPMDFullToShardShape / SPMDShardToFullShape custom-calls at the region
// boundaries are lowered to plain copies, and the ops in between keep their
// shard-local f32[4,2] shapes.
TEST_P(SpmdPartitioningTest, MixWithManualPartitioning) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = (f32[8,2], f32[4,2]) parameter(0), sharding={{devices=[2,1]0,1},{manual}}
param0 = f32[8,2] get-tuple-element(param), index=0, sharding={devices=[2,1]0,1}
param1 = f32[4,2] get-tuple-element(param), index=1, sharding={manual}
to_shard = f32[4,2] custom-call(param0), custom_call_target="SPMDFullToShardShape", sharding={manual}
add = f32[4,2] add(to_shard, param1), sharding={manual}
to_full = f32[8,2] custom-call(add), custom_call_target="SPMDShardToFullShape", sharding={devices=[2,1]0,1}
mul = f32[8,2] multiply(to_full, param0), sharding={devices=[2,1]0,1}
to_shard2 = f32[4,2] custom-call(mul), custom_call_target="SPMDFullToShardShape", sharding={manual}
ROOT tuple = (f32[4,2]) tuple(to_shard2), sharding={{manual}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
auto p0 = op::GetTupleElement(op::Parameter(0));
// Each shard<->full boundary custom-call becomes a copy.
auto to_shard = op::Copy(p0);
auto p1 = op::GetTupleElement(op::Parameter(0));
auto mul = AllOf(op::Shape("f32[4,2]"),
op::Multiply(op::Copy(op::Add(to_shard, p1)), p0));
EXPECT_THAT(root, op::Tuple(op::Copy(mul)));
}
// Chained SPMDShardToFullShape custom-calls that progressively peel manual
// subgroup dimensions ([manual x4] -> [manual x2] -> fully tiled): each
// boundary lowers to a copy over the unchanged s32[8,8,8] local shard.
TEST_P(SpmdPartitioningTest, NestedManual) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
p.0 = s32[16,16,16] parameter(0), sharding={devices=[2,2,2]<=[8]}
m.0 = s32[8,8,8] custom-call(p.0), custom_call_target="SPMDFullToShardShape", sharding={manual}
m.1 = s32[16,8,8] custom-call(m.0), custom_call_target="SPMDShardToFullShape", sharding={devices=[2,1,1,4]<=[8] last_tile_dims={manual}}
m.2 = s32[16,16,8] custom-call(m.1), custom_call_target="SPMDShardToFullShape", sharding={devices=[2,2,1,2]<=[8] last_tile_dims={manual}}
ROOT out.0 = s32[16,16,16] custom-call(m.2), custom_call_target="SPMDShardToFullShape", sharding={devices=[2,2,2]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Shape("s32[8,8,8]"),
                    op::Copy(op::Copy(op::Copy(op::Copy(op::Parameter(0)))))));
}
// Resharding [2,2,1,2] -> [1,2,2,2] moves one mesh dimension between tensor
// dims: lowered as reshape -> subgrouped all-to-all -> transpose -> reshape.
// The all-to-all should run in 4 replica groups (of 2 devices each).
TEST_P(SpmdPartitioningTest, SubgroupAllToAllReshard) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[8,8,8,8] parameter(0),
sharding={devices=[2,2,1,2]<=[8]}
ROOT %copy = f32[8,8,8,8] copy(%param0),
sharding={devices=[1,2,2,2]0,1,4,5,2,3,6,7}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto reshape =
AllOf(op::Shape("f32[4,4,2,4,4]"), op::Reshape(op::Parameter(0)));
auto all_to_all = AllOf(op::Shape("f32[4,4,2,4,4]"), op::AllToAll(reshape));
auto xpose = AllOf(op::Shape("f32[2,4,4,4,4]"), op::Transpose(all_to_all));
EXPECT_THAT(root,
op::Copy(AllOf(op::Reshape(xpose), op::Shape("f32[8,4,4,4]"))));
// The all-to-all is subgrouped: 4 replica groups over the 8 devices.
EXPECT_EQ(root->operand(0)->operand(0)->operand(0)->replica_groups().size(),
4);
}
TEST_P(SpmdPartitioningTest, SubgroupAllToAllReshard2) {
  // Swapping tile counts between the two dimensions ([2,4] -> [4,2]) is
  // expected to lower to one subgrouped all-to-all followed by a
  // collective-permute that fixes up the device order.
  absl::string_view hlo_text = R"(
HloModule module
ENTRY entry {
  %param0 = f32[8,8] parameter(0),
    sharding={devices=[2,4]<=[8]}
  ROOT %copy = f32[8,8] copy(%param0),
    sharding={devices=[4,2]0,1,4,5,2,3,6,7}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_text, /*num_devices=*/8));
  VLOG(1) << partitioned_module->ToString();
  auto subgroup_a2a = op::AllToAll(
      AllOf(op::Shape("f32[2,2,2]"), op::Reshape(op::Parameter(0))));
  auto resharded =
      AllOf(op::Shape("f32[2,4]"), op::Reshape(op::Transpose(subgroup_a2a)));
  EXPECT_THAT(partitioned_module->entry_computation()->root_instruction(),
              op::Copy(op::CollectivePermute(resharded)));
}
TEST_P(SpmdPartitioningTest, SubgroupAllToAllReshard3) {
  // Resharding [2,4,1] -> [1,2,4] moves tiling across two dimensions, so the
  // partitioner is expected to emit two subgrouped all-to-alls (one per moved
  // dimension) and a final collective-permute to correct the device order.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %param0 = f32[8,8,8] parameter(0),
    sharding={devices=[2,4,1]<=[8]}
  ROOT %copy = f32[8,8,8] copy(%param0),
    sharding={devices=[1,2,4]0,1,4,5,2,3,6,7}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  // First all-to-all moves one tiled dimension: reshape splits the tiles,
  // then transpose + reshape reassemble the new local shard shape.
  auto all_to_all = op::AllToAll(
      AllOf(op::Shape("f32[4,2,4,2]"), op::Reshape(op::Parameter(0))));
  auto reshape =
      AllOf(op::Shape("f32[4,8,2]"), op::Reshape(op::Transpose(all_to_all)));
  // Second all-to-all moves the remaining dimension.
  auto all_to_all2 =
      op::AllToAll(AllOf(op::Shape("f32[4,2,4,2]"), op::Reshape(reshape)))
;
  auto reshape2 =
      AllOf(op::Shape("f32[8,4,2]"), op::Reshape(op::Transpose(all_to_all2)));
  EXPECT_THAT(root, op::Copy(op::CollectivePermute(reshape2)));
}
TEST_P(SpmdPartitioningTest, Dot2DPartitionedNonContractingAndContracting0) {
  // Both operands are tiled on the non-contracting and contracting dims with
  // mismatched device orders; the expectation is that each side is partially
  // replicated along the contracting dim (all-reduce of a dynamic-update-
  // slice) before the local dot.
  absl::string_view hlo_text = R"(
HloModule module
ENTRY entry {
  %lhs = f32[48,12] parameter(0), sharding={devices=[2,2]<=[4]}
  %rhs = f32[32,12] parameter(1), sharding={devices=[2,2]0,2,1,3}
  ROOT %dot = f32[48,32] dot(%lhs, %rhs),
    lhs_batch_dims={}, rhs_batch_dims={},
    lhs_contracting_dims={1}, rhs_contracting_dims={1},
    sharding={devices=[2,2]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_text, /*num_devices=*/4));
  VLOG(1) << partitioned_module->ToString();
  const auto lhs_shard = AllOf(op::Shape("f32[24,6]"), op::Parameter(0));
  auto replicated_lhs =
      AllOf(op::Shape("f32[24,12]"),
            op::AllReduce(op::DynamicUpdateSlice(_, lhs_shard, _, _)));
  const auto rhs_shard = AllOf(op::Shape("f32[16,6]"), op::Parameter(1));
  auto replicated_rhs =
      AllOf(op::Shape("f32[16,12]"),
            op::AllReduce(op::DynamicUpdateSlice(_, rhs_shard, _, _)));
  EXPECT_THAT(partitioned_module->entry_computation()->root_instruction(),
              AllOf(op::Dot(replicated_lhs, replicated_rhs),
                    op::Shape("f32[24,16]")));
}
TEST_P(SpmdPartitioningTest, Dot2DPartitionedNonContractingAndContracting1) {
  // Same 2x2 sharding on both operands. The partitioner is expected to keep
  // the lhs shard, partially replicate the rhs along its non-contracting dim,
  // compute a partial dot, all-reduce over the contracting dim, and
  // dynamic-slice the result back to the output sharding.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[48,100] parameter(0), sharding={devices=[2,2]<=[4]}
  %rhs = f32[32,100] parameter(1), sharding={devices=[2,2]<=[4]}
  ROOT %dot = f32[48,32] dot(%lhs, %rhs),
    lhs_batch_dims={}, rhs_batch_dims={},
    lhs_contracting_dims={1}, rhs_contracting_dims={1},
    sharding={devices=[2,2]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto lhs = AllOf(op::Shape("f32[24,50]"), op::Parameter(0))
;
  const auto rhs = AllOf(op::Shape("f32[16,50]"), op::Parameter(1));
  // rhs becomes f32[32,50]: replicated across its row-tiling via all-reduce.
  auto partial_replicated_rhs =
      AllOf(op::Shape("f32[32,50]"),
            op::AllReduce(op::DynamicUpdateSlice(_, rhs, _, _)));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root, AllOf(op::Shape("f32[24,16]"),
                  op::DynamicSlice(
                      op::AllReduce(AllOf(op::Dot(lhs, partial_replicated_rhs),
                                          op::Shape("f32[24,32]"))),
                      _, _)));
}
TEST_P(SpmdPartitioningTest, Dot2DPartitionedNonContractingAndContracting2) {
  // Replicated lhs, 2x2-sharded rhs. The expectation below shows the lhs
  // dynamic-sliced to the output row shard and the rhs collective-permuted
  // then partially replicated along its contracting dim before the dot.
  absl::string_view hlo_text = R"(
HloModule module
ENTRY entry {
  %lhs = f32[48,100] parameter(0), sharding={replicated}
  %rhs = f32[32,100] parameter(1), sharding={devices=[2,2]<=[4]}
  ROOT %dot = f32[48,32] dot(%lhs, %rhs),
    lhs_batch_dims={}, rhs_batch_dims={},
    lhs_contracting_dims={1}, rhs_contracting_dims={1},
    sharding={devices=[2,2]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_text, /*num_devices=*/4));
  VLOG(1) << partitioned_module->ToString();
  const auto full_lhs = AllOf(op::Shape("f32[48,100]"), op::Parameter(0));
  const auto lhs_row_shard =
      AllOf(op::Shape("f32[24,100]"), op::DynamicSlice(full_lhs, _, _));
  const auto rhs_shard = AllOf(op::Shape("f32[16,50]"), op::Parameter(1));
  auto replicated_rhs = AllOf(
      op::Shape("f32[16,100]"),
      op::AllReduce(op::DynamicUpdateSlice(
          _, op::CollectivePermute(rhs_shard), _, _)));
  EXPECT_THAT(partitioned_module->entry_computation()->root_instruction(),
              AllOf(op::Shape("f32[24,16]"),
                    op::Dot(lhs_row_shard, replicated_rhs)));
}
TEST_P(SpmdPartitioningTest, Dot2DPartitionedNoncontractingAndContracting3) {
  // lhs is tiled on the contracting dim (partially replicated), rhs is tiled
  // on both dims. The uneven 23-row contracting dim requires masking the
  // padded elements (select against a zero broadcast) before the partial dot,
  // which is then all-reduced and sliced to the output sharding.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[23,24] parameter(0), sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
  %rhs = f32[23,32] parameter(1), sharding={devices=[2,2]<=[4]}
  ROOT %dot = f32[24,32] dot(%lhs, %rhs),
    lhs_contracting_dims={0}, rhs_contracting_dims={0},
    sharding={devices=[2,2]1,0,3,2}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto lhs = AllOf(op::Shape("f32[12,24]"), op::Parameter(0));
  // Mask out the padding rows introduced by ceil(23/2) = 12 local rows.
  auto masked_lhs = op::Select(_, lhs, op::Broadcast(op::Constant()));
  const auto rhs = AllOf(op::Shape("f32[12,16]"), op::Parameter(1));
  auto masked_rhs = op::Select(_, rhs, op::Broadcast(op::Constant()));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root,
              AllOf(op::Shape("f32[12,16]"),
                    op::DynamicSlice(
                        AllOf(op::Shape("f32[24,16]"),
                              op::AllReduce(op::Dot(masked_lhs, masked_rhs))),
                        _, _)));
}
TEST_P(SpmdPartitioningTest, Dot2DPartitionedBatchAndNonContracting) {
  // Batch dim is tiled on both sides; the rhs non-contracting tiling does not
  // match the output, so the rhs is partially replicated before the local dot.
  absl::string_view hlo_text = R"(
HloModule module
ENTRY entry {
  %lhs = f32[4,24,100] parameter(0), sharding={devices=[2,2,1]<=[4]}
  %rhs = f32[4,32,100] parameter(1), sharding={devices=[2,2,1]<=[4]}
  ROOT %dot = f32[4,24,32] dot(%lhs, %rhs),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={2},
    sharding={devices=[2,2,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_text, /*num_devices=*/4));
  VLOG(1) << partitioned_module->ToString();
  const auto lhs_shard = AllOf(op::Shape("f32[2,12,100]"), op::Parameter(0));
  const auto rhs_shard = AllOf(op::Shape("f32[2,16,100]"), op::Parameter(1));
  auto replicated_rhs =
      AllOf(op::Shape("f32[2,32,100]"),
            op::AllReduce(op::DynamicUpdateSlice(_, rhs_shard, _, _, _)));
  EXPECT_THAT(partitioned_module->entry_computation()->root_instruction(),
              AllOf(op::Shape("f32[2,12,32]"),
                    op::Dot(lhs_shard, replicated_rhs)));
}
TEST_P(SpmdPartitioningTest, Dot2DPartitionedBatchAndContracting) {
  // lhs is tiled on batch+contracting, rhs on non-contracting+contracting.
  // The rhs is resharded to match the lhs batch tiling with an all-to-all,
  // the partial dot is all-reduced over the contracting dim, and the result
  // is dynamic-sliced to the requested [2,2,1] output sharding.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[4,24,100] parameter(0), sharding={devices=[2,1,2]<=[4]}
  %rhs = f32[4,32,100] parameter(1), sharding={devices=[1,2,2]<=[4]}
  ROOT %dot = f32[4,24,32] dot(%lhs, %rhs),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={2},
    sharding={devices=[2,2,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto lhs = AllOf(op::Shape("f32[2,24,50]"), op::Parameter(0));
  const auto rhs = AllOf(op::Shape("f32[4,16,50]"), op::Parameter(1));
  // all-to-all moves rhs's non-contracting tiling onto the batch dim.
  auto resharded_rhs =
      AllOf(op::Shape("f32[2,32,50]"),
            op::Reshape(op::Transpose(op::AllToAll(op::Reshape(rhs)))));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Shape("f32[2,12,32]"),
                          op::DynamicSlice(
                              AllOf(op::Shape("f32[2,24,32]"),
                                    op::AllReduce(op::Dot(lhs, resharded_rhs))),
                              _, _, _)));
}
TEST_P(SpmdPartitioningTest, Dot2DPartitionedBatchAndContracting2) {
  // lhs tiled on batch+contracting, rhs replicated. The lhs is resharded via
  // all-to-all so its contracting tiling becomes a non-contracting tiling,
  // and the replicated rhs is dynamic-sliced to the batch shard.
  absl::string_view hlo_text = R"(
HloModule module
ENTRY entry {
  %lhs = f32[4,24,100] parameter(0), sharding={devices=[2,1,2]<=[4]}
  %rhs = f32[4,32,100] parameter(1), sharding={replicated}
  ROOT %dot = f32[4,24,32] dot(%lhs, %rhs),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={2},
    sharding={devices=[2,2,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_text, /*num_devices=*/4));
  VLOG(1) << partitioned_module->ToString();
  const auto lhs_shard = AllOf(op::Shape("f32[2,24,50]"), op::Parameter(0));
  auto resharded_lhs =
      AllOf(op::Shape("f32[2,12,100]"),
            op::Reshape(op::Transpose(op::AllToAll(op::Reshape(lhs_shard)))));
  const auto full_rhs = AllOf(op::Shape("f32[4,32,100]"), op::Parameter(1));
  const auto rhs_batch_shard =
      AllOf(op::Shape("f32[2,32,100]"), op::DynamicSlice(full_rhs, _, _, _));
  EXPECT_THAT(partitioned_module->entry_computation()->root_instruction(),
              AllOf(op::Shape("f32[2,12,32]"),
                    op::Dot(resharded_lhs, rhs_batch_shard)));
}
TEST_P(SpmdPartitioningTest,
       Dot2DPartitionedBatchNonContractingAndContracting) {
  // lhs tiled on batch+contracting, rhs tiled on batch+non-contracting.
  // Expect the lhs to be partially replicated along the contracting dim so
  // the local dot can keep the rhs non-contracting tiling.
  absl::string_view hlo_text = R"(
HloModule module
ENTRY entry {
  %lhs = f32[4,24,100] parameter(0), sharding={devices=[2,1,2]<=[4]}
  %rhs = f32[4,32,100] parameter(1), sharding={devices=[2,2,1]<=[4]}
  ROOT %dot = f32[4,24,32] dot(%lhs, %rhs),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={2},
    sharding={devices=[2,1,2]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_text, /*num_devices=*/4));
  VLOG(1) << partitioned_module->ToString();
  const auto lhs_shard = AllOf(op::Shape("f32[2,24,50]"), op::Parameter(0));
  const auto rhs_shard = AllOf(op::Shape("f32[2,16,100]"), op::Parameter(1));
  auto replicated_lhs =
      AllOf(op::Shape("f32[2,24,100]"),
            op::AllReduce(op::DynamicUpdateSlice(_, lhs_shard, _, _, _)));
  EXPECT_THAT(partitioned_module->entry_computation()->root_instruction(),
              AllOf(op::Shape("f32[2,24,16]"),
                    op::Dot(replicated_lhs, rhs_shard)));
}
TEST_P(SpmdPartitioningTest, Dot2DPartitionedBatchAndReshard) {
  // Both operands are tiled on batch dim 0 and the non-contracting dim, but
  // the output wants tiling on batch dim 1 instead. Expect: partially
  // replicate the rhs, dot locally, then reshard the result with a
  // reshape/all-to-all/transpose/reshape sequence.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[4,8,24,100] parameter(0), sharding={devices=[2,1,2,1]<=[4]}
  %rhs = f32[4,8,32,100] parameter(1), sharding={devices=[2,1,2,1]<=[4]}
  ROOT %dot = f32[4,8,24,32] dot(%lhs, %rhs),
    lhs_batch_dims={0,1}, rhs_batch_dims={0,1},
    lhs_contracting_dims={3}, rhs_contracting_dims={3},
    sharding={devices=[1,2,2,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto lhs = AllOf(op::Shape("f32[2,8,12,100]"), op::Parameter(0));
  const auto rhs = AllOf(op::Shape("f32[2,8,16,100]"), op::Parameter(1));
  auto partial_replicated_rhs =
      AllOf(op::Shape("f32[2,8,32,100]"),
            op::AllReduce(op::DynamicUpdateSlice(_, rhs, _, _, _, _)));
  auto dot =
      AllOf(op::Shape("f32[2,8,12,32]"), op::Dot(lhs, partial_replicated_rhs));
  // Reshard dot output from batch-dim-0 tiling to batch-dim-1 tiling.
  auto reshape = AllOf(op::Shape("f32[2,2,4,12,32]"), op::Reshape(dot));
  auto all_to_all = AllOf(op::Shape("f32[2,2,4,12,32]"), op::AllToAll(reshape));
  auto xpose = AllOf(op::Shape("f32[2,2,4,12,32]"), op::Transpose(all_to_all));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Shape("f32[4,4,12,32]"), op::Reshape(xpose)));
}
TEST_P(SpmdPartitioningTest, SimpleDotPartial) {
  // All three shardings agree (batch tiled, last dim replicated), so the
  // partitioned program should be a single local dot with no collectives.
  absl::string_view hlo_text = R"(
HloModule module
ENTRY entry {
  %lhs = f32[2,24,100] parameter(0),
    sharding={devices=[2,1,1,2]<=[4] last_tile_dim_replicate}
  %rhs = f32[2,32,100] parameter(1),
    sharding={devices=[2,1,1,2]<=[4] last_tile_dim_replicate}
  ROOT %dot = f32[2,24,32] dot(%lhs, %rhs),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={2},
    sharding={devices=[2,1,1,2]<=[4] last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_text, /*num_devices=*/4));
  VLOG(1) << partitioned_module->ToString();
  const auto lhs_shard = AllOf(op::Shape("f32[1,24,100]"), op::Parameter(0));
  const auto rhs_shard = AllOf(op::Shape("f32[1,32,100]"), op::Parameter(1));
  EXPECT_THAT(partitioned_module->entry_computation()->root_instruction(),
              AllOf(op::Shape("f32[1,24,32]"), op::Dot(lhs_shard, rhs_shard)));
}
TEST_P(SpmdPartitioningTest, SimpleSparseDot) {
  // A structured-sparse dot (2:4 sparsity on the lhs) carries a third
  // "meta" operand. The meta operand must be partitioned in lockstep with
  // the lhs, and the local dot must keep all three operands.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[2,24,128] parameter(0),
    sharding={devices=[2,2,1]<=[4]}
  %rhs = f32[2,32,256] parameter(1),
    sharding={devices=[2,1,1,2]<=[4] last_tile_dim_replicate}
  %meta = u16[2,24,16] parameter(2),
    sharding={devices=[2,2,1]<=[4]}
  ROOT %dot = f32[2,24,32] dot(%lhs, %rhs, %meta),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={2}, sparsity=L.2@2:4,
    sharding={devices=[2,2,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto lhs = AllOf(op::Shape("f32[1,12,128]"), op::Parameter(0));
  const auto rhs = AllOf(op::Shape("f32[1,32,256]"), op::Parameter(1));
  const auto meta = AllOf(op::Shape("u16[1,12,16]"), op::Parameter(2));
  // op::Dot has no 3-operand overload, so build the matcher directly.
  auto dot = AllOf(op::Shape("f32[1,12,32]"),
                   ::testing::MakeMatcher(new ::xla::testing::HloMatcher(
                       HloOpcode::kDot, {lhs, rhs, meta})));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, dot);
}
TEST_P(SpmdPartitioningTest, DotPartialContracting) {
  // Both operands are tiled only on the contracting dim, output replicated:
  // a local dot followed by a single all-reduce is expected.
  absl::string_view hlo_text = R"(
HloModule module
ENTRY entry {
  %lhs = f32[24,100] parameter(0),
    sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
  %rhs = f32[32,100] parameter(1),
    sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
  ROOT %dot = f32[24,32] dot(%lhs, %rhs),
    lhs_batch_dims={}, rhs_batch_dims={},
    lhs_contracting_dims={1}, rhs_contracting_dims={1},
    sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_text, /*num_devices=*/4));
  VLOG(1) << partitioned_module->ToString();
  const auto lhs_shard = AllOf(op::Shape("f32[24,50]"), op::Parameter(0));
  const auto rhs_shard = AllOf(op::Shape("f32[32,50]"), op::Parameter(1));
  auto partial_dot =
      AllOf(op::Shape("f32[24,32]"), op::Dot(lhs_shard, rhs_shard));
  EXPECT_THAT(partitioned_module->entry_computation()->root_instruction(),
              op::AllReduce(partial_dot));
}
TEST_P(SpmdPartitioningTest, DotPartialContracting2) {
  // Contracting-dim tiling on both inputs, but the output is tiled on the
  // lhs non-contracting dim. Expect the lhs shard to be dynamic-sliced to the
  // output rows before the partial dot + all-reduce.
  absl::string_view hlo_text = R"(
HloModule module
ENTRY entry {
  %lhs = f32[24,100] parameter(0),
    sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
  %rhs = f32[32,100] parameter(1),
    sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
  ROOT %dot = f32[24,32] dot(%lhs, %rhs),
    lhs_batch_dims={}, rhs_batch_dims={},
    lhs_contracting_dims={1}, rhs_contracting_dims={1},
    sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_text, /*num_devices=*/4));
  VLOG(1) << partitioned_module->ToString();
  const auto lhs_shard = AllOf(op::Shape("f32[24,50]"), op::Parameter(0));
  const auto rhs_shard = AllOf(op::Shape("f32[32,50]"), op::Parameter(1));
  auto partial_dot = AllOf(
      op::Shape("f32[12,32]"),
      op::Dot(AllOf(op::Shape("f32[12,50]"),
                    op::DynamicSlice(lhs_shard, _, _)),
              rhs_shard));
  EXPECT_THAT(partitioned_module->entry_computation()->root_instruction(),
              op::AllReduce(partial_dot));
}
TEST_P(SpmdPartitioningTest, DotPartialContracting3) {
  // 8 devices, contracting tiled by 2 with 4-way replication. The output's
  // partial replication lets the rhs be dynamic-sliced on its
  // non-contracting dim; the partial dot is all-reduced and the result's
  // device assignment is fixed up with a collective-permute.
  absl::string_view hlo_text = R"(
HloModule module
ENTRY entry {
  %lhs = f32[24,100] parameter(0),
    sharding={devices=[1,2,4]<=[8] last_tile_dim_replicate}
  %rhs = f32[32,100] parameter(1),
    sharding={devices=[1,2,4]<=[8] last_tile_dim_replicate}
  ROOT %dot = f32[24,32] dot(%lhs, %rhs),
    lhs_batch_dims={}, rhs_batch_dims={},
    lhs_contracting_dims={1}, rhs_contracting_dims={1},
    sharding={devices=[1,2,4]<=[8] last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_text, /*num_devices=*/8));
  VLOG(1) << partitioned_module->ToString();
  const auto lhs_shard = AllOf(op::Shape("f32[24,50]"), op::Parameter(0));
  const auto rhs_slice =
      AllOf(op::Shape("f32[16,50]"), op::DynamicSlice(op::Parameter(1), _, _));
  auto partial_dot =
      AllOf(op::Shape("f32[24,16]"), op::Dot(lhs_shard, rhs_slice));
  EXPECT_THAT(partitioned_module->entry_computation()->root_instruction(),
              op::CollectivePermute(op::AllReduce(partial_dot)));
}
TEST_P(SpmdPartitioningTest, DotBatchAndPartialContracting) {
  // Batch and non-contracting tiling already line up with the output; only
  // the lhs contracting tiling needs reduction, so the expectation is a
  // local dot followed by one all-reduce.
  absl::string_view hlo_text = R"(
HloModule module
ENTRY entry {
  %lhs = f32[4,24,100] parameter(0),
    sharding={devices=[2,2,2]<=[8]}
  %rhs = f32[4,32,100] parameter(1),
    sharding={devices=[2,1,2,2]0,2,1,3,4,6,5,7 last_tile_dim_replicate}
  ROOT %dot = f32[4,24,32] dot(%lhs, %rhs),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={2},
    sharding={devices=[2,2,1,2]<=[8] last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_text, /*num_devices=*/8));
  VLOG(1) << partitioned_module->ToString();
  const auto lhs_shard = AllOf(op::Shape("f32[2,12,50]"), op::Parameter(0));
  const auto rhs_shard = AllOf(op::Shape("f32[2,32,50]"), op::Parameter(1));
  auto partial_dot =
      AllOf(op::Shape("f32[2,12,32]"), op::Dot(lhs_shard, rhs_shard));
  EXPECT_THAT(partitioned_module->entry_computation()->root_instruction(),
              op::AllReduce(partial_dot));
}
TEST_P(SpmdPartitioningTest, DotPartialNonContracting) {
  // lhs tiled on a non-contracting dim (partially replicated), rhs tiled on
  // both of its dims. Expect the rhs contracting shard to be partially
  // replicated (all-reduce of a dynamic-update-slice into a broadcast)
  // before the local dot; no further collectives on the output.
  absl::string_view hlo_text = R"(
HloModule module
ENTRY entry {
  %lhs = f32[24,8,100] parameter(0),
    sharding={devices=[2,1,1,2]<=[4] last_tile_dim_replicate}
  %rhs = f32[32,100] parameter(1), sharding={devices=[2,2]0,2,1,3}
  ROOT %dot = f32[24,8,32] dot(%lhs, %rhs),
    lhs_batch_dims={}, rhs_batch_dims={},
    lhs_contracting_dims={2}, rhs_contracting_dims={1},
    sharding={devices=[2,1,2]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_text, /*num_devices=*/4));
  VLOG(1) << partitioned_module->ToString();
  const auto lhs_shard = AllOf(op::Shape("f32[12,8,100]"), op::Parameter(0));
  const auto rhs_shard = AllOf(op::Shape("f32[16,50]"), op::Parameter(1));
  auto replicated_rhs = AllOf(
      op::Shape("f32[16,100]"),
      op::AllReduce(op::DynamicUpdateSlice(op::Broadcast(_), rhs_shard, _, _)));
  EXPECT_THAT(partitioned_module->entry_computation()->root_instruction(),
              AllOf(op::Shape("f32[12,8,16]"),
                    op::Dot(lhs_shard, replicated_rhs)));
}
TEST_P(SpmdPartitioningTest, DotPartialNonContractingPartialMatch) {
  // The lhs first non-contracting dim already matches the output tiling, but
  // its second non-contracting dim does not; expect that dim to be partially
  // replicated (all-reduce of dynamic-update-slice into a broadcast) before
  // the local dot.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[24,8,100] parameter(0), sharding={devices=[2,2,1]<=[4]}
  %rhs = f32[32,100] parameter(1),
    sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}
  ROOT %dot = f32[24,8,32] dot(%lhs, %rhs),
    lhs_batch_dims={}, rhs_batch_dims={},
    lhs_contracting_dims={2}, rhs_contracting_dims={1},
    sharding={devices=[2,1,2]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4))
;
  VLOG(1) << module->ToString();
  const auto lhs = AllOf(op::Shape("f32[12,4,100]"), op::Parameter(0));
  const auto rhs = AllOf(op::Shape("f32[16,100]"), op::Parameter(1));
  auto partially_replicated_lhs = AllOf(
      op::Shape("f32[12,8,100]"),
      op::AllReduce(op::DynamicUpdateSlice(op::Broadcast(_), lhs, _, _, _)));
  auto dot =
      AllOf(op::Shape("f32[12,8,16]"), op::Dot(partially_replicated_lhs, rhs));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, dot);
}
TEST_P(SpmdPartitioningTest, DotPartialContractingPartialMatch) {
  // Two contracting dims: the second (size 100) is tiled identically on both
  // sides, the first (size 8) only on the lhs. Expect the rhs to be
  // dynamic-sliced to match the lhs first-contracting shard, then the local
  // dot reduced with two all-reduces (one per contracting tiling).
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[24,8,100] parameter(0), sharding={devices=[1,2,2]<=[4]}
  %rhs = f32[32,8,100] parameter(1),
    sharding={devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}
  ROOT %dot = f32[24,32] dot(%lhs, %rhs),
    lhs_batch_dims={}, rhs_batch_dims={},
    lhs_contracting_dims={1,2}, rhs_contracting_dims={1,2},
    sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto lhs = AllOf(op::Shape("f32[24,4,50]"), op::Parameter(0));
  const auto rhs = AllOf(op::Shape("f32[32,8,50]"), op::Parameter(1));
  auto dot = AllOf(op::Shape("f32[24,32]"),
                   op::Dot(lhs, AllOf(op::Shape("f32[32,4,50]"),
                                      op::DynamicSlice(rhs, _, _, _))));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, op::AllReduce(op::AllReduce(dot)));
}
TEST_P(SpmdPartitioningTest, DotNonContractingPartialMatchContractingMatch) {
  // The contracting dim tiling matches between operands; the rhs
  // non-contracting column shard must be partially replicated first. After
  // the partial dot + all-reduce, the output is dynamic-sliced down to its
  // requested [2,2,1] tiling.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[24,8,100] parameter(0), sharding={devices=[2,1,2]<=[4]}
  %rhs = f32[100,50] parameter(1), sharding={devices=[2,2]0,2,1,3}
  ROOT %dot = f32[24,8,50] dot(%lhs, %rhs),
    lhs_batch_dims={}, rhs_batch_dims={},
    lhs_contracting_dims={2}, rhs_contracting_dims={0},
    sharding={devices=[2,2,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto lhs = AllOf(op::Shape("f32[12,8,50]"), op::Parameter(0));
  const auto rhs = AllOf(op::Shape("f32[50,25]"), op::Parameter(1));
  auto dot = AllOf(
      op::Shape("f32[12,8,50]"),
      op::Dot(lhs, AllOf(op::Shape("f32[50,50]"),
                         op::AllReduce(op::DynamicUpdateSlice(_, rhs, _, _)))));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Shape("f32[12,4,50]"),
                          op::DynamicSlice(op::AllReduce(dot), _, _, _)))
      << module->ToString();
}
TEST_P(SpmdPartitioningTest, DotLHSMutiNonContractingRHSNotMatch) {
  // lhs has two tiled non-contracting dims matching the output; the rhs
  // contracting shard does not match, so it is partially replicated
  // (all-reduce of dynamic-update-slice) and the dot then needs no output
  // collectives.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[24,8,10] parameter(0), sharding={devices=[2,2,1]<=[4]}
  %rhs = f32[10,50] parameter(1),
    sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}
  ROOT %dot = f32[24,8,50] dot(%lhs, %rhs),
    lhs_batch_dims={}, rhs_batch_dims={},
    lhs_contracting_dims={2}, rhs_contracting_dims={0},
    sharding={devices=[2,2,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto lhs = AllOf(op::Shape("f32[12,4,10]"), op::Parameter(0));
  const auto rhs = AllOf(op::Shape("f32[5,50]"), op::Parameter(1));
  auto dot = AllOf(
      op::Shape("f32[12,4,50]"),
      op::Dot(lhs, AllOf(op::Shape("f32[10,50]"),
                         op::AllReduce(op::DynamicUpdateSlice(_, rhs, _, _)))));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, dot) << module->ToString();
}
TEST_P(SpmdPartitioningTest, ReshardLHSRHSToMatchDotSharding) {
  // Two dots share the same operands but request different output shardings;
  // each dot should see operands resharded to match its own sharding, with
  // no collectives after the dots themselves.
  absl::string_view hlo_text = R"(
HloModule module
ENTRY %main.7 {
  %p0 = bf16[32,97] parameter(0), sharding={devices=[32,1]<=[8,4]T(1,0)}
  %p1 = bf16[48,64,97] parameter(1), sharding={devices=[8,4,1]<=[32]}
  %dot.0 = bf16[32,48,64] dot(%p0, %p1), lhs_contracting_dims={1}, rhs_contracting_dims={2}, sharding={devices=[4,8,1]<=[8,4]T(1,0)}
  %dot.1 = bf16[32,48,64] dot(%p0, %p1), lhs_contracting_dims={1}, rhs_contracting_dims={2}, sharding={devices=[4,4,1,2]<=[8,4]T(1,0) last_tile_dim_replicate}
  ROOT %tuple = tuple(%dot.0, %dot.1), sharding={{devices=[4,8,1]<=[8,4]T(1,0)}, {devices=[4,4,1,2]<=[8,4]T(1,0) last_tile_dim_replicate}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_text, /*num_devices=*/32));
  VLOG(1) << partitioned_module->ToString();
  const auto lhs_shard = AllOf(op::Shape("bf16[8,97]"));
  const auto rhs_shard0 = AllOf(op::Shape("bf16[6,64,97]"));
  const auto rhs_shard1 = AllOf(op::Shape("bf16[12,64,97]"));
  auto first_dot =
      AllOf(op::Shape("bf16[8,6,64]"), op::Dot(lhs_shard, rhs_shard0));
  auto second_dot =
      AllOf(op::Shape("bf16[8,12,64]"), op::Dot(lhs_shard, rhs_shard1));
  EXPECT_THAT(partitioned_module->entry_computation()->root_instruction(),
              AllOf(op::Shape("(bf16[8,6,64], bf16[8,12,64])"),
                    op::Tuple(first_dot, second_dot)));
}
TEST_P(SpmdPartitioningTest, PartiallyReplicateRHS) {
const char* const hlo_string = R"(
HloModule module
ENTRY main {
lhs = bf16[16384,2048] parameter(0), sharding={devices=[16,8]<=[128]}
rhs = bf16[16384,256] parameter(1), sharding={devices=[128,1]<=[128]}
ROOT dot = bf16[2048,256] dot(lhs, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[8,1,16]<=[16,8]T(1,0) last_tile_dim_replicate}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto module, PartitionComputation(hlo_string, 128));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("bf16[1024,256]"), op::Parameter(0));
const auto rhs = AllOf(op::Shape("bf16[1024,256]"),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(), op::Parameter(1), _, _)));
auto dot = AllOf(op::Shape("bf16[256,256]"), op::Dot(lhs, rhs));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::AllReduce(dot));
}
TEST_P(SpmdPartitioningTest, AllToAllAndPartialReplicateRHS) {
  // The rhs needs two steps to match the dot: an all-to-all to move its
  // tiling across dims, then partial replication (all-reduce of a
  // dynamic-update-slice into a broadcast). The final contracting reduction
  // is the all-reduce around the dot.
  const char* const hlo_string = R"(
HloModule module
ENTRY main {
  lhs = bf16[64,64] parameter(0), sharding={devices=[2,2,2]<=[8] last_tile_dim_replicate}
  rhs = bf16[64,64,64] parameter(1), sharding={devices=[1,2,4]<=[2,2,2]T(2,1,0)}
  ROOT dot = bf16[64,64,64] dot(lhs, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={2}, sharding={devices=[2,2,1,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto lhs = AllOf(op::Shape("bf16[32,32]"), op::Parameter(0));
  const auto all_to_all_p1 = AllOf(
      op::Shape("bf16[32,64,16]"),
      op::Reshape(op::Transpose(op::AllToAll(op::Reshape(op::Parameter(1))))));
  const auto rhs = AllOf(op::Shape("bf16[32,64,32]"),
                         op::AllReduce(op::DynamicUpdateSlice(
                             op::Broadcast(), all_to_all_p1, _, _, _)));
  auto dot = AllOf(op::Shape("bf16[32,32,64]"), op::Dot(lhs, rhs));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, op::AllReduce(dot));
}
TEST_P(SpmdPartitioningTest, ReplicateLHSofConv) {
const char* const hlo_string = R"(
HloModule module
ENTRY main {
lhs = bf16[128,8,8,1280] parameter(0), sharding={devices=[128,1,1,1]<=[128]}
rhs = bf16[3,3,1280,1280] parameter(1), sharding={devices=[1,1,1,8,16]<=[16,8]T(1,0) last_tile_dim_replicate}
ROOT conv = bf16[128,8,8,1280] convolution(lhs, rhs), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, sharding={devices=[1,1,1,8,16]<=[16,8]T(1,0) last_tile_dim_replicate}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto module, PartitionComputation(hlo_string, 128));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("bf16[128,8,8,1280]"),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(), op::Parameter(0), _, _, _, _)));
const auto rhs = AllOf(op::Shape("bf16[3,3,1280,160]"), op::Parameter(1));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Shape("bf16[128,8,8,160]"), op::Convolution(lhs, rhs)));
}
TEST_P(SpmdPartitioningTest, ElementwiseTest_SubgroupSharding_TileToReplicate) {
  // Inside a manual subgroup, an operand tiled on dim 1 is resharded to
  // replicated-within-subgroup: the tiled multiply result is masked
  // (select), placed into a broadcast with dynamic-update-slice, and
  // all-reduced across the subgroup before the final add.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  constant = f32[6,3]{1,0}
    constant({{1,3,7},{5,1,4},{1,2,8},{2,3,7},{5,2,4},{2,2,8}}),
    sharding={devices=[1,2,2]<=[4] last_tile_dims={manual}}
  constant.1 = f32[6,3]{1,0}
    constant({{2,7,2},{2,9,2},{2,6,2},{3,7,2},{2,9,3},{2,3,2}}),
    sharding={devices=[1,2,2]<=[4] last_tile_dims={manual}}
  multiply = f32[6,3]{1,0} multiply(constant, constant.1),
    sharding={devices=[1,2,2]<=[4] last_tile_dims={manual}}
  ROOT add = f32[6,3]{1,0} add(multiply, constant.1),
    sharding={devices=[1,1,2,2]<=[4] last_tile_dims={replicated, manual}}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  // Dim 1 (size 3) tiled by 2 requires padding to 4 before slicing out the
  // local f32[6,2] shard.
  auto multiply_lhs =
      AllOf(op::Shape("f32[6,2]"),
            op::DynamicSlice(op::Pad(op::Constant(), op::Constant()),
                             op::Constant(), op::Reshape()));
  auto multiply_rhs =
      AllOf(op::Shape("f32[6,2]"),
            op::DynamicSlice(op::Pad(op::Constant(), op::Constant()),
                             op::Constant(), op::Reshape()));
  auto multiply =
      AllOf(op::Shape("f32[6,2]"), op::Multiply(multiply_lhs, multiply_rhs));
  // Reassemble the full f32[6,3] operand within the subgroup.
  auto replicated_lhs = AllOf(op::Shape("f32[6,3]"),
                              op::AllReduce(op::DynamicUpdateSlice(
                                  op::Broadcast(), op::Select(_, multiply, _),
                                  op::Constant(), op::Reshape())));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Shape("f32[6,3]"),
                          op::Add(replicated_lhs, op::Constant())));
}
TEST_P(SpmdPartitioningTest, ElementwiseTest_SubgroupSharding_ReplicateToTile) {
  // The inverse of the TileToReplicate case: the replicated-within-subgroup
  // multiply result is padded and dynamic-sliced down to the per-device
  // f32[6,2] shard before the tiled add.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  constant = f32[6,3]{1,0}
    constant({{1,3,7},{5,1,4},{1,2,8},{2,3,7},{5,2,4},{2,2,8}}),
    sharding={devices=[1,1,2,2]<=[4] last_tile_dims={replicated,manual}}
  constant.1 = f32[6,3]{1,0}
    constant({{2,7,2},{2,9,2},{2,6,2},{3,7,2},{2,9,3},{2,3,2}}),
    sharding={devices=[1,1,2,2]<=[4] last_tile_dims={replicated,manual}}
  multiply = f32[6,3]{1,0} multiply(constant, constant.1),
    sharding={devices=[1,1,2,2]<=[4] last_tile_dims={replicated,manual}}
  ROOT add = f32[6,3]{1,0} add(multiply, constant.1),
    sharding={devices=[1,2,2]<=[4] last_tile_dims={manual}}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  // Multiply runs fully replicated within the subgroup.
  auto multiply = AllOf(op::Shape("f32[6,3]"),
                        op::Multiply(op::Constant(), op::Constant()));
  // Dim 1 (size 3) tiled by 2: pad to 4, then slice the local half.
  auto add_lhs = AllOf(op::Shape("f32[6,2]"),
                       op::DynamicSlice(op::Pad(multiply, op::Constant()),
                                        op::Constant(), op::Reshape()));
  auto add_rhs = AllOf(op::Shape("f32[6,2]"),
                       op::DynamicSlice(op::Pad(op::Constant(), op::Constant()),
                                        op::Constant(), op::Reshape()));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Shape("f32[6,2]"), op::Add(add_lhs, add_rhs)));
}
TEST_P(SpmdPartitioningTest,
       ElementwiseTest_PartialReplicateToTiledHaloExchange) {
  // Going from 2-way tiling (with replication) to 4-way tiling of a size-6
  // dim leaves uneven shards, so the partitioner performs a halo exchange:
  // a collective-permute brings the neighbor's rows, which are concatenated
  // and sliced down to the local f32[2,3] shard.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  input = f32[6,3] parameter(0),
    sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
  ROOT copy = f32[6,3]{1,0} copy(input),
    sharding={devices=[4,1]<=[4]}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  auto input = AllOf(op::Shape("f32[3,3]"), op::Parameter(0));
  // One row of halo fetched from the neighboring shard.
  auto right_halo =
      AllOf(op::Shape("f32[1,3]"), op::CollectivePermute(op::Slice(input)));
  auto concat = op::Concatenate(
      input, AllOf(op::Shape("f32[2,3]"), op::Pad(right_halo, _)));
  auto valid_slice =
      AllOf(op::Shape("f32[4,3]"), op::DynamicSlice(concat, _, _));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Shape("f32[2,3]"),
                          op::Copy(op::DynamicSlice(valid_slice, _, _))));
}
TEST_P(SpmdPartitioningTest, TileToPartialReplicateReshard) {
  // Resharding [2,2] -> [2,1] with replication merges the column tiling
  // back together via all-reduce of a dynamic-update-slice.
  absl::string_view hlo_text = R"(
HloModule module
ENTRY entry {
  %param0 = f32[8,8] parameter(0)
  %copy = f32[8,8] copy(%param0),
    sharding={devices=[2,2]<=[4]}
  ROOT %copy0 = f32[8,8] copy(%copy),
    sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto partitioned_module,
                          PartitionComputation(hlo_text, /*num_devices=*/4));
  VLOG(1) << partitioned_module->ToString();
  auto tiled_shard =
      AllOf(op::Shape("f32[4,4]"),
            op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(),
                                      op::Reshape())));
  EXPECT_THAT(partitioned_module->entry_computation()->root_instruction(),
              AllOf(op::Shape("f32[4,8]"),
                    op::Copy(op::AllReduce(op::DynamicUpdateSlice(
                        op::Broadcast(_), tiled_shard, _, _)))));
}
TEST_P(SpmdPartitioningTest, TileToPartialReplicateReshardUnevenPartition) {
  // Uneven reshard on 6 devices ([2,3] -> [1,2] with 3-way replication).
  // The padded shard is masked with a select, re-assembled via
  // all-reduce(dynamic-update-slice), and re-tiled with an all-to-all.
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %param0 = f32[8,8] parameter(0), sharding={devices=[2,3]<=[6]}
  ROOT %copy0 = f32[8,8] copy(%param0),
    sharding={devices=[1,2,3]<=[6] last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 6));
  VLOG(1) << module->ToString();
  // 8 columns split 3 ways is uneven (ceil = 3), hence the masking select.
  auto tiled = AllOf(op::Shape("f32[4,3]"), op::Select(_, op::Parameter(0), _));
  auto partially_replicated = AllOf(
      op::Shape("f32[8,4]"),
      op::Copy(op::Reshape(op::Transpose(op::AllToAll(op::Reshape(op::AllReduce(
          op::DynamicUpdateSlice(op::Broadcast(), tiled, _, _))))))));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, partially_replicated);
  // The all-reduce must reduce across the 3-way column tiling subgroups.
  const HloInstruction* all_reduce =
      FindInstruction(module.get(), "all-reduce");
  EXPECT_NE(all_reduce, nullptr);
  EXPECT_TRUE(
      absl::StrContains(all_reduce->ToString(), "replica_groups=[2,3]<=[6]"));
}
TEST_P(SpmdPartitioningTest, PartialReplicateToTileReshardUnevenPartition) {
  // Inverse of the previous test: [1,2] with 3-way replication -> [2,3].
  // The partially replicated shard is re-tiled with an all-to-all, padded
  // (8 columns / 3 is uneven), and dynamic-sliced to the local f32[4,3].
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %param0 = f32[8,8] parameter(0),
    sharding={devices=[1,2,3]<=[6] last_tile_dim_replicate}
  ROOT %copy0 = f32[8,8] copy(%param0), sharding={devices=[2,3]<=[6]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 6));
  VLOG(1) << module->ToString();
  auto partial_replicated = AllOf(op::Shape("f32[8,4]"), op::Parameter(0));
  auto tiled = AllOf(
      op::Shape("f32[4,3]"),
      op::Copy(op::DynamicSlice(op::Pad(op::Reshape(op::Transpose(op::AllToAll(
                                            op::Reshape(partial_replicated)))),
                                        _),
                                _, _)));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, tiled);
}
// Reshards f32[8,8] from [2,1] tiling with 2-way replication to a full [2,2]
// tiling over 4 devices. Since each destination shard is a sub-slice of the
// partially replicated shard already present locally, the expected lowering is
// just a local DynamicSlice (no collectives).
TEST_P(SpmdPartitioningTest, PartialReplicateToTileReshard) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[8,8] parameter(0)
%copy = f32[8,8] copy(%param0),
sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
ROOT %copy0 = f32[8,8] copy(%copy),
sharding={devices=[2,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
auto partially_replicated =
AllOf(op::Shape("f32[4,8]"),
op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(),
op::Constant())))
;
auto tiled =
AllOf(op::Shape("f32[4,4]"),
op::Copy(op::DynamicSlice(partially_replicated, op::Subtract(),
op::Subtract())));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, tiled);
}
// Reshards f32[8,8] between two partially replicated shardings where the
// destination is *less* tiled ([2,2]+2 replicas -> [2,1]+4 replicas): the
// second dimension must be un-tiled, which requires gathering shards via
// DynamicUpdateSlice into a broadcast buffer followed by an AllReduce.
TEST_P(SpmdPartitioningTest,
       PartialReplicateToPartialReplicateReshard_AllReduce) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[8,8] parameter(0)
%copy = f32[8,8] copy(param0),
sharding={devices=[2,2,2]<=[8] last_tile_dim_replicate}
ROOT %copy0 = f32[8,8] copy(%copy),
sharding={devices=[2,1,4]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto partially_replicated_init =
AllOf(op::Shape("f32[4,4]"),
op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(),
op::Reshape())));
auto partially_replicated =
AllOf(op::Shape("f32[4,8]"),
op::Copy(op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(_), partially_replicated_init, _, _))));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, partially_replicated);
}
// Reshards f32[8,8] between partially replicated shardings where the
// destination is *more* tiled ([2,1]+4 replicas -> [2,2]+2 replicas): each
// device already holds the data it needs, so a local DynamicSlice suffices
// (no collectives expected).
TEST_P(SpmdPartitioningTest,
       PartialReplicateToPartialReplicateReshard_DynamicSlice) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[8,8] parameter(0)
%copy = f32[8,8] copy(%param0),
sharding={devices=[2,1,4]<=[8] last_tile_dim_replicate}
ROOT %copy0 = f32[8,8] copy(%copy),
sharding={devices=[2,2,2]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto partially_replicated =
AllOf(op::Shape("f32[4,8]"),
op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(),
op::Constant())));
auto tiled =
AllOf(op::Shape("f32[4,4]"),
op::Copy(op::DynamicSlice(partially_replicated, op::Subtract(),
op::Subtract())));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, tiled);
}
// Reshards from [2,2]+2 replicas to [1,2]+4 replicas: the tiled dimension
// moves from dim 0 to dim 1, so besides the DynamicUpdateSlice + AllReduce
// gather, the shards must first be re-routed between devices with a
// CollectivePermute.
TEST_P(SpmdPartitioningTest,
       PartialReplicateToPartialReplicateReshardWithCollectivePermute) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[8,8] parameter(0)
%copy = f32[8,8] copy(param0),
sharding={devices=[2,2,2]<=[8] last_tile_dim_replicate}
ROOT %copy0 = f32[8,8] copy(%copy),
sharding={devices=[1,2,4]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto partially_replicated_init =
AllOf(op::Shape("f32[4,4]"),
op::CollectivePermute(op::Copy(op::DynamicSlice(
op::Parameter(0), op::Reshape(), op::Reshape()))));
auto partially_replicated =
AllOf(op::Shape("f32[8,4]"),
op::Copy(op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(_), partially_replicated_init, _, _))));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, partially_replicated);
}
// Reverse direction of the previous test: [1,2]+4 replicas -> [2,2]+2
// replicas. The destination is more tiled, so a local DynamicSlice extracts
// the shard and a CollectivePermute moves it to its owning device.
TEST_P(SpmdPartitioningTest,
       PartialReplicateToPartialReplicateReshardCollectivePermute1) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[8,8] parameter(0)
%copy = f32[8,8] copy(%param0),
sharding={devices=[1,2,4]<=[8] last_tile_dim_replicate}
ROOT %copy0 = f32[8,8] copy(%copy),
sharding={devices=[2,2,2]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto partially_replicated =
AllOf(op::Shape("f32[8,4]"),
op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(),
op::Reshape())));
auto tiled =
AllOf(op::Shape("f32[4,4]"),
op::Copy(op::CollectivePermute(op::DynamicSlice(
partially_replicated, op::Subtract(), op::Subtract()))));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, tiled);
}
// Reshards f32[6,3] from a 4-way to a 2-way tiling on dim 0 (6 is not evenly
// divisible by 4, so shard boundaries do not line up). Expects a halo exchange
// (CollectivePermute of a boundary slice, concatenated with the local shard)
// followed by the DynamicUpdateSlice + AllReduce gather to form the new
// 3-row-per-device layout.
TEST_P(SpmdPartitioningTest,
       PartialReplicateToPartialReplicateReshardHaloExchange) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[6,3] parameter(0),
sharding={devices=[4,1,2]<=[8] last_tile_dim_replicate}
ROOT %copy0 = f32[6,3] copy(%param0),
sharding={devices=[2,1,4]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto input = AllOf(op::Shape("f32[2,3]"), op::Parameter(0));
// Local shard combined with a halo row received from a neighbor.
auto piece1 =
AllOf(op::Shape("f32[2,3]"),
op::Select(_, op::Pad(op::CollectivePermute(op::Slice(input)), _),
input));
auto piece2 = AllOf(op::Shape("f32[1,3]"), op::Slice(input));
auto concat = op::Concatenate(piece1, piece2);
auto partially_replicated =
AllOf(op::Shape("f32[3,3]"),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(_),
op::Select(_, op::DynamicSlice(concat, _, _), _), _, _)));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Copy(partially_replicated));
}
// Reverse of the previous halo-exchange case: f32[6,3] from 2-way to 4-way
// tiling on dim 0. Each 3-row shard must be extended with a padded halo from
// its neighbor (CollectivePermute + Concatenate) before the smaller 2-row
// destination shards are sliced out.
TEST_P(SpmdPartitioningTest,
       PartialReplicateToPartialReplicateReshardHaloExchange1) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[6,3] parameter(0),
sharding={devices=[2,1,4]<=[8] last_tile_dim_replicate}
ROOT %copy0 = f32[6,3] copy(%param0),
sharding={devices=[4,1,2]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto input = AllOf(op::Shape("f32[3,3]"), op::Parameter(0));
auto slice =
AllOf(op::Shape("f32[4,3]"),
op::DynamicSlice(
op::Concatenate(
input, op::Pad(op::CollectivePermute(op::Slice(input)), _)),
_, _));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("f32[2,3]"),
op::Copy(op::DynamicSlice(slice, _, _))));
}
// Convolution with batch_group_count=1024 where LHS, RHS, and output are all
// sharded 2-way on the (group) feature dimension. Since shardings already
// agree, the conv partitions directly into per-device halves with no resharding
// collectives. (Note: "Bath" in the test name is a historical typo kept for
// test-registry stability.)
TEST_P(SpmdPartitioningTest, PartitionConvWithBathGroupCount) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,1,1,2]0,1}
%rhs = f32[16,801,1,1024] parameter(1)
%rhs.copy = f32[16,801,1,1024] copy(%rhs),
sharding={devices=[1,1,1,2]0,1}
ROOT %conv = f32[5,1,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=f01b_i01o->01bf,batch_group_count=1024,
window={size=801x1 pad=2_2x0_0},
sharding={devices=[1,1,1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,801,1,512]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,801,1,512]"));
EXPECT_THAT(root,
AllOf(op::Convolution(lhs, rhs), op::Shape("f32[5,1,1,512]")));
}
// batch_group_count conv where the RHS is sharded on a spatial dimension
// instead of the group dimension. Expects the RHS to be resharded to match the
// LHS's feature-dim sharding via Reshape/AllToAll/Transpose/Slice before the
// conv runs on per-device halves.
TEST_P(SpmdPartitioningTest, PartitionConvWithBathGroupCountRHSAlignWithLHS) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,1,1,2]0,1}
%rhs = f32[16,801,1,1024] parameter(1)
%rhs.copy = f32[16,801,1,1024] copy(%rhs),
sharding={devices=[1,2,1,1]0,1}
ROOT %conv = f32[5,1,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=f01b_i01o->01bf,batch_group_count=1024,
window={size=801x1 pad=2_2x0_0},
sharding={devices=[1,1,1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,801,1,512]"));
// RHS is initially sharded (with padding) on the spatial dim (801 -> 401).
const auto rhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[16,401,1,1024]"));
auto resharded_rhs = AllOf(
op::Slice(op::Reshape(op::Transpose(op::AllToAll(op::Reshape(rhs))))),
op::Shape("f32[16,801,1,512]"));
EXPECT_THAT(root, AllOf(op::Convolution(lhs, resharded_rhs),
op::Shape("f32[5,1,1,512]")));
}
// Mirror of the previous test: here the LHS carries the spatial-dim sharding
// and must be resharded (Reshape/AllToAll/Transpose/Slice) to align with the
// RHS's feature-dim sharding before the partitioned conv.
TEST_P(SpmdPartitioningTest, PartitionConvWithBathGroupCountLHSAlignWithRHS) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,2,1,1]0,1}
%rhs = f32[16,801,1,1024] parameter(1)
%rhs.copy = f32[16,801,1,1024] copy(%rhs),
sharding={devices=[1,1,1,2]0,1}
ROOT %conv = f32[5,1,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=f01b_i01o->01bf,batch_group_count=1024,
window={size=801x1 pad=2_2x0_0},
sharding={devices=[1,1,1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,801,1,512]"));
const auto lhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[16,401,1,1024]"));
auto resharded_lhs = AllOf(
op::Slice(op::Reshape(op::Transpose(op::AllToAll(op::Reshape(lhs))))),
op::Shape("f32[16,801,1,512]"));
EXPECT_THAT(root, AllOf(op::Convolution(resharded_lhs, rhs),
op::Shape("f32[5,1,1,512]")));
}
// batch_group_count conv where LHS/RHS share a feature-dim sharding but the
// output wants a spatial-dim ([2,1,1,1]) sharding. The conv runs on the
// operands' layout, then the result is resharded for the output via
// Pad/Reshape/AllToAll/Transpose/Reshape.
TEST_P(SpmdPartitioningTest,
       PartitionConvWithBathGroupCountOutputAlignWithLHS) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,1,1,2]0,1}
%rhs = f32[16,801,1,1024] parameter(1)
%rhs.copy = f32[16,801,1,1024] copy(%rhs),
sharding={devices=[1,1,1,2]0,1}
ROOT %conv = f32[5,1,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=f01b_i01o->01bf,batch_group_count=1024,
window={size=801x1 pad=2_2x0_0},
sharding={devices=[2,1,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,801,1,512]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,801,1,512]"));
auto conv = AllOf(op::Convolution(lhs, rhs), op::Shape("f32[5,1,1,512]"));
// Output dim 0 (size 5) is padded to 6 so it can be split 2-way (3 each).
EXPECT_THAT(root, AllOf(op::Reshape(op::Transpose(op::AllToAll(
op::Reshape(op::Pad(conv, op::Constant()))))),
op::Shape("f32[3,1,1,1024]")));
}
// Combination case: LHS is sharded on a spatial dim and must be realigned to
// the RHS's feature-dim sharding, the conv runs on that layout, and the result
// is then resharded again to match the output's [2,1,1,1] sharding.
TEST_P(SpmdPartitioningTest,
       PartitionConvWithBathGroupCountOutputAlignWithRHS) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,2,1,1]0,1}
%rhs = f32[16,801,1,1024] parameter(1)
%rhs.copy = f32[16,801,1,1024] copy(%rhs),
sharding={devices=[1,1,1,2]0,1}
ROOT %conv = f32[5,1,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=f01b_i01o->01bf,batch_group_count=1024,
window={size=801x1 pad=2_2x0_0},
sharding={devices=[2,1,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,801,1,512]"));
const auto lhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[16,401,1,1024]"));
auto resharded_lhs = AllOf(
op::Slice(op::Reshape(op::Transpose(op::AllToAll(op::Reshape(lhs))))),
op::Shape("f32[16,801,1,512]"));
auto conv =
AllOf(op::Convolution(resharded_lhs, rhs), op::Shape("f32[5,1,1,512]"));
EXPECT_THAT(root, AllOf(op::Reshape(op::Transpose(op::AllToAll(
op::Reshape(op::Pad(conv, op::Constant()))))),
op::Shape("f32[3,1,1,1024]")));
}
// batch_group_count conv on 8 devices where operands use different 2D device
// meshes ([2,1,4] vs [4,1,2]) and the output is partially replicated. Expects
// the conv to run on 16-feature shards aligned with the LHS, then be resharded
// to the partially replicated output via Pad/Reshape/AllToAll/Transpose.
TEST_P(SpmdPartitioningTest, PartitionConvWithBathGroupAlignWithLHSPartial) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[4,275,64]{2,1,0} parameter(0)
%multiply.5810 = f32[4,275,64]{2,1,0} copy(lhs), sharding={devices=[2,1,4]<=[8]}
%rhs = f32[4,275,64]{2,1,0} parameter(1)
%copy.25 = f32[4,275,64]{2,1,0} copy(rhs), sharding={devices=[4,1,2]<=[8]}
ROOT %convolution.6144 = f32[5,1,64]{2,1,0} convolution(multiply.5810, copy.25), window={size=275 pad=2_2},
dim_labels=f0b_i0o->0bf, batch_group_count=64,
operand_precision={HIGH,HIGH}, sharding={devices=[1,4,1,2]<=[2,4]T(1,0) last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(op::Shape("f32[4,275,16]"));
const auto rhs = AllOf(op::Shape("f32[4,275,16]"));
auto conv = AllOf(op::Convolution(lhs, rhs), op::Shape("f32[5,1,16]"));
EXPECT_THAT(root, AllOf(op::Reshape(op::Transpose(op::AllToAll(
op::Reshape(op::Pad(conv, op::Constant()))))),
op::Shape("f32[5,1,64]")));
}
// Same scenario as the previous test but with the operand shardings swapped
// ([4,1,2] LHS vs [2,1,4] RHS), so alignment is driven by the RHS. The
// expected partitioned graph shape is identical: 16-feature-shard conv plus
// an AllToAll-based reshard to the partially replicated output.
TEST_P(SpmdPartitioningTest,
       PartitionConvWithBathGroupCountAlignWithRHSPartial) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[4,275,64]{2,1,0} parameter(0)
%multiply.5810 = f32[4,275,64]{2,1,0} copy(lhs), sharding={devices=[4,1,2]<=[8]}
%rhs = f32[4,275,64]{2,1,0} parameter(1)
%copy.25 = f32[4,275,64]{2,1,0} copy(rhs), sharding={devices=[2,1,4]<=[8]}
ROOT %convolution.6144 = f32[5,1,64]{2,1,0} convolution(multiply.5810, copy.25), window={size=275 pad=2_2},
dim_labels=f0b_i0o->0bf, batch_group_count=64,
operand_precision={HIGH,HIGH}, sharding={devices=[1,4,1,2]<=[2,4]T(1,0) last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(op::Shape("f32[4,275,16]"));
const auto rhs = AllOf(op::Shape("f32[4,275,16]"));
auto conv = AllOf(op::Convolution(lhs, rhs), op::Shape("f32[5,1,16]"));
EXPECT_THAT(root, AllOf(op::Reshape(op::Transpose(op::AllToAll(
op::Reshape(op::Pad(conv, op::Constant()))))),
op::Shape("f32[5,1,64]")));
}
// Both operands share the same [4,1,2] sharding and the output's 4-way tiling
// on its feature dim lines up with them, so the partitioned conv's result is
// already correctly placed — the root is the conv itself, with no reshard.
TEST_P(SpmdPartitioningTest,
       PartitionConvWithBathGroupCountAlignWithOutputPartial) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[4,275,64]{2,1,0} parameter(0)
%multiply.5810 = f32[4,275,64]{2,1,0} copy(lhs), sharding={devices=[4,1,2]<=[8]}
%rhs = f32[4,275,64]{2,1,0} parameter(1)
%copy.25 = f32[4,275,64]{2,1,0} copy(rhs), sharding={devices=[4,1,2]<=[8]}
ROOT %convolution.6144 = f32[5,1,64]{2,1,0} convolution(multiply.5810, copy.25), window={size=275 pad=2_2},
dim_labels=f0b_i0o->0bf, batch_group_count=64,
operand_precision={HIGH,HIGH}, sharding={devices=[1,1,4,2]<=[2,4]T(1,0) last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(op::Shape("f32[4,275,16]"));
const auto rhs = AllOf(op::Shape("f32[4,275,16]"));
EXPECT_THAT(root, AllOf(op::Convolution(lhs, rhs), op::Shape("f32[5,1,16]")));
}
// Grouped (feature_group_count=1024) convolution with LHS, RHS, and output all
// sharded 2-way on the feature/output-feature dimension. Shardings agree, so
// the conv partitions directly into per-device halves with no collectives.
TEST_P(SpmdPartitioningTest, PartitionConvWithFeatureGroupCount) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,1,1,2]0,1}
%rhs = f32[5,1,1,2048] parameter(1)
%rhs.copy = f32[5,1,1,2048] copy(%rhs),
sharding={devices=[1,1,1,2]0,1}
ROOT %conv = f32[16,801,1,2048] convolution(%lhs.copy, %rhs.copy),
dim_labels=b01f_01io->b01f,feature_group_count=1024,
window={size=5x1 pad=2_2x0_0},
sharding={devices=[1,1,1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,801,1,512]"))
;
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[5,1,1,1024]"));
EXPECT_THAT(
root, AllOf(op::Convolution(lhs, rhs), op::Shape("f32[16,801,1,1024]")));
}
// Grouped conv on 32 devices with explicit (non-iota) device lists: operands
// are 4-way sharded on the feature dim with 8-way replication, while the
// output adds an 8-way batch split. Expects the LHS to be additionally
// DynamicSliced on the batch dim (16 -> 8) on top of its feature-dim shard.
TEST_P(SpmdPartitioningTest, PartitionConvWithFeatureGroupCount2) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[64,3,1,3072] parameter(0)
%lhs.copy = f32[64,3,1,3072] copy(%lhs),
sharding={devices=[1,1,1,4,8]0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23,24,25
,26,27,28,29,30,31,8,9,10,11,12,13,14,15 last_tile_dim_replicate}
%rhs = f32[3,1,1,3072] parameter(1)
%rhs.copy = f32[3,1,1,3072] copy(%rhs),
sharding={devices=[1,1,1,4,8]0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23,24,25
,26,27,28,29,30,31,8,9,10,11,12,13,14,15 last_tile_dim_replicate}
ROOT %conv = f32[64,1,1,3072] convolution(%lhs.copy, %rhs.copy),
dim_labels=b01f_01io->b01f,feature_group_count=3072,
window={size=3x1},
sharding={devices=[8,1,1,4]0,16,24,8,2,18,26,10,4,20,28,12,6,22,30,14,7,23,
31,15,5,21,29,13,3,19,27,11,1,17,25,9}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 32));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs =
AllOf(op::DynamicSlice(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(),
op::Constant(), op::Constant(),
op::Reshape())),
op::Reshape(), op::Constant(), op::Constant(), op::Constant()),
op::Shape("f32[8,3,1,768]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[3,1,1,768]"));
EXPECT_THAT(root,
AllOf(op::Convolution(lhs, rhs), op::Shape("f32[8,1,1,768]")));
}
// Partially sharded grouped conv (feature_group_count=16) where the LHS is
// 4-way sharded on the feature dim and the RHS/output only 2-way. The conv
// runs on the finer 4-way shards and the result is merged up to the coarser
// output sharding via CollectivePermute + DynamicUpdateSlice + AllReduce.
TEST_P(SpmdPartitioningTest,
       PartitionConvWithFeatureGroupCountAlignWithLHSPartial) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[4,275,16]{2,1,0} parameter(0)
%multiply.5810 = f32[4,275,16]{2,1,0} copy(lhs), sharding={devices=[1,1,4,2]<=[8] last_tile_dim_replicate}
%rhs = f32[1,275,16]{2,1,0} parameter(1)
%copy.25 = f32[1,275,16]{2,1,0} copy(rhs), sharding={devices=[1,1,2,4]<=[8] last_tile_dim_replicate}
ROOT %convolution.6144 = f32[5,4,16]{2,1,0} convolution(multiply.5810, copy.25), window={size=275 pad=2_2},
dim_labels=b0f_i0o->0bf, feature_group_count=16,
operand_precision={HIGH,HIGH}, sharding={devices=[1,1,2,4]<=[2,4]T(1,0) last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(op::Shape("f32[4,275,4]"));
const auto rhs = AllOf(op::Shape("f32[1,275,4]"));
auto conv = AllOf(op::Convolution(lhs, rhs), op::Shape("f32[5,4,4]"));
EXPECT_THAT(root, AllOf(op::AllReduce(op::DynamicUpdateSlice(
_, op::CollectivePermute(conv), _, _, _)),
op::Shape("f32[5,4,8]")));
}
// Same as the previous test with the partial shardings swapped: RHS carries
// the finer 4-way feature split and drives alignment. The expected partitioned
// graph is identical in shape: a 4-way-shard conv merged to the 2-way output
// via CollectivePermute + DynamicUpdateSlice + AllReduce.
TEST_P(SpmdPartitioningTest,
       PartitionConvWithFeatureGroupCountAlignWithRHSPartial) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[4,275,16]{2,1,0} parameter(0)
%multiply.5810 = f32[4,275,16]{2,1,0} copy(lhs), sharding={devices=[1,1,2,4]<=[8] last_tile_dim_replicate}
%rhs = f32[1,275,16]{2,1,0} parameter(1)
%copy.25 = f32[1,275,16]{2,1,0} copy(rhs), sharding={devices=[1,1,4,2]<=[8] last_tile_dim_replicate}
ROOT %convolution.6144 = f32[5,4,16]{2,1,0} convolution(multiply.5810, copy.25), window={size=275 pad=2_2},
dim_labels=b0f_i0o->0bf, feature_group_count=16,
operand_precision={HIGH,HIGH}, sharding={devices=[1,1,2,4]<=[2,4]T(1,0) last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(op::Shape("f32[4,275,4]"));
const auto rhs = AllOf(op::Shape("f32[1,275,4]"));
auto conv = AllOf(op::Convolution(lhs, rhs), op::Shape("f32[5,4,4]"));
EXPECT_THAT(root, AllOf(op::AllReduce(op::DynamicUpdateSlice(
_, op::CollectivePermute(conv), _, _, _)),
op::Shape("f32[5,4,8]")));
}
// Here the *output* carries the finer 4-way feature sharding while both
// operands are 2-way. Alignment to the output means each device just computes
// its 4-feature slice directly — the root is the conv, no merge collectives.
TEST_P(SpmdPartitioningTest,
       PartitionConvWithFeatureGroupCountAlignWithOutputPartial) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[4,275,16]{2,1,0} parameter(0)
%multiply.5810 = f32[4,275,16]{2,1,0} copy(lhs), sharding={devices=[1,1,2,4]<=[8] last_tile_dim_replicate}
%rhs = f32[1,275,16]{2,1,0} parameter(1)
%copy.25 = f32[1,275,16]{2,1,0} copy(rhs), sharding={devices=[1,1,2,4]<=[8] last_tile_dim_replicate}
ROOT %convolution.6144 = f32[5,4,16]{2,1,0} convolution(multiply.5810, copy.25), window={size=275 pad=2_2},
dim_labels=b0f_i0o->0bf, feature_group_count=16,
operand_precision={HIGH,HIGH}, sharding={devices=[1,1,4,2]<=[2,4]T(1,0) last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(op::Shape("f32[4,275,4]"));
const auto rhs = AllOf(op::Shape("f32[1,275,4]"));
EXPECT_THAT(root, AllOf(op::Convolution(lhs, rhs), op::Shape("f32[5,4,4]")));
}
// Grouped conv where the RHS is sharded on its kernel-spatial dim (size 5,
// padded and split 2-way to 3 each) while LHS/output use the feature dim.
// Expects the RHS to be resharded to the LHS's layout via
// Reshape/AllToAll/Transpose/Slice before the per-device conv.
TEST_P(SpmdPartitioningTest,
       PartitionConvWithFeatureGroupCountRHSAlignWithLHS) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,1,1,2]0,1}
%rhs = f32[5,1,1,1024] parameter(1)
%rhs.copy = f32[5,1,1,1024] copy(%rhs),
sharding={devices=[2,1,1,1]0,1}
ROOT %conv = f32[16,801,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=b01f_01io->b01f,feature_group_count=1024,
window={size=5x1 pad=2_2x0_0},
sharding={devices=[1,1,1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,801,1,512]"));
const auto rhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
op::Reshape(), op::Constant(),
op::Constant(), op::Constant())),
op::Shape("f32[3,1,1,1024]"));
auto resharded_rhs = AllOf(
op::Slice(op::Reshape(op::Transpose(op::AllToAll(op::Reshape(rhs))))),
op::Shape("f32[5,1,1,512]"));
EXPECT_THAT(root, AllOf(op::Convolution(lhs, resharded_rhs),
op::Shape("f32[16,801,1,512]")));
}
// Mirror case: the LHS is sharded on its spatial dim (801 padded, split to
// 401 each) while RHS/output use the feature dim. Expects the LHS to be
// resharded via Reshape/AllToAll/Transpose/Slice to match the RHS before the
// per-device conv.
TEST_P(SpmdPartitioningTest,
       PartitionConvWithFeatureGroupCountLHSAlignWithRHS) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,2,1,1]0,1}
%rhs = f32[5,1,1,1024] parameter(1)
%rhs.copy = f32[5,1,1,1024] copy(%rhs),
sharding={devices=[1,1,1,2]0,1}
ROOT %conv = f32[16,801,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=b01f_01io->b01f,feature_group_count=1024,
window={size=5x1 pad=2_2x0_0},
sharding={devices=[1,1,1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[16,401,1,1024]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[5,1,1,512]"));
resharded_lhs_marker_do_not_use
EXPECT_THAT(root, AllOf(op::Convolution(resharded_lhs, rhs),
op::Shape("f32[16,801,1,512]")));
}
// Grouped conv where operands agree on a feature-dim sharding but the output
// wants a batch-dim ([2,1,1,1]) sharding. The conv runs on the operands'
// layout and the result is resharded for the output with
// Reshape/AllToAll/Transpose. ("Ouput" typo in the name kept for registry
// stability.)
TEST_P(SpmdPartitioningTest,
       PartitionConvWithFeatureGroupCountAlignOuputWithLHS) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,1,1,2]0,1}
%rhs = f32[5,1,1,1024] parameter(1)
%rhs.copy = f32[5,1,1,1024] copy(%rhs),
sharding={devices=[1,1,1,2]0,1}
ROOT %conv = f32[16,801,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=b01f_01io->b01f,feature_group_count=1024,
window={size=5x1 pad=2_2x0_0},
sharding={devices=[2,1,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,801,1,512]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[5,1,1,512]"));
auto conv = AllOf(op::Convolution(lhs, rhs), op::Shape("f32[16,801,1,512]"));
EXPECT_THAT(root,
AllOf(op::Reshape(op::Transpose(op::AllToAll(op::Reshape(conv)))),
op::Shape("f32[8,801,1,1024]")));
}
// Grouped conv partitioned on BOTH a spatial dim and the group (feature) dim
// over a 2x2 mesh, with a partially replicated RHS. The spatial split of the
// windowed dim requires a halo exchange: left/right halos are fetched via
// CollectivePermute and concatenated around the local shard (with a Select to
// mask padding) before the conv.
TEST_P(SpmdPartitioningTest,
       PartitionConvGroupOnFeatureGroupCount_RHSPartialReplicate) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,2,1,2]<=[4]}
%rhs = f32[5,1,1,1024] parameter(1)
%rhs.copy = f32[5,1,1,1024] copy(%rhs),
sharding={devices=[1,1,1,2,2]0,2,1,3 last_tile_dim_replicate}
ROOT %conv = f32[16,801,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=b01f_01io->b01f,feature_group_count=1024,
window={size=5x1 pad=2_2x0_0},
sharding={devices=[1,2,1,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
op::Constant(), op::Reshape(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,401,1,512]"));
// Two rows of halo on each side (window size 5 with pad 2_2).
auto left_halo = AllOf(op::Shape("f32[16,2, 1, 512]"),
op::CollectivePermute(op::Slice(lhs)));
auto right_halo = AllOf(op::Shape("f32[16,2, 1, 512]"),
op::CollectivePermute(op::Slice(lhs)));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[5,1,1,512]"));
EXPECT_THAT(
root,
AllOf(op::Convolution(
op::Select(_, op::Concatenate(left_halo, lhs, right_halo), _),
rhs),
op::Shape("f32[16, 401, 1, 512]")));
}
// Variant of the previous halo-exchange test where the RHS parameter starts
// fully replicated; it is aligned with the output by a local DynamicSlice on
// its feature dim. The LHS side still needs the CollectivePermute halo
// exchange for the spatial split.
TEST_P(SpmdPartitioningTest,
       PartitionConvGroupOnFeatureGroupCount_RHSAlignWithOutput) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,2,1,2]<=[4]}
%rhs = f32[5,1,1,1024] parameter(1), sharding={replicated}
ROOT %conv = f32[16,801,1,1024] convolution(%lhs.copy, %rhs),
dim_labels=b01f_01io->b01f,feature_group_count=1024,
window={size=5x1 pad=2_2x0_0},
sharding={devices=[1,2,1,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
op::Constant(), op::Reshape(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,401,1,512]"));
auto left_halo = AllOf(op::Shape("f32[16,2, 1, 512]"),
op::CollectivePermute(op::Slice(lhs)));
auto right_halo = AllOf(op::Shape("f32[16,2, 1, 512]"),
op::CollectivePermute(op::Slice(lhs)));
const auto rhs =
AllOf(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape()),
op::Shape("f32[5,1,1,512]"));
EXPECT_THAT(
root,
AllOf(op::Convolution(
op::Select(_, op::Concatenate(left_halo, lhs, right_halo), _),
rhs),
op::Shape("f32[16, 401, 1, 512]")));
}
// Variant where the LHS starts batch-sharded (+replication) and must first be
// resharded (DynamicSlice/Pad/Reshape/AllToAll/Transpose) into the output's
// spatial-by-feature layout; the resharded LHS then goes through the usual
// halo exchange before the grouped conv.
TEST_P(SpmdPartitioningTest,
       PartitionConvGroupOnFeatureGroupCount_LHSAlignWithOutput) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[2,1,1,1,2]<=[4] last_tile_dim_replicate}
%rhs = f32[5,1,1,1024] parameter(1)
%rhs.copy = f32[5,1,1,1024] copy(%rhs),
sharding={devices=[1,1,1,2,2]0,2,1,3 last_tile_dim_replicate}
ROOT %conv = f32[16,801,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=b01f_01io->b01f,feature_group_count=1024,
window={size=5x1 pad=2_2x0_0},
sharding={devices=[1,2,1,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Reshape(), op::Constant(),
op::Constant(), op::Constant())),
op::Shape("f32[8,801,1,1024]"));
auto resharded_lhs =
AllOf(op::Reshape(op::Transpose(op::AllToAll(op::Reshape(
op::Pad(op::DynamicSlice(lhs, op::Subtract(), op::Subtract(),
op::Subtract(), op::Subtract()),
op::Constant()))))),
op::Shape("f32[16,401,1,512]"));
auto left_halo = AllOf(op::Shape("f32[16,2, 1, 512]"),
op::CollectivePermute(op::Slice(resharded_lhs)));
auto right_halo = AllOf(op::Shape("f32[16,2, 1, 512]"),
op::CollectivePermute(op::Slice(resharded_lhs)));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[5,1,1,512]"));
EXPECT_THAT(
root,
AllOf(
op::Convolution(
op::Select(
_, op::Concatenate(left_halo, resharded_lhs, right_halo), _),
rhs),
op::Shape("f32[16, 401, 1, 512]")));
}
// batch_group_count conv partitioned on both a spatial dim and the group dim
// over a 2x2 mesh, with a partially replicated output. Both operands get the
// spatial halo treatment (Select-masked LHS, halo CollectivePermutes); the
// spatially-partitioned contraction is then combined with an AllReduce and
// routed to the output's replica placement with a CollectivePermute.
TEST_P(SpmdPartitioningTest, PartitionConvGroupOnBatchGroupCount) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,2,1,2]<=[4]}
%rhs = f32[16,801,1,1024] parameter(1)
%rhs.copy = f32[16,801,1,1024] copy(%rhs),
sharding={devices=[1,2,1,2]<=[4]}
ROOT %conv = f32[5,1,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=f01b_i01o->01bf,batch_group_count=1024,
window={size=801x1 pad=2_2x0_0},
sharding={devices=[1,1,1,2,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Select(_,
op::Copy(op::DynamicSlice(
op::Pad(op::Parameter(), op::Constant()), op::Constant(),
op::Reshape(), op::Constant(), op::Reshape())),
_),
op::Shape("f32[16,401,1,512]"));
auto left_halo = AllOf(op::Shape("f32[16,2, 1, 512]"),
op::CollectivePermute(op::Slice(lhs)));
auto right_halo = AllOf(op::Shape("f32[16,2, 1, 512]"),
op::CollectivePermute(op::Slice(lhs)));
const auto rhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
op::Constant(), op::Reshape(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,401,1,512]"));
auto conv = AllOf(op::Convolution(op::Concatenate(left_halo, lhs, right_halo),
op::Select(_, rhs, _)),
op::Shape("f32[5,1,1,512]"));
EXPECT_THAT(root, AllOf(op::CollectivePermute(op::AllReduce(conv)),
op::Shape("f32[5,1,1,512]")));
}
// Regression test: convolution with batch_group_count=2 and fully replicated
// operands/output. Only asserts that partitioning succeeds (no structural
// expectations on the resulting module).
TEST_P(SpmdPartitioningTest, PartitionConvWithBatchGroupCountReplicatedLHSRHS) {
  absl::string_view hlo_string = R"(
HloModule test, entry_computation_layout={(f32[8,28,1,64]{3,2,1,0}, f32[8,28,1,2]{3,2,1,0})->f32[3,1,32,2]{3,2,1,0}}, allow_spmd_sharding_propagation_to_output={true}
ENTRY main.4 {
  lhs = f32[8,28,1,64]{3,2,1,0} parameter(0), sharding={replicated}
  rhs = f32[8,28,1,2]{3,2,1,0} parameter(1), sharding={replicated}
  ROOT convolution.3 = f32[3,1,32,2]{3,2,1,0} convolution(lhs, rhs), window={size=28x1 pad=1_1x0_0}, dim_labels=f01b_i01o->01bf, batch_group_count=2, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
}
// Feature-grouped convolution where the LHS is tiled on a spatial dim but the
// RHS is tiled on the feature dim. Expects the LHS to be resharded to align
// with the RHS (Reshape/Transpose/AllToAll round trip), and the output to be
// resharded back to its requested spatial tiling.
TEST_P(SpmdPartitioningTest,
       PartitionConvWithFeatureGroupCountAlignOuputWithRHS) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[16,801,1,1024] parameter(0)
  %lhs.copy = f32[16,801,1,1024] copy(%lhs),
    sharding={devices=[1,2,1,1]0,1}
  %rhs = f32[5,1,1,1024] parameter(1)
  %rhs.copy = f32[5,1,1,1024] copy(%rhs),
    sharding={devices=[1,1,1,2]0,1}
  ROOT %conv = f32[16,801,1,1024] convolution(%lhs.copy, %rhs.copy),
    dim_labels=b01f_01io->b01f,feature_group_count=1024,
    window={size=5x1 pad=2_2x0_0},
    sharding={devices=[2,1,1,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs =
      AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
                                      op::Constant(), op::Reshape(),
                                      op::Constant(), op::Constant())),
            op::Shape("f32[16,401,1,1024]"));
  const auto rhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
                                op::Constant(), op::Reshape())),
      op::Shape("f32[5,1,1,512]"));
  // LHS resharded from spatial tiling to feature tiling via AllToAll so the
  // grouped convolution can be computed locally.
  auto resharded_lhs = AllOf(
      op::Slice(op::Reshape(op::Transpose(op::AllToAll(op::Reshape(lhs))))),
      op::Shape("f32[16,801,1,512]"));
  auto conv = AllOf(op::Convolution(resharded_lhs, rhs),
                    op::Shape("f32[16,801,1,512]"));
  // Output resharded back to batch tiling ([2,1,1,1]) via another AllToAll.
  EXPECT_THAT(root,
              AllOf(op::Reshape(op::Transpose(op::AllToAll(op::Reshape(conv)))),
                    op::Shape("f32[8,801,1,1024]")));
}
// Backprop-style feature-grouped convolution (dim_labels=b01f_01oi->b01f with
// rhs_reversal). Both operands are tiled on the feature/input-feature dim, so
// the convolution should partition locally with no resharding or collectives.
TEST_P(SpmdPartitioningTest, PartitionConvWithFeatureGroupCountBackProp) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[16,801,1,1024] parameter(0)
  %lhs.copy = f32[16,801,1,1024] copy(%lhs),
    sharding={devices=[1,1,1,2]0,1}
  %rhs = f32[5,1,1024,1] parameter(1)
  %rhs.copy = f32[5,1,1024,1] copy(%rhs),
    sharding={devices=[1,1,2,1]0,1}
  ROOT %conv = f32[16,801,1,1024] convolution(%lhs.copy, %rhs.copy),
    dim_labels=b01f_01oi->b01f,feature_group_count=1024,
    window={size=5x1 pad=2_2x0_0 rhs_reversal=1x1},
    sharding={devices=[1,1,1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
                                op::Constant(), op::Reshape())),
      op::Shape("f32[16,801,1,512]"))
      ;
  const auto rhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
                                op::Reshape(), op::Constant())),
      op::Shape("f32[5,1,512,1]"));
  EXPECT_THAT(root,
              AllOf(op::Convolution(lhs, rhs), op::Shape("f32[16,801,1,512]")));
}
// Verifies that resharding across broadcast dimensions is avoided: when only
// broadcast (value-identical) dims change devices, the partitioner may emit a
// plain copy or a CollectivePermute on the small data instead of resharding
// the broadcast result.
TEST_P(SpmdPartitioningTest, NoReshardOnBroadcastDims) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %param0 = f32[2,3] parameter(0)
  %param1 = f32[2,3,20] parameter(1)
  %br0 = f32[20,2,20,3,20] broadcast(%param0), dimensions={1,3}, sharding={devices=[2,1,2,1,2]<=[8]}
  %br1 = f32[20,2,20,3,20] broadcast(%param1), dimensions={1,3,4}, sharding={devices=[2,1,2,1,2]<=[8]}
  %add = f32[20,2,20,3,20] add(%br0, %br1), sharding={devices=[2,1,2,1,2]<=[8]}
  %reshape = f32[10,4,10,6,20] reshape(%br0), sharding={devices=[2,1,2,1,2]<=[8]}
  %transpose = f32[2,3,20,20,20] transpose(%br0), dimensions={1,3,0,2,4}, sharding={devices=[1,1,2,2,2]<=[8]}
  %copy_add0 = f32[20,2,20,3,20] copy(%add), sharding={devices=[2,1,2,1,2]6,7,2,3,4,5,0,1}
  %copy_add1 = f32[20,2,20,3,20] copy(%add), sharding={devices=[2,1,2,1,2]7,6,3,2,5,4,0,1}
  %copy_reshape = f32[10,4,10,6,20] copy(%reshape), sharding={devices=[2,1,2,1,2]7,6,3,2,5,4,0,1}
  %copy_transpose = f32[2,3,20,20,20] copy(%transpose), sharding={devices=[1,1,2,2,2]7,6,3,2,5,4,0,1}
  ROOT %tuple = (f32[20,2,20,3,20], f32[20,2,20,3,20], f32[10,4,10,6,20], f32[2,3,20,20,20])
    tuple(%copy_add0, %copy_add1, %copy_reshape, %copy_transpose),
    sharding={{devices=[2,1,2,1,2]6,7,2,3,4,5,0,1},{devices=[2,1,2,1,2]7,6,3,2,5,4,0,1},{devices=[2,1,2,1,2]7,6,3,2,5,4,0,1},{devices=[1,1,2,2,2]7,6,3,2,5,4,0,1}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  // Device order differs only on broadcast dims -> a local copy suffices.
  auto copy_add0 =
      op::Copy(op::Copy(op::Add(op::Broadcast(_), op::Broadcast(_))));
  // Device order also differs on a non-broadcast dim -> CollectivePermute.
  auto copy_add1 = op::Copy(
      op::CollectivePermute(op::Add(op::Broadcast(_), op::Broadcast(_))));
  auto copy_reshape = op::Copy(op::Copy(op::Reshape(op::Broadcast(_))));
  auto copy_transpose = op::Copy(op::Copy(op::Transpose(op::Broadcast(_))));
  EXPECT_THAT(root,
              op::Tuple(copy_add0, copy_add1, copy_reshape, copy_transpose));
}
// Convolution with the filter partitioned on both input-feature and
// output-feature dims while the input is partially replicated. Expects a
// local convolution followed by AllReduce (contracting the partitioned
// input-feature dim) and a CollectivePermute to place the output shards.
TEST_P(SpmdPartitioningTest,
       ConvolutionFilterIFOFPartitionedInputPartialReplicate) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[128,112,112,12] parameter(0)
  %lhs.copy = f32[128,112,112,12] copy(f32[128,112,112,12] %lhs),
    sharding={devices=[1,1,1,2,2]<=[4] last_tile_dim_replicate}
  %rhs = f32[7,7,12,64] parameter(1)
  %rhs.copy = f32[7,7,12,64] copy(f32[7,7,12,64] %rhs),
    sharding={devices=[1,1,2,2]<=[4]}
  ROOT %conv = f32[128,56,56,64] convolution(
    f32[128,112,112,12] %lhs.copy,
    f32[7,7,12,64] %rhs.copy),
    window={size=7x7 stride=2x2 pad=3_3x3_3},
    dim_labels=b01f_01io->b01f,
    sharding={devices=[1,1,1,2,2]<=[4] last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
                                op::Constant(), op::Reshape())),
      op::Shape("f32[128,112,112,6]"));
  const auto rhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
                                op::Reshape(), op::Reshape())),
      op::Shape("f32[7,7,6,32]"));
  EXPECT_THAT(
      root,
      AllOf(op::CollectivePermute(op::AllReduce(op::Convolution(lhs, rhs))),
            op::Shape("f32[128,56,56,32]")));
}
// Filter-gradient style convolution (dim_labels=f01b_i01o->01bf) where both
// operands are partially replicated on their feature dims. Expects only a
// CollectivePermute on the RHS to align shards, then a local convolution.
TEST_P(SpmdPartitioningTest,
       ConvolutionInputKernelNonContractingDimPartialReplicate) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[128,56,56,256] parameter(0)
  %lhs.copy = f32[128,56,56,256] copy(%lhs),
  sharding={devices=[1,1,1,2,2]<=[4] last_tile_dim_replicate}
  %rhs = f32[128,28,28,512] parameter(1)
  %rhs.copy = f32[128,28,28,512] copy(%rhs),
  sharding={devices=[1,1,1,2,2]<=[4] last_tile_dim_replicate}
  ROOT %conv = f32[1,1,256,512] convolution(%lhs.copy, %rhs.copy),
  window={size=28x28 pad=0_-1x0_-1 rhs_dilate=2x2}, dim_labels=f01b_i01o->01bf,
  sharding={devices=[1,1,2,2]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
                                op::Constant(), op::Reshape())),
      op::Shape("f32[128,56,56,128]"));
  const auto rhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
                                op::Constant(), op::Reshape())),
      op::Shape("f32[128,28,28,256]"));
  EXPECT_THAT(root, AllOf(op::Convolution(lhs, op::CollectivePermute(rhs)),
                          op::Shape("f32[1,1,128,256]")));
}
// Input partitioned on both a spatial dim and the feature dim. Expects a
// one-row halo exchange on the spatial dim (with Select masking for the pad
// regions), a CollectivePermute on the kernel to align the feature shards,
// and an AllReduce over the partitioned feature (contracting) dim.
TEST_P(SpmdPartitioningTest,
       ConvolutionInputSpatialDimAndFeatureDimParttiioned) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[8,210,210,12] parameter(0)
  %lhs.copy = f32[8,210,210,12] copy(f32[8,210,210,12] %lhs),
    sharding={devices=[1,2,1,2]<=[4]}
  %rhs = f32[3,3,12,32] parameter(1)
  %rhs.copy = f32[3,3,12,32] copy(f32[3,3,12,32] %rhs),
    sharding={devices=[1,1,2,1,2]<=[4] last_tile_dim_replicate}
  ROOT %conv = f32[8,210,210,32] convolution(
    f32[8,210,210,12] %lhs.copy,
    f32[3,3,12,32] %rhs.copy),
    window={size=3x3 pad=1_1x1_1},
    dim_labels=b01f_01io->b01f,
    sharding={devices=[1,2,1,1,2]<=[4] last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  const auto lhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
                                op::Constant(), op::Reshape())),
      op::Shape("f32[8,105,210,6]"));
  auto left_halo =
      AllOf(op::CollectivePermute(op::Slice(lhs)), op::Shape("f32[8,1,210,6]"));
  auto right_halo =
      AllOf(op::CollectivePermute(op::Slice(lhs)), op::Shape("f32[8,1,210,6]"));
  // Halo-concatenated LHS with Select masking out the out-of-bounds rows.
  auto exchanged_lhs = AllOf(
      op::Select(op::And(_, _), op::Concatenate(left_halo, lhs, right_halo),
                 op::Broadcast(_)),
      op::Shape("f32[8,107,210,6]"));
  const auto rhs = AllOf(
      op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
                                op::Reshape(), op::Constant())),
      op::Shape("f32[3,3,6,32]"));
  EXPECT_THAT(root, AllOf(op::AllReduce(op::Convolution(
                              exchanged_lhs, op::CollectivePermute(rhs))),
                          op::Shape("f32[8,105,210,32]")));
}
// Partitioned 1D FFT over the last (sharded) dimension of a c64 constant.
// Checks the distributed-FFT decomposition: pad+shuffle the local shard
// (halo Slice + CollectivePermute, then Dot/AllToAll), run a local Fft,
// apply twiddle factors (Multiply by Exp) and finish inside a While loop.
TEST_P(SpmdPartitioningTest, Fft3D) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  constant = c64[1,1,6]
    constant({{{(0,0),(1,1),(2,2),(3,3),(4,4),(5,5)}}}),
    sharding={devices=[1,1,2]0,1}
  ROOT fft = c64[1,1,6] fft(c64[1,1,6] constant), fft_type=FFT, fft_length={6},
    sharding={devices=[1,1,2]0,1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto input = AllOf(op::DynamicSlice(op::Constant(), op::Constant(),
                                      op::Constant(), op::Reshape()),
                     op::Shape("c64[1,1,3]"));
  auto padded_input =
      AllOf(op::DynamicSlice(
                op::Concatenate(input, op::CollectivePermute(op::Slice())),
                op::Constant(), op::Constant(), op::Reshape()),
            op::Shape("c64[1,1,4]"));
  auto shuffled_input =
      AllOf(op::Slice(op::AllToAll(op::Dot(padded_input, op::Convert()))),
            op::Shape("c64[1,1,3]"));
  auto local_fft = AllOf(op::Fft(shuffled_input), op::Shape("c64[1,1,3]"));
  EXPECT_THAT(root, AllOf(op::GetTupleElement(op::While(op::Tuple(
                              _, op::Multiply(local_fft, op::Exp()), _, _, _))),
                          op::Shape("c64[1,1,3]")));
}
// Dot-like convolution where both operands are the SAME instruction. Verifies
// the partitioner reshards the two uses independently (each via
// AllReduce(DynamicUpdateSlice) on the shared parameter shard) rather than
// treating them as one operand.
TEST_P(SpmdPartitioningTest, DotInputsAreIdentical) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %parameter.1 = f32[4000,4000]{1,0} parameter(0), sharding={devices=[2,4]<=[8]}
  ROOT %convolution = f32[4000,4000]{1,0} convolution(
    f32[4000,4000]{1,0} %parameter.1, f32[4000,4000]{1,0} %parameter.1),
    dim_labels=bf_io->bf, sharding={devices=[2,4]<=[8]}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto param = AllOf(op::Parameter(), op::Shape("f32[2000, 1000]"));
  // LHS use: replicate along the contracting dim.
  auto resharded_lhs =
      AllOf(op::AllReduce(op::DynamicUpdateSlice(_, param, _, _)),
            op::Shape("f32[2000, 4000]"));
  // RHS use: a separate reshard (of a Copy of the same shard).
  auto resharded_rhs =
      AllOf(op::AllReduce(op::DynamicUpdateSlice(_, op::Copy(param), _, _)),
            op::Shape("f32[4000, 1000]"));
  EXPECT_THAT(root, AllOf(op::Convolution(resharded_lhs, resharded_rhs),
                          op::Shape("f32[2000, 1000]")));
}
// Slicing a sharded constant then resharding to replicated. Expects the
// partitioner to keep the slice local (DynamicSlice on the constant) and
// replicate it with Select + AllReduce before the final reshape.
TEST_P(SpmdPartitioningTest, ConstantSliceReshard) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %constant.785 = f32[1,8] constant({{0,1,2,3,4,5,6,7}}),
    sharding={devices=[1,8]<=[8]}
  %slice.62 = f32[1,1] slice(%constant.785), slice={[0:1], [0:1]},
    sharding={devices=[1,8]<=[8]}
  ROOT %reshape.779 = f32[] reshape(%slice.62), sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  const auto root = module->entry_computation()->root_instruction();
  VLOG(1) << module->ToString();
  auto slice = AllOf(op::Shape("f32[1,1]"),
                     op::Copy(op::DynamicSlice(op::Constant(), _, _)));
  EXPECT_THAT(root, op::Reshape(op::AllReduce(op::Select(_, slice, _))));
}
// Gather whose indices are an iota along a batch-parallel dimension. The
// operand is tiled [4,2,1,1] and must be redistributed (Reshape) so its
// parallel dim matches the [1,8,1]-sharded indices; the local gather result
// is combined with AllReduce(DynamicUpdateSlice).
TEST_P(SpmdPartitioningTest, GatherParallelDimRedistributionOperand) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[4,2,1,1]<=[8]}
  %constant = s32[4] constant({0, 1, 2, 3}), sharding={replicated}
  %iota = s32[1,8,4]{2,1,0} broadcast(%constant), dimensions={2},
    sharding={devices=[1,8,1]<=[8]}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,8,1]<=[8]}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={devices=[1,8,1]<=[8]}
  ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0,
    slice_sizes={1,1,2,2}, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  const auto root = module->entry_computation()->root_instruction();
  VLOG(1) << module->ToString();
  auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::Reshape())
      ;
  // Indices rebased to the local shard via Subtract of the shard offset.
  auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
  auto gather = AllOf(op::Shape("s32[1,4,2,2]"), op::Gather(operand, indices));
  EXPECT_THAT(root,
              op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _)));
}
// As above but the INDICES need redistribution: they are sharded [1,4,2]
// while the operand is [8,1,1,1]. The operand is resharded (Reshape) to a
// [2,2,2,2] tile and the result needs two nested AllReduces to combine the
// partial shards.
TEST_P(SpmdPartitioningTest, GatherParallelDimRedistributionIndices) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[8,1,1,1]<=[8]}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,4,2]<=[8]}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[1,4,2]<=[8]}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={devices=[1,4,2]<=[8]}
  ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  const auto root = module->entry_computation()->root_instruction();
  VLOG(1) << module->ToString();
  auto operand = AllOf(op::Shape("s32[2,2,2,2]"), op::Reshape())
      ;
  auto indices = AllOf(op::Shape("s32[2,2,2]"), op::Subtract())
      ;
  auto gather = AllOf(op::Shape("s32[2,2,2,2]"), op::Gather(operand, indices));
  EXPECT_THAT(root, op::AllReduce(op::AllReduce(
                        op::DynamicUpdateSlice(_, gather, _, _, _, _))));
}
// Parallel-dim gather with REPLICATED indices and an operand sharded on the
// parallel dim: the operand shard is used directly (Parameter), indices are
// rebased locally (Subtract), and one AllReduce assembles the output.
TEST_P(SpmdPartitioningTest, GatherParallelDimReplicatedIndices) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[8,1,1,1]<=[8]}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={replicated}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={replicated}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={replicated}
  ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  const auto root = module->entry_computation()->root_instruction();
  VLOG(1) << module->ToString();
  auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::Parameter())
      ;
  auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract())
      ;
  auto gather = AllOf(op::Shape("s32[1,4,2,2]"), op::Gather(operand, indices));
  EXPECT_THAT(root,
              op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _)));
}
// Parallel-dim gather with a REPLICATED operand and indices sharded on the
// parallel dim: each device takes its operand tile via DynamicSlice, gathers
// locally, and the output is assembled with AllReduce(DynamicUpdateSlice).
TEST_P(SpmdPartitioningTest, GatherParallelDimReplicatedOperand) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={replicated}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,8,1]<=[8]}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[1,8,1]<=[8]}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={devices=[1,8,1]<=[8]}
  ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  const auto root = module->entry_computation()->root_instruction();
  VLOG(1) << module->ToString();
  auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice())
      ;
  auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract())
      ;
  auto gather = AllOf(op::Shape("s32[1,4,2,2]"), op::Gather(operand, indices));
  EXPECT_THAT(root,
              op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _)));
}
// Parallel-dim gather with PARTIALLY replicated indices ([1,2,1,4]+replicated
// last tile dim) and the operand fully sharded on the parallel dim: the
// operand shard is used as-is (Parameter), indices are rebased with Subtract,
// and one AllReduce assembles the replicated output.
TEST_P(SpmdPartitioningTest, GatherParallelDimPartialReplicatedIndices) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[8,1,1,1]<=[8]}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,2,1,4]<=[8] last_tile_dim_replicate}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[1,2,1,4]<=[8] last_tile_dim_replicate}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={devices=[1,2,1,4]<=[8] last_tile_dim_replicate}
  ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  const auto root = module->entry_computation()->root_instruction();
  VLOG(1) << module->ToString();
  auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::Parameter())
      ;
  auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract())
      ;
  auto gather = AllOf(op::Shape("s32[1,4,2,2]"), op::Gather(operand, indices));
  EXPECT_THAT(root,
              op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _)));
}
// Parallel-dim gather with a PARTIALLY replicated operand ([2,1,1,1] over 8
// devices, last tile dim replicated) and fully sharded indices: the operand
// is re-tiled with DynamicSlice to match the index sharding, then gathered
// locally and AllReduced into the replicated output.
TEST_P(SpmdPartitioningTest, GatherParallelDimPartialReplicatedOperand) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={
    devices=[2,1,1,1,4]<=[8] last_tile_dim_replicate}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,8,1]<=[8]}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[1,8,1]<=[8]}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={devices=[1,8,1]<=[8]}
  ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  const auto root = module->entry_computation()->root_instruction();
  VLOG(1) << module->ToString();
  auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice())
      ;
  auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract())
      ;
  auto gather = AllOf(op::Shape("s32[1,4,2,2]"), op::Gather(operand, indices));
  EXPECT_THAT(root,
              op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _)));
}
// Parallel-dim gather where the operand tiling ([4,2,1,1]) and index tiling
// ([1,2,4]) assign the parallel dims to different mesh axes: the operand is
// realigned with a CollectivePermute and two nested AllReduces assemble the
// replicated output.
TEST_P(SpmdPartitioningTest, GatherParallelDimSwappedDimensions) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={
    devices=[4,2,1,1]<=[8]}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,2,4]<=[8]}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[1,2,4]<=[8]}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={devices=[1,2,4]<=[8]}
  ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  const auto root = module->entry_computation()->root_instruction();
  VLOG(1) << module->ToString();
  auto operand = AllOf(op::Shape("s32[4,1,2,2]"), op::CollectivePermute())
      ;
  auto indices = AllOf(op::Shape("s32[2,4,1]"), op::Subtract())
      ;
  auto gather = AllOf(op::Shape("s32[4,1,2,2]"), op::Gather(operand, indices));
  EXPECT_THAT(root, op::AllReduce(op::AllReduce(
                        op::DynamicUpdateSlice(_, gather, _, _, _, _))));
}
// Positive case: the parallel-dim iota feeding the gather is passed into a
// while loop UNCHANGED (the body forwards the same iota), so the parallel-dim
// gather optimization still applies inside the loop body.
TEST_P(SpmdPartitioningTest, GatherParallelDimFromOutsideWhilePositive) {
  absl::string_view hlo_string = R"(
HloModule module
cond {
  %parameters = (s32[8,4,2,2], s32[1,8,4], s32[]) parameter(0),
    sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}}
  %counter = s32[] get-tuple-element(parameters), index=2, sharding={replicated}
  %constant = s32[] constant(3), sharding={replicated}
  ROOT %lt = pred[] compare(counter, constant), direction=LT,
    sharding={replicated}
}
body {
  %parameters = (s32[8,4,2,2], s32[1,8,4], s32[]) parameter(0),
    sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}}
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} get-tuple-element(parameters), index=0,
    sharding={replicated}
  %iota = s32[1,8,4]{2,1,0} get-tuple-element(parameters), index=1,
    sharding={devices=[1,8,1]<=[8]}
  %counter = s32[] get-tuple-element(parameters), index=2, sharding={replicated}
  %constant = s32[] constant(1), sharding={replicated}
  %updated_counter = s32[] add(counter, constant), sharding={replicated}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[1,8,1]<=[8]}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={devices=[1,8,1]<=[8]}
  %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}, sharding={replicated}
  ROOT %tuple = (s32[8,4,2,2], s32[1,8,4], s32[])
    tuple(gather.20, iota, updated_counter),
    sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}}
}
ENTRY entry {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={replicated}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,8,1]<=[8]}
  %counter = s32[] constant(0), sharding={replicated}
  %tuple = (s32[8,4,2,2], s32[1,8,4], s32[]) tuple(parameter.0, iota, counter),
    sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}}
  ROOT while = (s32[8,4,2,2], s32[1,8,4], s32[]) while(tuple), body=body,
    condition=cond,
    sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  // Inspect the partitioned while BODY root, where the gather lives.
  const auto root = module->entry_computation()
                        ->root_instruction()
                        ->while_body()
                        ->root_instruction();
  VLOG(1) << module->ToString();
  auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice());
  auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
  auto gather = AllOf(op::Shape("s32[1,4,2,2]"), op::Gather(operand, indices));
  EXPECT_THAT(
      root,
      op::Tuple(op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _)), _,
                _));
}
// Negative case: the while body REPLACES the iota tuple element with a fresh
// iota (%iota.2), so the loop-carried value is not provably the original
// parallel-dim iota. The gather must then use the full replicated operand
// (GetTupleElement) and concatenated indices instead of the parallel-dim
// shortcut.
TEST_P(SpmdPartitioningTest, GatherParallelDimFromOutsideWhileNegative) {
  absl::string_view hlo_string = R"(
HloModule module
cond {
  %parameters = (s32[8,4,2,2], s32[1,8,4], s32[]) parameter(0),
    sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}}
  %counter = s32[] get-tuple-element(parameters), index=2, sharding={replicated}
  %constant = s32[] constant(3), sharding={replicated}
  ROOT %lt = pred[] compare(counter, constant), direction=LT,
    sharding={replicated}
}
body {
  %parameters = (s32[8,4,2,2], s32[1,8,4], s32[]) parameter(0),
    sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}}
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} get-tuple-element(parameters), index=0,
    sharding={replicated}
  %iota = s32[1,8,4]{2,1,0} get-tuple-element(parameters), index=1,
    sharding={devices=[1,8,1]<=[8]}
  %counter = s32[] get-tuple-element(parameters), index=2, sharding={replicated}
  %constant = s32[] constant(1), sharding={replicated}
  %updated_counter = s32[] add(counter, constant), sharding={replicated}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[1,8,1]<=[8]}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={devices=[1,8,1]<=[8]}
  %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}, sharding={replicated}
  %iota.2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[1,8,1]<=[8]}
  ROOT %tuple = (s32[8,4,2,2], s32[1,8,4], s32[])
    tuple(gather.20, iota.2, updated_counter),
    sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}}
}
ENTRY entry {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={replicated}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,8,1]<=[8]}
  %counter = s32[] constant(0), sharding={replicated}
  %tuple = (s32[8,4,2,2], s32[1,8,4], s32[]) tuple(parameter.0, iota, counter),
    sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}}
  ROOT while = (s32[8,4,2,2], s32[1,8,4], s32[]) while(tuple), body=body,
    condition=cond,
    sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  // Inspect the partitioned while BODY root, where the gather lives.
  const auto root = module->entry_computation()
                        ->root_instruction()
                        ->while_body()
                        ->root_instruction();
  VLOG(1) << module->ToString();
  // Full (unsliced) operand: the parallel-dim optimization did not fire.
  auto operand = AllOf(op::Shape("s32[8,4,2,2]"), op::GetTupleElement());
  auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Concatenate());
  auto gather = AllOf(op::Shape("s32[1,4,2,2]"), op::Gather(operand, indices));
  EXPECT_THAT(
      root,
      op::Tuple(op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _)), _,
                _));
}
// Scatter on a large mesh (1536 devices) where the replication factor on the
// last tile dim (24) does not divide the sharding groups. Checks the scatter
// is still partitioned to per-device [1,12,2048,16] operand shards with
// full-shape indices/updates.
TEST_P(SpmdPartitioningTest, ScatterRepsOnLastTileDimDontDivideGroups) {
  absl::string_view hlo_string = R"(
HloModule module
region.1 {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT res.1 = f32[] add(lhs, rhs)
}
ENTRY entry {
  %add.1 = f32[8,96,2048,16]{3,2,1,0} parameter(0)
  %concatenate.1 = s32[8,96,2048,2,4]{4,3,2,1,0} parameter(1)
  %broadcast.1 = f32[8,96,2048,2]{3,2,1,0} parameter(2)
  %add.1.shard = f32[8,96,2048,16]{3,2,1,0} copy(%add.1), sharding={devices=[8,8,1,1,24]<=[8,8,24]T(1,0,2) last_tile_dim_replicate}
  %concatenate.1.shard = s32[8,96,2048,2,4]{4,3,2,1,0} copy(%concatenate.1), sharding={devices=[8,8,1,1,1,24]<=[8,8,24]T(1,0,2) last_tile_dim_replicate}
  %broadcast.1.shard = f32[8,96,2048,2]{3,2,1,0} copy(%broadcast.1), sharding={devices=[8,8,1,1,24]<=[8,8,24]T(1,0,2) last_tile_dim_replicate}
  ROOT %scatter.44 = f32[8,96,2048,16]{3,2,1,0} scatter(
    %add.1.shard,
    %concatenate.1.shard,
    %broadcast.1.shard),
    update_window_dims={},
    inserted_window_dims={0,1,2,3},
    scatter_dims_to_operand_dims={0,1,2,3},
    index_vector_dim=4,
    to_apply=region.1,
    sharding={devices=[8,8,1,1,24]<=[8,8,24]T(1,0,2) last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, PartitionComputation(hlo_string, 1536));
  VLOG(1) << module->ToString();
  {
    const auto partitioned_scatter =
        module->entry_computation()->root_instruction();
    auto operand = AllOf(op::Shape("f32[1,12,2048,16]"));
    auto indices = AllOf(op::Shape("s32[8,96,2048,2,4]"));
    auto update = AllOf(op::Shape("f32[8,96,2048,2]"));
    auto scatter = AllOf(op::Shape("f32[1,12,2048,16]"),
                         op::Scatter(operand, indices, update));
    EXPECT_THAT(partitioned_scatter, scatter);
  }
}
// The parallel-dim iota is passed into a conditional whose true branch does a
// gather and whose false branch does a scatter. Verifies the parallel-dim
// optimization fires in BOTH branches: per-device DynamicSlice operands,
// locally rebased (Subtract) indices, and AllReduce(DynamicUpdateSlice) to
// rebuild the replicated result.
TEST_P(SpmdPartitioningTest, ParallelDimFromOutsideConditionalPositive) {
  absl::string_view hlo_string = R"(
HloModule module
gather_comp {
  %parameters = (s32[8,4,2,2], s32[1,8,4]) parameter(0),
    sharding={{replicated}, {devices=[1,8,1]<=[8]}}
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} get-tuple-element(parameters), index=0,
    sharding={replicated}
  %iota = s32[1,8,4]{2,1,0} get-tuple-element(parameters), index=1,
    sharding={devices=[1,8,1]<=[8]}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[1,8,1]<=[8]}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={devices=[1,8,1]<=[8]}
  %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather.20), sharding={replicated}
}
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
scatter_comp {
  %parameters = (s32[8,4,2,2], s32[1,8,4]) parameter(0),
    sharding={{replicated}, {devices=[1,8,1]<=[8]}}
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} get-tuple-element(parameters), index=0,
    sharding={replicated}
  %iota = s32[1,8,4]{2,1,0} get-tuple-element(parameters), index=1,
    sharding={devices=[1,8,1]<=[8]}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[1,8,1]<=[8]}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={devices=[1,8,1]<=[8]}
  %constant = s32[] constant(0)
  %base = s32[8,4,2,2]{3,2,1,0} broadcast(constant), dimensions={},
    sharding={replicated}
  %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %base,
    s32[2,8,4]{2,1,0} %concatenate.19,
    s32[8,4,2,2]{3,2,1,0} %parameter.0),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0
  ROOT copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter.20), sharding={replicated}
}
ENTRY entry {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={replicated}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,8,1]<=[8]}
  %counter = s32[] constant(0), sharding={replicated}
  %tuple = (s32[8,4,2,2], s32[1,8,4]) tuple(parameter.0, iota),
    sharding={{replicated}, {devices=[1,8,1]<=[8]}}
  %parameter.1 = pred[] parameter(1)
  ROOT conditional = s32[8,4,2,2] conditional(parameter.1, tuple, tuple),
    true_computation=gather_comp, false_computation=scatter_comp,
    sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  // True branch: parallel-dim gather.
  {
    const auto partitioned_gather = module->entry_computation()
                                        ->root_instruction()
                                        ->true_computation()
                                        ->root_instruction();
    auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice());
    auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
    auto gather =
        AllOf(op::Shape("s32[1,4,2,2]"), op::Gather(operand, indices));
    EXPECT_THAT(
        partitioned_gather,
        op::Copy(op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _))));
  }
  // False branch: parallel-dim scatter.
  {
    const auto partitioned_scatter = module->entry_computation()
                                         ->root_instruction()
                                         ->false_computation()
                                         ->root_instruction();
    auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice());
    auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
    auto update = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice());
    auto scatter =
        AllOf(op::Shape("s32[1,4,2,2]"), op::Scatter(operand, indices, update));
    EXPECT_THAT(partitioned_scatter,
                op::Copy(op::AllReduce(
                    op::DynamicUpdateSlice(_, scatter, _, _, _, _))));
  }
}
// Gather where the index-parallel dimension and a non-parallel operand
// dimension are both partitioned across 4 devices. Expects a per-shard
// gather on s32[4,4,2,2] operand / s32[2,4,2] index pieces, with the
// replicated result rebuilt by nested all-reduce(dynamic-update-slice).
TEST_P(SpmdPartitioningTest, GatherParallelDimAndNonParallelDimPartitioned) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[1,8,4]{2,1,0} parameter(1)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
    sharding={devices=[2,2,1,1]<=[4]}
  %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1),
    sharding={devices=[1,2,2]<=[4]}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,2,2]<=[4]}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %indices), dimensions={0},
    sharding={devices=[1,2,2]<=[4]}
  ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  const auto root = module->entry_computation()->root_instruction();
  VLOG(1) << module->ToString();
  // Per-partition operand comes from an all-reduce; indices are adjusted
  // with a subtract (presumably a global-to-local offset — matcher only
  // pins the opcode).
  auto operand = AllOf(op::Shape("s32[4,4,2,2]"), op::AllReduce());
  auto indices = AllOf(op::Shape("s32[2,4,2]"), op::Subtract());
  auto gather = AllOf(op::Shape("s32[4,2,2,2]"), op::Gather(operand, indices));
  EXPECT_THAT(
      root, op::AllReduce(op::DynamicUpdateSlice(
                _, op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _)),
                _, _, _, _)));
}
// Regression test for b/303520921: partitioning this gather (operand sharded
// 4-way on dim 0, output sharded on the batch and offset dims) must produce
// a per-partition gather of shape bf16[8,8,16] reading a bf16[250,16]
// operand shard and subtract-adjusted indices.
TEST_P(SpmdPartitioningTest, Gather_b303520921) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %convert.303 = bf16[1000,16]{1,0} parameter(0), sharding={devices=[4,1,2]<=[2,4]T(1,0) last_tile_dim_replicate}
  %reshape.830 = s32[16,8,1]{2,1,0} parameter(1), sharding={devices=[2,1,1,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate}
  ROOT %gather.831 = bf16[16,8,16]{2,1,0} gather(convert.303, reshape.830),
    offset_dims={2}, collapsed_slice_dims={0}, start_index_map={0},
    index_vector_dim=2, slice_sizes={1,16}, sharding={devices=[2,1,4]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  // Locate the partitioned gather by name and validate its shape and inputs
  // in a single combined matcher.
  const HloInstruction* partitioned_gather =
      FindInstruction(module.get(), "gather");
  EXPECT_NE(partitioned_gather, nullptr);
  auto operand_shard = AllOf(op::Shape("bf16[250,16]"), op::Parameter());
  auto local_indices = AllOf(op::Shape("s32[8,8,1]"), op::Subtract());
  EXPECT_THAT(partitioned_gather,
              AllOf(op::Shape("bf16[8,8,16]"),
                    op::Gather(operand_shard, local_indices)));
}
// Gather whose sharding merges an index-parallel dimension with
// operand-passthrough (offset) dimensions across 8 devices. The per-shard
// gather works on s32[2,4,1,2] pieces; the replicated output is rebuilt by
// two all-reduces around a dynamic-update-slice.
TEST_P(SpmdPartitioningTest, GatherMergedIndexParallelAndOperandPassthrough) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[2,2,2,1]<=[8]}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
  ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0,
    slice_sizes={1,1,2,2}, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  // Operand shard is produced by a reshape; indices are subtract-adjusted.
  auto operand = AllOf(op::Shape("s32[2,4,1,2]"), op::Reshape());
  auto indices = AllOf(op::Shape("s32[2,2,4]"), op::Subtract());
  auto gather = AllOf(op::Shape("s32[2,4,1,2]"), op::Gather(operand, indices));
  EXPECT_THAT(root, op::AllReduce(op::AllReduce(
                        op::DynamicUpdateSlice(_, gather, _, _, _, _))));
}
// Gather whose sharding merges an index-parallel dimension with a trivially
// sliced operand dimension (operand sharded [4,2,1,1] over 8 devices). The
// per-shard gather result is masked with a select (out-of-shard rows) before
// being combined via all-reduce and dynamic-update-slice.
TEST_P(SpmdPartitioningTest, GatherMergedIndexParallelAndTrivialSlicedOperand) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[4,2,1,1]<=[8]}
  %parameter.1 = s32[1,8,1]{2,1,0} parameter(1),
    sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
  %iota = s32[1,8,1]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
  %concatenate.19 = s32[2,8,1]{2,1,0} concatenate(
    s32[1,8,1]{2,1,0} %parameter.1, s32[1,8,1]{2,1,0} %iota), dimensions={0},
    sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
  ROOT %gather.20 = s32[8,1,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,1]{2,1,0} %concatenate.19), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0,
    slice_sizes={1,1,2,2}, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  const auto root = module->entry_computation()->root_instruction();
  auto operand = AllOf(op::Shape("s32[2,2,2,2]"), op::Parameter());
  auto indices = AllOf(op::Shape("s32[2,2,1]"), op::Subtract());
  auto gather = AllOf(op::Shape("s32[2,1,2,2]"), op::Gather(operand, indices));
  VLOG(1) << module->ToString();
  EXPECT_THAT(root,
              op::AllReduce(op::DynamicUpdateSlice(
                  _, op::AllReduce(op::Select(_, _, gather)), _, _, _, _)));
}
// Gather whose sharding merges an index-parallel dimension with an
// index-passthrough dimension (indices sharded [1,4,2] over 8 devices).
// Per-shard gather on s32[2,4,2,2]/s32[2,2,2] pieces; the replicated output
// is rebuilt with nested all-reduce(dynamic-update-slice).
TEST_P(SpmdPartitioningTest, GatherMergedIndexParallelAndIndexPassthrough) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[4,1,1,1,2]<=[8] last_tile_dim_replicate}
  %parameter.1 = s32[1,8,4]{2,1,0} parameter(1),
    sharding={devices=[1,4,2]<=[8]}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,4,2]<=[8]}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(
    s32[1,8,4]{2,1,0} %parameter.1, s32[1,8,4]{2,1,0} %iota), dimensions={0},
    sharding={devices=[1,4,2]<=[8]}
  ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0,
    slice_sizes={1,1,2,2}, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto operand = AllOf(op::Shape("s32[2,4,2,2]"), op::Parameter());
  auto indices = AllOf(op::Shape("s32[2,2,2]"), op::Subtract());
  auto gather = AllOf(op::Shape("s32[2,2,2,2]"), op::Gather(operand, indices));
  EXPECT_THAT(
      root, op::AllReduce(op::DynamicUpdateSlice(
                _, op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _)),
                _, _, _, _)));
}
// Gather whose operand sharding combines passthrough (offset) dims with a
// trivially sliced dim; indices are fully replicated. Per-shard gather on
// s32[4,2,1,2] operand pieces, select-masked and combined with all-reduces
// plus a dynamic-update-slice.
TEST_P(SpmdPartitioningTest,
       GatherMergedOperandPassthroughAndTrivialSlicedOperand) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[2,2,2,1]<=[8]}
  %parameter.1 = s32[2,8,4]{2,1,0} parameter(1),
    sharding={replicated}
  ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %parameter.1), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto operand = AllOf(op::Shape("s32[4,2,1,2]"), op::Parameter());
  auto indices = AllOf(op::Shape("s32[2,8,4]"), op::Subtract());
  auto gather = AllOf(op::Shape("s32[8,4,1,2]"), op::Gather(operand, indices));
  EXPECT_THAT(
      root, op::AllReduce(op::DynamicUpdateSlice(
                _, op::AllReduce(op::AllReduce(op::Select(_, _, gather))), _, _,
                _, _)));
}
// Gather whose sharding merges operand-passthrough dims with an
// index-passthrough dim. Indices reach the per-shard gather through a
// collective-permute; the replicated output is rebuilt with nested
// all-reduce(dynamic-update-slice).
TEST_P(SpmdPartitioningTest,
       GatherMergedOperandPassthroughAndIndexPassthrough) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[1,1,2,1,4]<=[8] last_tile_dim_replicate}
  %parameter.1 = s32[2,8,4]{2,1,0} parameter(1),
    sharding={devices=[1,2,1,4]<=[8] last_tile_dim_replicate}
  ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %parameter.1), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto operand = AllOf(op::Shape("s32[8,4,1,2]"), op::Parameter());
  auto indices = AllOf(op::Shape("s32[2,4,4]"), op::CollectivePermute());
  auto gather = AllOf(op::Shape("s32[4,4,1,2]"), op::Gather(operand, indices));
  EXPECT_THAT(
      root, op::AllReduce(op::DynamicUpdateSlice(
                _, op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _)),
                _, _, _, _)));
}
// Partial-grouping variant of the operand-passthrough + index-passthrough
// merge: sharding axes only partially overlap, so the operand shard is first
// assembled via an all-reduce, and an extra all-reduce wraps the final
// combine compared to the fully grouped case.
TEST_P(SpmdPartitioningTest,
       GatherMergedOperandPassthroughAndIndexPassthrough_PartialGrouping) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[2,2,2,1]<=[8]}
  %parameter.1 = s32[2,8,4]{2,1,0} parameter(1),
    sharding={devices=[1,2,2,2]<=[8] last_tile_dim_replicate}
  ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %parameter.1), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto operand = AllOf(op::Shape("s32[8,4,1,2]"), op::AllReduce());
  auto indices = AllOf(op::Shape("s32[2,4,2]"), op::Parameter());
  auto gather = AllOf(op::Shape("s32[4,2,1,2]"), op::Gather(operand, indices));
  EXPECT_THAT(
      root, op::AllReduce(op::AllReduce(op::DynamicUpdateSlice(
                _, op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _)),
                _, _, _, _))));
}
// Gather whose sharding merges a trivially sliced operand dim with an
// index-passthrough dim. Per-shard gather on s32[4,2,2,2]/s32[2,4,4] pieces;
// out-of-shard results are select-masked and combined with all-reduces plus
// a dynamic-update-slice.
TEST_P(SpmdPartitioningTest,
       GatherMergedTrivialSlicedOperandAndIndexPassthrough) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[2,2,1,1,2]<=[8] last_tile_dim_replicate}
  %parameter.1 = s32[2,8,4]{2,1,0} parameter(1),
    sharding={devices=[1,2,1,4]<=[8] last_tile_dim_replicate}
  ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %parameter.1), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto operand = AllOf(op::Shape("s32[4,2,2,2]"), op::Parameter());
  auto indices = AllOf(op::Shape("s32[2,4,4]"), op::Subtract());
  auto gather = AllOf(op::Shape("s32[4,4,2,2]"), op::Gather(operand, indices));
  EXPECT_THAT(
      root, op::AllReduce(op::DynamicUpdateSlice(
                _, op::AllReduce(op::AllReduce(op::Select(_, _, gather))), _, _,
                _, _)));
}
// Partial-grouping variant of the trivially-sliced-operand +
// index-passthrough merge: the operand shard is first assembled via an
// all-reduce (s32[8,2,2,2]) and the combine carries an extra outer
// all-reduce relative to the fully grouped case.
TEST_P(SpmdPartitioningTest,
       GatherMergedTrivialSlicedOperandAndIndexPassthrough_PartialGrouping) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[2,2,1,1,2]<=[8] last_tile_dim_replicate}
  %parameter.1 = s32[2,8,4]{2,1,0} parameter(1),
    sharding={devices=[1,2,2,2]<=[8] last_tile_dim_replicate}
  ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %parameter.1), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto operand = AllOf(op::Shape("s32[8,2,2,2]"), op::AllReduce());
  auto indices = AllOf(op::Shape("s32[2,4,2]"), op::Subtract());
  auto gather = AllOf(op::Shape("s32[4,2,2,2]"), op::Gather(operand, indices));
  EXPECT_THAT(root,
              op::AllReduce(op::AllReduce(op::DynamicUpdateSlice(
                  _, op::AllReduce(op::Select(_, _, gather)), _, _, _, _))));
}
// Gather with a partially sharded, trivially sliced operand (s64[8,2] tiled
// [4,2] over 8 devices) and replicated indices. The partitioned program
// should all-reduce the operand shard, adjust the indices with a subtract,
// gather locally, and select-mask + all-reduce the result.
TEST_P(SpmdPartitioningTest, GatherTrivialSlicedOperandPartial) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY main.4 {
  %arg.0 = s64[8,2]{1,0} parameter(0), sharding={devices=[4,2]<=[8]}
  %arg.1 = s32[2]{0} parameter(1), sharding={replicated}
  ROOT gather = s64[2,1]{1,0} gather(arg.0, arg.1), offset_dims={0,1},
    collapsed_slice_dims={}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={2,1}, indices_are_sorted=true, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  auto sliced_operand = AllOf(op::Shape("s64[8,1]"), op::AllReduce());
  auto local_indices = AllOf(op::Shape("s32[2]"), op::Subtract());
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::AllReduce(op::Select(
                  _, _,
                  AllOf(op::Shape("s64[2,1]"),
                        op::Gather(sliced_operand, local_indices)))));
}
// Gather where the operand and the output share the same parallel-dim
// sharding ([4,1,2,1] over 8 devices): the per-shard gather IS the final
// result — no cross-partition combine is expected at the root.
TEST_P(SpmdPartitioningTest, GatherParallelIndexAndOperand) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[4,1,2,1]<=[8]}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
  ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0,
    slice_sizes={1,1,2,2}, sharding={devices=[4,1,2,1]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto operand = AllOf(op::Shape("s32[2,4,1,2]"), op::Parameter(0));
  auto indices = AllOf(op::Shape("s32[2,2,4]"), op::Subtract());
  auto gather = AllOf(op::Shape("s32[2,4,1,2]"), op::Gather(operand, indices));
  // Root is the gather itself — no reshard/combine needed.
  EXPECT_THAT(root, gather);
}
// Same setup as GatherParallelIndexAndOperand, except the output sharding
// uses a permuted device order ([4,1,2,1]1,0,3,2,4,5,6,7): the per-shard
// gather result only needs a collective-permute to match the requested
// device assignment.
TEST_P(SpmdPartitioningTest, GatherReshardParallelIndexAndOperand) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[4,1,2,1]<=[8]}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
  ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0,
    slice_sizes={1,1,2,2}, sharding={devices=[4,1,2,1]1,0,3,2,4,5,6,7}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto operand = AllOf(op::Shape("s32[2,4,1,2]"), op::Parameter(0));
  auto indices = AllOf(op::Shape("s32[2,2,4]"), op::Subtract());
  auto gather = AllOf(op::Shape("s32[2,4,1,2]"), op::Gather(operand, indices));
  EXPECT_THAT(root, op::CollectivePermute(gather));
}
// Gather where the operand is partially replicated on the parallel dim but
// the output requests a finer sharding ([4,1,2,1]): the per-shard gather
// computes a s32[2,4,2,2] piece and the root dynamic-slices it down to the
// requested output shard.
TEST_P(SpmdPartitioningTest, GatherParallelIndexAndOperandReshard) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[4,1,1,1,2]<=[8] last_tile_dim_replicate}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
  ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0,
    slice_sizes={1,1,2,2}, sharding={devices=[4,1,2,1]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto operand = AllOf(op::Shape("s32[2,4,2,2]"), op::Parameter(0));
  auto indices = AllOf(op::Shape("s32[2,2,4]"), op::Subtract());
  auto gather = AllOf(op::Shape("s32[2,4,2,2]"), op::Gather(operand, indices));
  EXPECT_THAT(root, op::DynamicSlice(gather, _, _, _, _));
}
// Forces PartitioningMethod::kTrivialSlicedOperand for a gather partitioned
// on trivial slice dims (32 devices). Expects a select-masked gather under an
// all-reduce, indices clamped then offset by a subtract, a dynamic-slice
// whose offset comes from partition-id, and — key to this method — no
// collective-permute anywhere in the module.
TEST_P(SpmdPartitioningTest,
       GatherPartitionedOnTrivialSliceDimsForceTrivialSlice) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %input = f32[8,16] parameter(0), sharding={devices=[8,4]<=[4,8]T(1,0)}
  %indices = s32[4,16,1] parameter(1), sharding={devices=[4,1,1,8]<=[32] last_tile_dim_replicate}
  ROOT %gather = f32[4,16,16] gather(%input, %indices), offset_dims={2},
    collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=2,
    slice_sizes={1,16}, sharding={devices=[4,1,1,8]<=[32] last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, PartitionComputation(
                       hlo_string, 32, true, false, false,
                       false, -1, PartitioningMethod::kTrivialSlicedOperand));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, op::AllReduce(op::Select(_, _, op::Gather(_, _))));
  // The gather's start indices are the clamped parameter minus the shard
  // offset.
  EXPECT_THAT(root->operand(0)->operand(2)->operand(1),
              op::Subtract(op::Clamp(_, op::Parameter(1), _), _));
  auto clamp = FindInstruction(module.get(), HloOpcode::kClamp);
  EXPECT_THAT(clamp->operand(1), op::Parameter(1));
  auto dynamic_slice = FindInstruction(module.get(), HloOpcode::kDynamicSlice);
  EXPECT_THAT(dynamic_slice->operand(1), op::PartitionId());
  auto collective_permute =
      FindInstruction(module.get(), HloOpcode::kCollectivePermute);
  EXPECT_THAT(collective_permute, nullptr);
}
// Same HLO as the ForceTrivialSlice test, but forces
// PartitioningMethod::kIndexParallel. The index-parallel lowering must
// introduce a collective-permute (checked non-null), an all-reduce over a
// dynamic-update-slice, and a partition-id-offset dynamic-slice.
TEST_P(SpmdPartitioningTest,
       GatherPartitionedOnTrivialSliceDimsForceIndexParallel) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  %input = f32[8,16] parameter(0), sharding={devices=[8,4]<=[4,8]T(1,0)}
  %indices = s32[4,16,1] parameter(1), sharding={devices=[4,1,1,8]<=[32] last_tile_dim_replicate}
  ROOT %gather = f32[4,16,16] gather(%input, %indices), offset_dims={2},
    collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=2,
    slice_sizes={1,16}, sharding={devices=[4,1,1,8]<=[32] last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 32, true, false, false,
                           false, -1, PartitioningMethod::kIndexParallel));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root,
      op::AllReduce(op::DynamicUpdateSlice(
          _, op::AllReduce(op::Select(_, _, op::Gather(op::AllReduce(_), _))),
          _, _, _)));
  auto gather = FindInstruction(module.get(), HloOpcode::kGather);
  // Indices are clamped to valid range, then shifted by the shard offset.
  EXPECT_THAT(gather->operand(1),
              op::Subtract(op::Clamp(_, op::Parameter(1), _), _));
  auto collective_permute =
      FindInstruction(module.get(), HloOpcode::kCollectivePermute);
  EXPECT_NE(collective_permute, nullptr);
  auto all_reduce = FindInstruction(module.get(), HloOpcode::kAllReduce);
  EXPECT_THAT(all_reduce->operand(0), op::DynamicUpdateSlice(_, _, _, _));
  auto dynamic_slice = FindInstruction(module.get(), HloOpcode::kDynamicSlice);
  EXPECT_THAT(dynamic_slice->operand(1), op::PartitionId());
}
// Scatter (add-combiner) with index-parallel dims where the operand's
// sharding ([4,2,1,1]) does not line up with the parallel dims and must be
// redistributed — visible as the Reshape feeding the per-shard scatter.
// The replicated result is rebuilt with all-reduce(dynamic-update-slice).
TEST_P(SpmdPartitioningTest, ScatterParallelDimRedistributionOperand) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[4,2,1,1]<=[8]}
  %constant = s32[4] constant({0, 1, 2, 3}), sharding={replicated}
  %iota = s32[1,8,4]{2,1,0} broadcast(%constant), dimensions={2},
    sharding={devices=[1,8,1]<=[8]}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,8,1]<=[8]}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={devices=[1,8,1]<=[8]}
  %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1),
    sharding={devices=[8,1,1,1]<=[8]}
  ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19,
    s32[8,4,2,2]{3,2,1,0} %parameter.1),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={1,0},
    index_vector_dim=0, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  const auto root = module->entry_computation()->root_instruction();
  VLOG(1) << module->ToString();
  auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::Reshape());
  auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
  auto update = AllOf(op::Shape("s32[1,4,2,2]"), op::Parameter());
  auto scatter =
      AllOf(op::Shape("s32[1,4,2,2]"), op::Scatter(operand, indices, update));
  EXPECT_THAT(root,
              op::AllReduce(op::DynamicUpdateSlice(_, scatter, _, _, _, _)));
}
// Scatter with index-parallel dims where the scatter indices start out fully
// replicated: both operand and update shards come straight from parameters
// and the indices are subtract-adjusted to per-shard coordinates.
TEST_P(SpmdPartitioningTest, ScatterParallelDimReplicatedIndices) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[8,1,1,1]<=[8]}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={replicated}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={replicated}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={replicated}
  %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1),
    sharding={devices=[8,1,1,1]<=[8]}
  ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19,
    s32[8,4,2,2]{3,2,1,0} %parameter.1),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string,
                                                            8));
  const auto root = module->entry_computation()->root_instruction();
  VLOG(1) << module->ToString();
  auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::Parameter());
  auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
  auto update = AllOf(op::Shape("s32[1,4,2,2]"), op::Parameter());
  auto scatter =
      AllOf(op::Shape("s32[1,4,2,2]"), op::Scatter(operand, indices, update));
  EXPECT_THAT(root,
              op::AllReduce(op::DynamicUpdateSlice(_, scatter, _, _, _, _)));
}
// Scatter with index-parallel dims where the scatter operand starts out
// replicated: each partition dynamic-slices its s32[1,4,2,2] operand shard
// before the per-shard scatter; the update shard is a plain parameter.
TEST_P(SpmdPartitioningTest, ScatterParallelDimReplicatedOperand) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={replicated}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,8,1]<=[8]}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[1,8,1]<=[8]}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={devices=[1,8,1]<=[8]}
  %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1),
    sharding={devices=[8,1,1,1]<=[8]}
  ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19,
    s32[8,4,2,2]{3,2,1,0} %parameter.1),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string,
                                                            8));
  const auto root = module->entry_computation()->root_instruction();
  VLOG(1) << module->ToString();
  auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice());
  auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
  auto update = AllOf(op::Shape("s32[1,4,2,2]"), op::Parameter());
  auto scatter =
      AllOf(op::Shape("s32[1,4,2,2]"), op::Scatter(operand, indices, update));
  EXPECT_THAT(root,
              op::AllReduce(op::DynamicUpdateSlice(_, scatter, _, _, _, _)));
}
// Scatter with index-parallel dims where the scatter *updates* start out
// replicated: each partition dynamic-slices its s32[1,4,2,2] update shard;
// the operand shard is a plain parameter.
TEST_P(SpmdPartitioningTest, ScatterParallelDimReplicatedUpdate) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[8,1,1,1]<=[8]}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,8,1]<=[8]}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[1,8,1]<=[8]}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={devices=[1,8,1]<=[8]}
  %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1), sharding={replicated}
  ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19,
    s32[8,4,2,2]{3,2,1,0} %parameter.1),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string,
                                                            8));
  const auto root = module->entry_computation()->root_instruction();
  VLOG(1) << module->ToString();
  auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::Parameter());
  auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
  auto update = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice());
  auto scatter =
      AllOf(op::Shape("s32[1,4,2,2]"), op::Scatter(operand, indices, update));
  EXPECT_THAT(root,
              op::AllReduce(op::DynamicUpdateSlice(_, scatter, _, _, _, _)));
}
// Scatter with index-parallel dims where the indices are only partially
// replicated ([1,2,1,4] with last_tile_dim_replicate): still lowers to a
// per-shard scatter on s32[1,4,2,2] pieces with parameter-fed operand and
// update shards.
TEST_P(SpmdPartitioningTest, ScatterParallelDimPartialReplicatedIndices) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[8,1,1,1]<=[8]}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,2,1,4]<=[8] last_tile_dim_replicate}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[1,2,1,4]<=[8] last_tile_dim_replicate}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={devices=[1,2,1,4]<=[8] last_tile_dim_replicate}
  %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1),
    sharding={devices=[8,1,1,1]<=[8]}
  ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19,
    s32[8,4,2,2]{3,2,1,0} %parameter.1),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string,
                                                            8));
  const auto root = module->entry_computation()->root_instruction();
  VLOG(1) << module->ToString();
  auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::Parameter());
  auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
  auto update = AllOf(op::Shape("s32[1,4,2,2]"), op::Parameter());
  auto scatter =
      AllOf(op::Shape("s32[1,4,2,2]"), op::Scatter(operand, indices, update));
  EXPECT_THAT(root,
              op::AllReduce(op::DynamicUpdateSlice(_, scatter, _, _, _, _)));
}
// Scatter with index-parallel dims where the operand is only partially
// replicated ([2,1,1,1,4] with last_tile_dim_replicate): the operand shard
// is obtained with a dynamic-slice before the per-shard scatter.
TEST_P(SpmdPartitioningTest, ScatterParallelDimPartialReplicatedOperand) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={
    devices=[2,1,1,1,4]<=[8] last_tile_dim_replicate}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,8,1]<=[8]}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[1,8,1]<=[8]}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={devices=[1,8,1]<=[8]}
  %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1),
    sharding={devices=[8,1,1,1]<=[8]}
  ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19,
    s32[8,4,2,2]{3,2,1,0} %parameter.1),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string,
                                                            8));
  const auto root = module->entry_computation()->root_instruction();
  VLOG(1) << module->ToString();
  auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice());
  auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
  auto update = AllOf(op::Shape("s32[1,4,2,2]"), op::Parameter());
  auto scatter =
      AllOf(op::Shape("s32[1,4,2,2]"), op::Scatter(operand, indices, update));
  EXPECT_THAT(root,
              op::AllReduce(op::DynamicUpdateSlice(_, scatter, _, _, _, _)));
}
// Scatter with index-parallel dims where the *update* is only partially
// replicated ([2,1,1,1,4] with last_tile_dim_replicate): the update shard
// is obtained with a dynamic-slice before the per-shard scatter.
TEST_P(SpmdPartitioningTest, ScatterParallelDimPartialReplicatedUpdate) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[8,1,1,1]<=[8]}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,8,1]<=[8]}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[1,8,1]<=[8]}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={devices=[1,8,1]<=[8]}
  %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1), sharding={
    devices=[2,1,1,1,4]<=[8] last_tile_dim_replicate}
  ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19,
    s32[8,4,2,2]{3,2,1,0} %parameter.1),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string,
                                                            8));
  const auto root = module->entry_computation()->root_instruction();
  VLOG(1) << module->ToString();
  auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::Parameter());
  auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
  auto update = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice());
  auto scatter =
      AllOf(op::Shape("s32[1,4,2,2]"), op::Scatter(operand, indices, update));
  EXPECT_THAT(root,
              op::AllReduce(op::DynamicUpdateSlice(_, scatter, _, _, _, _)));
}
// Scatter where operand/update are sharded [4,2,1,1] but the parallel index
// dims are tiled in a different device order ([1,2,4]): both operand and
// update shards must be moved with collective-permutes before the per-shard
// scatter, and the combine needs a double all-reduce.
TEST_P(SpmdPartitioningTest, ScatterParallelDimSwappedDimensions) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={
    devices=[4,2,1,1]<=[8]}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,2,4]<=[8]}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[1,2,4]<=[8]}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={devices=[1,2,4]<=[8]}
  %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1), sharding={
    devices=[4,2,1,1]<=[8]}
  ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19,
    s32[8,4,2,2]{3,2,1,0} %parameter.1),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string,
                                                            8));
  const auto root = module->entry_computation()->root_instruction();
  VLOG(1) << module->ToString();
  auto operand = AllOf(op::Shape("s32[4,1,2,2]"), op::CollectivePermute());
  auto indices = AllOf(op::Shape("s32[2,4,1]"), op::Subtract());
  auto update = AllOf(op::Shape("s32[4,1,2,2]"), op::CollectivePermute());
  auto scatter =
      AllOf(op::Shape("s32[4,1,2,2]"), op::Scatter(operand, indices, update));
  EXPECT_THAT(root, op::AllReduce(op::AllReduce(
                        op::DynamicUpdateSlice(_, scatter, _, _, _, _))));
}
// The parallel index operand (iota on dim 1) is produced outside a while loop
// and consumed by a scatter inside the loop body. Verifies index-parallel
// scatter partitioning still applies inside the loop: the body's scatter is
// rewritten onto s32[1,4,2,2] shards with dynamic-sliced operand/updates, and
// the tuple root recombines the result with an all-reduced
// dynamic-update-slice.
TEST_P(SpmdPartitioningTest, ScatterParallelDimFromOutsideWhilePositive) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
cond {
  %parameters = (s32[8,4,2,2], s32[1,8,4], s32[8,4,2,2], s32[]) parameter(0),
    sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}, {replicated}}
  %counter = s32[] get-tuple-element(parameters), index=3, sharding={replicated}
  %constant = s32[] constant(3), sharding={replicated}
  ROOT %lt = pred[] compare(counter, constant), direction=LT,
    sharding={replicated}
}
body {
  %parameters = (s32[8,4,2,2], s32[1,8,4], s32[8,4,2,2], s32[]) parameter(0),
    sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}, {replicated}}
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} get-tuple-element(parameters), index=0,
    sharding={replicated}
  %iota = s32[1,8,4]{2,1,0} get-tuple-element(parameters), index=1,
    sharding={devices=[1,8,1]<=[8]}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[1,8,1]<=[8]}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0}, sharding={devices=[1,8,1]<=[8]}
  %parameter.1 = s32[8,4,2,2]{3,2,1,0} get-tuple-element(parameters), index=2,
    sharding={replicated}
  %counter = s32[] get-tuple-element(parameters), index=3, sharding={replicated}
  %constant = s32[] constant(1), sharding={replicated}
  %updated_counter = s32[] add(counter, constant), sharding={replicated}
  %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19,
    s32[8,4,2,2]{3,2,1,0} %parameter.1),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0, sharding={replicated}
  ROOT %tuple = (s32[8,4,2,2], s32[1,8,4], s32[8,4,2,2], s32[])
    tuple(scatter.20, iota, parameter.1, updated_counter),
    sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}, {replicated}}
}
ENTRY entry {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={replicated}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,8,1]<=[8]}
  %counter = s32[] constant(0), sharding={replicated}
  %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1), sharding={replicated}
  %tuple = (s32[8,4,2,2], s32[1,8,4], s32[8,4,2,2], s32[])
    tuple(parameter.0, iota, parameter.1, counter),
    sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}, {replicated}}
  ROOT while = (s32[8,4,2,2], s32[1,8,4], s32[8,4,2,2], s32[]) while(tuple), body=body,
    condition=cond,
    sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}, {replicated}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  // The assertions inspect the partitioned while-body root, not the entry root.
  const auto root = module->entry_computation()
                        ->root_instruction()
                        ->while_body()
                        ->root_instruction();
  VLOG(1) << module->ToString();
  auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice());
  auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
  auto update = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice());
  auto scatter =
      AllOf(op::Shape("s32[1,4,2,2]"), op::Scatter(operand, indices, update));
  EXPECT_THAT(
      root,
      op::Tuple(op::AllReduce(op::DynamicUpdateSlice(_, scatter, _, _, _, _)),
                _, _, _));
}
// Mixed sharding on 4 devices: the index-parallel dimension and a
// non-parallel operand dimension are both partitioned ([2,2,1,1] operand /
// update, [1,2,2] indices). Verifies the local scatter shapes and that the
// replicated result is assembled via a dynamic-slice of the all-reduced
// scatter inside nested all-reduces.
TEST_P(SpmdPartitioningTest, ScatterParallelDimAndNonParallelDimPartitioned) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 =  s32[1,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
    sharding={devices=[2,2,1,1]<=[4]}
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2),
    sharding={devices=[2,2,1,1]<=[4]}
  %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1),
    sharding={devices=[1,2,2]<=[4]}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,2,2]<=[4]}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %indices), dimensions={0},
    sharding={devices=[1,2,2]<=[4]}
  ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %concatenate.19,
    s32[8,4,2,2]{3,2,1,0} %update),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  const auto root = module->entry_computation()->root_instruction();
  VLOG(1) << module->ToString();
  auto operand = AllOf(op::Shape("s32[4,4,2,2]"));
  auto indices = AllOf(op::Shape("s32[2,4,2]"));
  auto update = AllOf(op::Shape("s32[4,2,2,2]"));
  auto scatter =
      AllOf(op::Shape("s32[4,4,2,2]"), op::Scatter(operand, indices, update));
  EXPECT_THAT(root, op::AllReduce(op::AllReduce(op::DynamicUpdateSlice(
                        _, op::DynamicSlice(op::AllReduce(scatter), _, _, _, _),
                        _, _, _, _))));
}
// Regression test for b/356877097: a scatter with scalar updates
// (update_window_dims={}) whose operand is column-sharded [1,8] and whose
// indices/updates are row-sharded across 8 devices. Verifies the scatter is
// kept local on f32[16,2] operand shards with subtracted (offset-adjusted)
// indices and all-reduced updates, instead of being mispartitioned.
TEST_P(SpmdPartitioningTest, b_356877097) {
  absl::string_view hlo_string = R"(
HloModule jit__init
region_0.16 {
  Arg_0.17 = f32[] parameter(0)
  ROOT Arg_1.18 = f32[] parameter(1)
}
ENTRY main.22 {
  constant.5 = f32[] constant(0), sharding={replicated}
  broadcast.3 = f32[16,16]{1,0} broadcast(constant.5), dimensions={}, sharding={devices=[1,8]<=[8]}
  constant.3 = s32[8,1]{1,0} constant({ {0}, {2}, {5}, {7}, {8}, {10}, {13}, {15} }), sharding={devices=[8,1]<=[8]}
  iota = s32[8,1]{1,0} iota(), iota_dimension=0, sharding={devices=[8,1]<=[8]}
  concatenate.15 = s32[8,2]{1,0} concatenate(constant.3, iota), dimensions={1}, sharding={devices=[8,1]<=[8]}
  constant.2 = f32[] constant(1), sharding={replicated}
  broadcast.1 = f32[8]{0} broadcast(constant.2), dimensions={}, sharding={devices=[8]<=[8]}
  ROOT scatter.19 = f32[16,16]{1,0} scatter(broadcast.3, concatenate.15, broadcast.1),
    update_window_dims={}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1},
    index_vector_dim=1, to_apply=region_0.16, sharding={devices=[1,8]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto operand = AllOf(op::Shape("f32[16,2]"), op::Broadcast());
  auto indices = AllOf(op::Shape("s32[8,2]"), op::Subtract());
  auto update = AllOf(op::Shape("f32[8]"), op::AllReduce());
  EXPECT_THAT(root, AllOf(op::Shape("f32[16,2]"),
                          op::Scatter(operand, indices, update)));
}
// Combines two scatter partitioning strategies: index-parallel dims (iota
// indices) and operand pass-through dims (operand sharded [2,2,2,1] over 8
// devices). Verifies the merged grouping produces a local scatter on
// s32[2,4,1,2] shards, rebuilt with a dynamic-update-slice under two
// all-reduces.
TEST_P(SpmdPartitioningTest, ScatterMergedIndexParallelAndOperandPassthrough) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[2,2,2,1]<=[8]}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(
    s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0},
    sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
  %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1),
    sharding={replicated}
  ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19,
    s32[8,4,2,2]{3,2,1,0} %parameter.1),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={1,0},
    index_vector_dim=0, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto operand = AllOf(op::Shape("s32[2,4,1,2]"), op::Reshape());
  auto indices = AllOf(op::Shape("s32[2,2,4]"), op::Subtract());
  auto update = AllOf(op::Shape("s32[2,4,1,2]"), op::DynamicSlice());
  auto scatter =
      AllOf(op::Shape("s32[2,4,1,2]"), op::Scatter(operand, indices, update));
  EXPECT_THAT(root, op::AllReduce(op::AllReduce(
                        op::DynamicUpdateSlice(_, scatter, _, _, _, _))));
}
// Merges index-parallel partitioning with trivially-sliced-operand
// partitioning (operand sharded [4,2,1,1]; one index component is a runtime
// parameter rather than an iota). Verifies the local scatter runs on
// s32[2,2,2,2] operand shards with offset-adjusted indices.
TEST_P(SpmdPartitioningTest,
       ScatterMergedIndexParallelAndTrivialSlicedOperand) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[4,2,1,1]<=[8]}
  %parameter.1 = s32[1,8,4]{2,1,0} parameter(1),
    sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(
    s32[1,8,4]{2,1,0} %parameter.1, s32[1,8,4]{2,1,0} %iota), dimensions={0},
    sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
  %parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2),
    sharding={replicated}
  ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19,
    s32[8,4,2,2]{3,2,1,0} %parameter.2),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={1,0},
    index_vector_dim=0, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  const auto root = module->entry_computation()->root_instruction();
  auto operand = AllOf(op::Shape("s32[2,2,2,2]"), op::Parameter());
  auto indices = AllOf(op::Shape("s32[2,2,4]"), op::Subtract());
  auto update = AllOf(op::Shape("s32[2,4,2,2]"), op::DynamicSlice());
  auto scatter =
      AllOf(op::Shape("s32[2,2,2,2]"), op::Scatter(operand, indices, update));
  VLOG(1) << module->ToString();
  EXPECT_THAT(root, op::AllReduce(op::AllReduce(
                        op::DynamicUpdateSlice(_, scatter, _, _, _, _))));
}
// Merges index-parallel partitioning with index pass-through partitioning
// (indices sharded [1,4,2] so their trailing dim is also partitioned).
// Verifies the local scatter uses a selected (masked) operand shard and that
// the result is combined with an all-reduce before the dynamic-update-slice.
TEST_P(SpmdPartitioningTest, ScatterMergedIndexParallelAndIndexPassthrough) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[4,1,1,1,2]<=[8] last_tile_dim_replicate}
  %parameter.1 = s32[1,8,4]{2,1,0} parameter(1),
    sharding={devices=[1,4,2]<=[8]}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,4,2]<=[8]}
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(
    s32[1,8,4]{2,1,0} %parameter.1, s32[1,8,4]{2,1,0} %iota), dimensions={0},
    sharding={devices=[1,4,2]<=[8]}
  %parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2),
    sharding={replicated}
  ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19,
    s32[8,4,2,2]{3,2,1,0} %parameter.2),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={1,0},
    index_vector_dim=0, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto operand = AllOf(op::Shape("s32[2,4,2,2]"), op::Select());
  auto indices = AllOf(op::Shape("s32[2,2,2]"), op::Subtract());
  auto update = AllOf(op::Shape("s32[2,2,2,2]"), op::DynamicSlice());
  auto scatter =
      AllOf(op::Shape("s32[2,4,2,2]"), op::Scatter(operand, indices, update));
  EXPECT_THAT(root, op::AllReduce(op::DynamicUpdateSlice(
                        _, op::AllReduce(scatter), _, _, _, _)));
}
// Merges operand pass-through and trivially-sliced-operand partitioning: the
// operand is sharded [2,2,2,1] while indices and updates are replicated.
// Verifies the local scatter keeps the operand parameter shard
// (s32[4,2,1,2]) and dynamic-slices the updates, with the replicated result
// reassembled under three nested all-reduces.
TEST_P(SpmdPartitioningTest,
       ScatterMergedOperandPassthroughAndTrivialSlicedOperand) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[2,2,2,1]<=[8]}
  %parameter.1 = s32[2,8,4]{2,1,0} parameter(1),
    sharding={replicated}
  %parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2),
    sharding={replicated}
  ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %parameter.1,
    s32[8,4,2,2]{3,2,1,0} %parameter.2),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto operand = AllOf(op::Shape("s32[4,2,1,2]"), op::Parameter());
  auto indices = AllOf(op::Shape("s32[2,8,4]"), op::Subtract());
  auto update = AllOf(op::Shape("s32[8,4,1,2]"), op::DynamicSlice());
  auto scatter =
      AllOf(op::Shape("s32[4,2,1,2]"), op::Scatter(operand, indices, update));
  EXPECT_THAT(root, op::AllReduce(op::AllReduce(op::AllReduce(
                        op::DynamicUpdateSlice(_, scatter, _, _, _, _)))));
}
// Merges operand pass-through (operand sharded on window dim 2) with index
// pass-through (indices sharded on dim 1). Verifies the local scatter uses a
// masked (select) operand shard, collective-permuted indices, and
// dynamic-sliced updates, with an inner all-reduce before the final
// dynamic-update-slice.
TEST_P(SpmdPartitioningTest,
       ScatterMergedOperandPassthroughAndIndexPassthrough) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[1,1,2,1,4]<=[8] last_tile_dim_replicate}
  %parameter.1 = s32[2,8,4]{2,1,0} parameter(1),
    sharding={devices=[1,2,1,4]<=[8] last_tile_dim_replicate}
  %parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2),
    sharding={replicated}
  ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %parameter.1,
    s32[8,4,2,2]{3,2,1,0} %parameter.2),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto operand = AllOf(op::Shape("s32[8,4,1,2]"), op::Select());
  auto indices = AllOf(op::Shape("s32[2,4,4]"), op::CollectivePermute());
  auto update = AllOf(op::Shape("s32[4,4,1,2]"), op::DynamicSlice());
  auto scatter =
      AllOf(op::Shape("s32[8,4,1,2]"), op::Scatter(operand, indices, update));
  EXPECT_THAT(root, op::AllReduce(op::DynamicUpdateSlice(
                        _, op::AllReduce(scatter), _, _, _, _)));
}
// Same operand-passthrough + index-passthrough merge as above, but with
// shardings ([2,2,2,1] operand vs [1,2,2,2] indices) that only allow a
// partial device grouping. Verifies the indices stay as the parameter shard
// and the result needs two nested all-reduces inside the
// dynamic-update-slice reassembly.
TEST_P(SpmdPartitioningTest,
       ScatterMergedOperandPassthroughAndIndexPassthrough_PartialGrouping) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[2,2,2,1]<=[8]}
  %parameter.1 = s32[2,8,4]{2,1,0} parameter(1),
    sharding={devices=[1,2,2,2]<=[8] last_tile_dim_replicate}
  %parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2),
    sharding={replicated}
  ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %parameter.1,
    s32[8,4,2,2]{3,2,1,0} %parameter.2),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto operand = AllOf(op::Shape("s32[8,4,1,2]"), op::Select());
  auto indices = AllOf(op::Shape("s32[2,4,2]"), op::Parameter());
  auto update = AllOf(op::Shape("s32[4,2,1,2]"), op::DynamicSlice());
  auto scatter =
      AllOf(op::Shape("s32[8,4,1,2]"), op::Scatter(operand, indices, update));
  EXPECT_THAT(root, op::AllReduce(op::DynamicUpdateSlice(
                        _, op::AllReduce(op::AllReduce(scatter)), _, _, _, _)));
}
// Merges trivially-sliced-operand partitioning (operand sharded [2,2,1,1]
// with partial replication) with index pass-through (indices sharded on dim
// 1). Verifies the local scatter uses masked s32[4,2,2,2] operand shards and
// offset-adjusted indices.
TEST_P(SpmdPartitioningTest,
       ScatterMergedTrivialSlicedOperandAndIndexPassthrough) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[2,2,1,1,2]<=[8] last_tile_dim_replicate}
  %parameter.1 = s32[2,8,4]{2,1,0} parameter(1),
    sharding={devices=[1,2,1,4]<=[8] last_tile_dim_replicate}
  %parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2),
    sharding={replicated}
  ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %parameter.1,
    s32[8,4,2,2]{3,2,1,0} %parameter.2),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto operand = AllOf(op::Shape("s32[4,2,2,2]"), op::Select());
  auto indices = AllOf(op::Shape("s32[2,4,4]"), op::Subtract());
  auto update = AllOf(op::Shape("s32[4,4,2,2]"), op::DynamicSlice());
  auto scatter =
      AllOf(op::Shape("s32[4,2,2,2]"), op::Scatter(operand, indices, update));
  EXPECT_THAT(root, op::AllReduce(op::AllReduce(op::DynamicUpdateSlice(
                        _, op::AllReduce(scatter), _, _, _, _))));
}
// Same trivially-sliced-operand + index-passthrough merge, but with index
// sharding [1,2,2,2] so only a partial device grouping is possible. Verifies
// the resulting shard shapes and the two nested all-reduces required to
// combine partial scatter results.
TEST_P(SpmdPartitioningTest,
       ScatterMergedTrivialSlicedOperandAndIndexPassthrough_PartialGrouping) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[2,2,1,1,2]<=[8] last_tile_dim_replicate}
  %parameter.1 = s32[2,8,4]{2,1,0} parameter(1),
    sharding={devices=[1,2,2,2]<=[8] last_tile_dim_replicate}
  %parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2),
    sharding={replicated}
  ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %parameter.1,
    s32[8,4,2,2]{3,2,1,0} %parameter.2),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto operand = AllOf(op::Shape("s32[8,2,2,2]"), op::Select());
  auto indices = AllOf(op::Shape("s32[2,4,2]"), op::Subtract());
  auto update = AllOf(op::Shape("s32[4,2,2,2]"), op::DynamicSlice());
  auto scatter =
      AllOf(op::Shape("s32[8,2,2,2]"), op::Scatter(operand, indices, update))
      ;
  EXPECT_THAT(root, op::AllReduce(op::DynamicUpdateSlice(
                        _, op::AllReduce(op::AllReduce(scatter)), _, _, _, _)));
}
// Trivially-sliced-operand partitioning with a partially sharded operand
// ([4,2] over 8 devices) and fully replicated indices/updates, where all
// operand dims are window dims (inserted_window_dims={}). Verifies the
// operand is first combined (all-reduce) along the sliced dim, scattered
// locally on s64[8,1] shards, then re-sliced and reassembled.
TEST_P(SpmdPartitioningTest, ScatterTrivialSlicedOperandPartial) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s64[], rhs: s64[]) -> s64[] {
  lhs = s64[] parameter(0)
  rhs = s64[] parameter(1)
  ROOT sum = s64[] add(lhs, rhs)
}
ENTRY main.4 {
  %arg.0 = s64[8,2]{1,0} parameter(0), sharding={devices=[4,2]<=[8]}
  %arg.1 = s32[2]{0} parameter(1), sharding={replicated}
  %arg.2 = s64[2,1]{1,0} parameter(2), sharding={replicated}
  ROOT scatter = s64[8,2]{1,0} scatter(arg.0, arg.1, arg.2),
    to_apply=add,
    update_window_dims={0,1},
    inserted_window_dims={},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0, indices_are_sorted=true, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto operand = AllOf(op::Shape("s64[8,1]"), op::AllReduce());
  auto indices = AllOf(op::Shape("s32[2]"), op::Subtract());
  auto update = AllOf(op::Shape("s64[2,1]"), op::Parameter());
  auto scatter =
      AllOf(op::Shape("s64[8,1]"), op::Scatter(operand, indices, update));
  EXPECT_THAT(root, op::AllReduce(op::AllReduce(op::DynamicUpdateSlice(
                        _, op::DynamicSlice(scatter, _, _), _, _))));
}
// Forces the trivially-sliced-operand strategy via
// PartitioningMethod::kTrivialSlicedOperand. Verifies the scatter stays on
// the sliced operand (masked select + index offset subtract), the index
// offset is derived from the partition id, and no collective-permute is
// inserted.
TEST_P(SpmdPartitioningTest,
       ScatterPartitionedOnTrivialSliceDimsForceTrivialSlice) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
  %input = f32[8,16] parameter(0), sharding={devices=[8,1,4]<=[4,8]T(1,0) last_tile_dim_replicate}
  %indices = s32[4,16,1] parameter(1), sharding={devices=[4,1,1,8]<=[32] last_tile_dim_replicate}
  %updates = f32[4,16,16] parameter(2), sharding={devices=[4,1,1,8]<=[32] last_tile_dim_replicate}
  ROOT %scatter = f32[8,16] scatter(%input, %indices, %updates),
      to_apply=add,
      update_window_dims={2},
      inserted_window_dims={0},
      scatter_dims_to_operand_dims={0},
      index_vector_dim=2, sharding={devices=[8,1,4]<=[4,8]T(1,0) last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, PartitionComputation(
                       hlo_string, 32, true, false, false,
                       false, -1, PartitioningMethod::kTrivialSlicedOperand));
  VLOG(1) << module->ToString();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, op::AllReduce(op::Scatter(op::Select(_, _, _),
                                              op::Subtract(_, _), _)));
  auto dynamic_slice = FindInstruction(module.get(), HloOpcode::kDynamicSlice);
  // Offsets must come straight from the partition id (no resharding).
  EXPECT_THAT(dynamic_slice->operand(1), op::PartitionId());
  auto collective_permute =
      FindInstruction(module.get(), HloOpcode::kCollectivePermute);
  EXPECT_THAT(collective_permute, nullptr);
}
// Same scatter as the previous test but forcing
// PartitioningMethod::kIndexParallel. Verifies the index-parallel rewrite is
// used instead: an all-to-all appears, the scatter's indices are
// offset-adjusted parameters, and collective-permute / all-reduce /
// partition-id-driven dynamic-slice are all present.
TEST_P(SpmdPartitioningTest,
       ScatterPartitionedOnTrivialSliceDimsForceIndexParallel) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
  %input = f32[8,16] parameter(0), sharding={devices=[8,4]<=[4,8]T(1,0)}
  %indices = s32[4,16,1] parameter(1), sharding={devices=[4,1,1,8]<=[32] last_tile_dim_replicate}
  %updates = f32[4,16,16] parameter(2), sharding={devices=[4,1,1,8]<=[32] last_tile_dim_replicate}
  ROOT %scatter = f32[8,16] scatter(%input, %indices, %updates),
      to_apply=add,
      update_window_dims={2},
      inserted_window_dims={0},
      scatter_dims_to_operand_dims={0},
      index_vector_dim=2, sharding={devices=[8,1,4]<=[4,8]T(1,0) last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 32, true, false, false,
                           false, -1, PartitioningMethod::kIndexParallel));
  VLOG(1) << module->ToString();
  auto all_to_all = FindInstruction(module.get(), HloOpcode::kAllToAll);
  EXPECT_NE(all_to_all, nullptr);
  auto scatter = FindInstruction(module.get(), HloOpcode::kScatter);
  EXPECT_THAT(scatter->operand(1), op::Subtract(op::Parameter(1), _));
  auto collective_permute =
      FindInstruction(module.get(), HloOpcode::kCollectivePermute);
  EXPECT_NE(collective_permute, nullptr);
  auto all_reduce = FindInstruction(module.get(), HloOpcode::kAllReduce);
  EXPECT_NE(all_reduce, nullptr);
  auto dynamic_slice = FindInstruction(module.get(), HloOpcode::kDynamicSlice);
  EXPECT_THAT(dynamic_slice->operand(1), op::PartitionId());
}
// Top-k style sort where the sharded dimensions ([2,1,4]) do not include the
// sort dimension (dim 2). Verifies the partitioned module still contains a
// local sort ("sort.0") over full-length s32/f32[1,16,32128] shards, i.e.
// the sort dimension is kept unpartitioned per shard.
TEST_P(SpmdPartitioningTest, SortTopKNonSortDimension) {
  absl::string_view hlo_string = R"(
HloModule module
%compare-greater-than.42077 (p.0.lhs.42078: f32[],
  p.0.rhs.42079: f32[], p.1.lhs.42080: s32[], p.1.rhs.42081: s32[]) -> pred[] {
  %p.0.lhs.42078 = f32[] parameter(0)
  %bitcast-convert.135 = s32[] bitcast-convert(f32[] %p.0.lhs.42078)
  %constant.45054 = s32[] constant(0)
  %compare.133 = pred[] compare(s32[] %bitcast-convert.135,
    s32[] %constant.45054), direction=LT
  %constant.45278 = u32[] constant(2147483647)
  %bitcast-convert.136 = u32[] bitcast-convert(f32[] %p.0.lhs.42078)
  %subtract.337 = u32[] subtract(u32[] %constant.45278,
    u32[] %bitcast-convert.136)
  %bitcast-convert.137 = s32[] bitcast-convert(u32[] %subtract.337)
  %select.282 = s32[] select(pred[] %compare.133, s32[] %bitcast-convert.137,
    s32[] %bitcast-convert.135)
  %p.0.rhs.42079 = f32[] parameter(1)
  %bitcast-convert.138 = s32[] bitcast-convert(f32[] %p.0.rhs.42079)
  %compare.134 = pred[] compare(s32[] %bitcast-convert.138,
    s32[] %constant.45054), direction=LT
  %bitcast-convert.139 = u32[] bitcast-convert(f32[] %p.0.rhs.42079)
  %subtract.338 = u32[] subtract(u32[] %constant.45278,
    u32[] %bitcast-convert.139)
  %bitcast-convert.140 = s32[] bitcast-convert(u32[] %subtract.338)
  %select.283 = s32[] select(pred[] %compare.134, s32[] %bitcast-convert.140,
    s32[] %bitcast-convert.138)
  %compare.135 = pred[] compare(s32[] %select.282,
    s32[] %select.283), direction=GT
  %compare.428 = pred[] compare(s32[] %select.283,
    s32[] %select.282), direction=GT
  %compare.429 = pred[] compare(pred[] %compare.135,
    pred[] %compare.428), direction=EQ
  %p.1.lhs.42080 = s32[] parameter(2)
  %p.1.rhs.42081 = s32[] parameter(3)
  %compare.430 = pred[] compare(s32[] %p.1.lhs.42080,
    s32[] %p.1.rhs.42081), direction=LT
  ROOT %select.579 = pred[] select(pred[] %compare.429,
    pred[] %compare.430, pred[] %compare.135)
}
ENTRY %module {
  %parameter.0 = f32[2,64,32128]{2,1,0} parameter(0),
     sharding={devices=[2,1,4]<=[8]}
  %iota = s32[2,64,32128]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[2,1,4]<=[8]}
  %sort.18 = (f32[2,64,32128]{2,1,0}, s32[2,64,32128]{2,1,0}) sort(
    f32[2,64,32128]{2,1,0} %parameter.0, s32[2,64,32128]{2,1,0} %iota),
    dimensions={2}, is_stable=true, to_apply=%compare-greater-than.42077,
    sharding={{devices=[2,1,4]<=[8]}, {devices=[2,1,4]<=[8]}}
  output = f32[2,64,32128]{2,1,0} get-tuple-element(%sort.18), index=0,
    sharding={devices=[2,1,4]<=[8]}
  %slice.0 = f32[2,64,2]{2,1,0} slice(f32[2,64,32128]{2,1,0} output),
    slice={[0:2], [0:64], [0:2]}, sharding={devices=[2,1,4]<=[8]}
  output2 = s32[2,64,32128]{2,1,0} get-tuple-element(%sort.18), index=1,
    sharding={replicated}
  %slice.1 = s32[2,64,2]{2,1,0} slice(s32[2,64,32128]{2,1,0} output2),
    slice={[0:2], [0:64], [0:2]}, sharding={devices=[2,1,4]<=[8]}
  ROOT output.t = (f32[2,64,2]{2,1,0},
    s32[2,64,2]{2,1,0}) tuple(slice.0, slice.1),
    sharding={{replicated}, {replicated}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  const HloInstruction* sort = FindInstruction(module.get(), "sort.0");
  EXPECT_NE(sort, nullptr);
  // Each shard sorts the full 32128-long dimension locally.
  auto sort_match =
      AllOf(op::Shape("(f32[1,16,32128], s32[1,16,32128])"), op::Sort(_, _));
  EXPECT_THAT(sort, sort_match);
}
// Top-k style sort sharded along the sort dimension itself ([1,1,8]).
// Verifies the partitioner propagates the unpartitioned base shape to the
// final slices: the root tuple holds full f32/s32[2,64,2] results assembled
// by all-reduced dynamic-update-slices.
TEST_P(SpmdPartitioningTest, SortTopKPropagateBaseShape) {
  absl::string_view hlo_string = R"(
HloModule module
%compare-greater-than.42077 (p.0.lhs.42078: f32[],
  p.0.rhs.42079: f32[], p.1.lhs.42080: s32[], p.1.rhs.42081: s32[]) -> pred[] {
  %p.0.lhs.42078 = f32[] parameter(0)
  %bitcast-convert.135 = s32[] bitcast-convert(f32[] %p.0.lhs.42078)
  %constant.45054 = s32[] constant(0)
  %compare.133 = pred[] compare(s32[] %bitcast-convert.135,
    s32[] %constant.45054), direction=LT
  %constant.45278 = u32[] constant(2147483647)
  %bitcast-convert.136 = u32[] bitcast-convert(f32[] %p.0.lhs.42078)
  %subtract.337 = u32[] subtract(u32[] %constant.45278,
    u32[] %bitcast-convert.136)
  %bitcast-convert.137 = s32[] bitcast-convert(u32[] %subtract.337)
  %select.282 = s32[] select(pred[] %compare.133, s32[] %bitcast-convert.137,
    s32[] %bitcast-convert.135)
  %p.0.rhs.42079 = f32[] parameter(1)
  %bitcast-convert.138 = s32[] bitcast-convert(f32[] %p.0.rhs.42079)
  %compare.134 = pred[] compare(s32[] %bitcast-convert.138,
    s32[] %constant.45054), direction=LT
  %bitcast-convert.139 = u32[] bitcast-convert(f32[] %p.0.rhs.42079)
  %subtract.338 = u32[] subtract(u32[] %constant.45278,
    u32[] %bitcast-convert.139)
  %bitcast-convert.140 = s32[] bitcast-convert(u32[] %subtract.338)
  %select.283 = s32[] select(pred[] %compare.134, s32[] %bitcast-convert.140,
    s32[] %bitcast-convert.138)
  %compare.135 = pred[] compare(s32[] %select.282,
    s32[] %select.283), direction=GT
  %compare.428 = pred[] compare(s32[] %select.283,
    s32[] %select.282), direction=GT
  %compare.429 = pred[] compare(pred[] %compare.135,
    pred[] %compare.428), direction=EQ
  %p.1.lhs.42080 = s32[] parameter(2)
  %p.1.rhs.42081 = s32[] parameter(3)
  %compare.430 = pred[] compare(s32[] %p.1.lhs.42080,
    s32[] %p.1.rhs.42081), direction=LT
  ROOT %select.579 = pred[] select(pred[] %compare.429,
    pred[] %compare.430, pred[] %compare.135)
}
ENTRY %module {
  %parameter.0 = f32[2,64,32128]{2,1,0} parameter(0),
     sharding={devices=[1,1,8]<=[8]}
  %iota = s32[2,64,32128]{2,1,0} iota(), iota_dimension=2,
    sharding={devices=[1,1,8]<=[8]}
  %sort.18 = (f32[2,64,32128]{2,1,0}, s32[2,64,32128]{2,1,0}) sort(
    f32[2,64,32128]{2,1,0} %parameter.0, s32[2,64,32128]{2,1,0} %iota),
    dimensions={2}, is_stable=true, to_apply=%compare-greater-than.42077,
    sharding={{devices=[1,1,8]<=[8]}, {devices=[1,1,8]<=[8]}}
  output = f32[2,64,32128]{2,1,0} get-tuple-element(%sort.18), index=0,
    sharding={devices=[1,1,8]<=[8]}
  %slice.0 = f32[2,64,2]{2,1,0} slice(f32[2,64,32128]{2,1,0} output),
    slice={[0:2], [0:64], [0:2]}, sharding={devices=[1,1,8]<=[8]}
  output2 = s32[2,64,32128]{2,1,0} get-tuple-element(%sort.18), index=1,
    sharding={replicated}
  %slice.1 = s32[2,64,2]{2,1,0} slice(s32[2,64,32128]{2,1,0} output2),
    slice={[0:2], [0:64], [0:2]}, sharding={devices=[1,1,8]<=[8]}
  ROOT output.t = (f32[2,64,2]{2,1,0},
    s32[2,64,2]{2,1,0}) tuple(slice.0, slice.1),
    sharding={{replicated}, {replicated}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const HloInstruction* root = module->entry_computation()->root_instruction();
  // Both tuple elements must carry the full (base) shape, not a shard shape.
  auto all_reduce_val =
      AllOf(op::Shape("f32[2,64,2]"),
            op::AllReduce(op::DynamicUpdateSlice(_, _, _, _, _)));
  auto all_reduce_idx =
      AllOf(op::Shape("s32[2,64,2]"),
            op::AllReduce(op::DynamicUpdateSlice(_, _, _, _, _)));
  auto tuple = AllOf(op::Shape("(f32[2,64,2], s32[2,64,2])"),
                     op::Tuple(all_reduce_val, all_reduce_idx));
  EXPECT_THAT(root, tuple);
}
// Gather where only the indices are sharded (operand replicated, indices
// [2,1] with partial replication) feeding a resharded reshape. Verifies the
// rewritten graph gathers from the full operand with the s32[1,4] index
// shard and reshapes a dynamic-slice of the gather to bf16[1,2,6] — i.e. the
// index-only sharding is replaced correctly rather than dropped.
TEST_P(SpmdPartitioningTest, GatherIndexOnlyCorrectReplacement) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = bf16[1,8,6,6]{3,2,1,0} parameter(0),
    sharding={replicated}
  %parameter.1 = s32[2,4]{1,0} parameter(1),
    sharding={devices=[2,1,4]<=[8] last_tile_dim_replicate}
  %gather.100 = bf16[2,1,8,1,6]{4,3,2,1,0} gather(
    bf16[1,8,6,6]{3,2,1,0} %parameter.0, s32[2,4]{1,0} %parameter.1),
    offset_dims={1,2,3,4}, collapsed_slice_dims={}, start_index_map={0,1,2,3},
    index_vector_dim=1, slice_sizes={1,8,1,6},
    sharding={devices=[2,1,4,1,1]<=[8]}
  %constant.45590 = s32[] constant(0), sharding={replicated}
  %broadcast.54515 = s32[2,64,1,1]{3,2,1,0} broadcast(s32[] %constant.45590),
    dimensions={},
    sharding={devices=[2,1,1,1,4]<=[8] last_tile_dim_replicate}
  ROOT %reshape.4243 = bf16[2,8,6]{2,1,0} reshape(
    bf16[2,1,8,1,6]{4,3,2,1,0} %gather.100),
    sharding={devices=[2,4,1]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  const HloInstruction* root = module->entry_computation()->root_instruction();
  auto param0 = AllOf(op::Shape("bf16[1,8,6,6]"), op::Parameter());
  auto param1 = AllOf(op::Shape("s32[1,4]"), op::Parameter());
  auto reshape = AllOf(
      op::Shape("bf16[1,2,6]"),
      op::Reshape(op::DynamicSlice(op::Gather(param0, param1), _, _, _, _, _)));
  EXPECT_THAT(root, reshape);
}
// Regression test: gather of a [1,8]-sharded operand by an [8]-sharded iota
// index over 8 devices. Verifies the root is a gather whose operand is the
// local s32[1,1] parameter shard (no unnecessary resharding of the operand).
TEST_P(SpmdPartitioningTest, GatherRegressionTest1) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[1,4] parameter(0), sharding={devices=[1,8]<=[8]}
  %iota.10 = s32[4]{0} iota(), iota_dimension=0, sharding={devices=[8]<=[8]}
  ROOT %gather.44 = s32[1,4]{1,0} gather(%parameter.0, %iota.10),
    offset_dims={0}, collapsed_slice_dims={1}, start_index_map={1}, index_vector_dim=1,
    slice_sizes={1,1}, sharding={devices=[1,8]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  const HloInstruction* root = module->entry_computation()->root_instruction();
  auto param0 = AllOf(op::Shape("s32[1,1]"), op::Parameter());
  EXPECT_THAT(root, op::Gather(param0, _));
}
// Windowed einsum: with the second flag false the partitioner should pick
// the memory-footprint-friendly schedule, producing a while loop whose trip
// count is 4 (checked via the loop-condition comparison constant).
// NOTE(review): the two bool args to PartitionComputation presumably select
// windowed-einsum heuristics — confirm against the test fixture's signature.
TEST_P(SpmdPartitioningTest, WindowedEinsumPreferMemoryFootprint) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = bf16[128,1024,4,4,1152,1,1]{6,5,4,3,2,1,0} parameter(0),
sharding={devices=[4,1,2,1,1,1,1]<=[8]}
%parameter.1 = bf16[4,4,1152,4,176,256,1]{6,5,4,3,2,1,0} parameter(1),
sharding={devices=[2,2,1,2,1,1,1]<=[8]}
%convolution.3 = bf16[128,1024,4,176,256,1,1]{6,5,4,3,2,1,0}
convolution(bf16[128,1024,4,4,1152,1,1]{6,5,4,3,2,1,0} %parameter.0,
bf16[4,4,1152,4,176,256,1]{6,5,4,3,2,1,0} %parameter.1),
window={size=1x4x176x4x4 pad=0_0x3_3x175_175x0_0x0_0
rhs_reversal=0x1x1x0x0}, dim_labels=0b34f12_34i12o0->0b12f34,
sharding={devices=[4,1,2,1,1,1,1]<=[8]}
ROOT %reshape.3973 = bf16[128,1024,4,176,256]{4,3,2,1,0}
reshape(bf16[128,1024,4,176,256,1,1]{6,5,4,3,2,1,0} %convolution.3),
sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 8,
                           true,
                           false));
  const HloInstruction* while_inst = FindInstruction(module.get(), "while");
  EXPECT_NE(while_inst, nullptr);
  const HloComputation* cond_comp = while_inst->while_condition();
  const HloInstruction* root = cond_comp->root_instruction();
  EXPECT_THAT(root, op::Compare(_, op::Constant()));
  // The comparison constant in the while condition is the iteration count.
  const HloConstantInstruction* iterations =
      Cast<HloConstantInstruction>(root->operand(1));
  EXPECT_TRUE(iterations->literal().GetFirstInteger());
  EXPECT_EQ(*iterations->literal().GetFirstInteger(), 4);
}
// Same module as WindowedEinsumPreferMemoryFootprint, but with the second
// flag true the partitioner should prefer fewer iterations: the windowed
// einsum while loop runs 2 iterations instead of 4.
TEST_P(SpmdPartitioningTest, WindowedEinsumPreferNumberIterations) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = bf16[128,1024,4,4,1152,1,1]{6,5,4,3,2,1,0} parameter(0),
sharding={devices=[4,1,2,1,1,1,1]<=[8]}
%parameter.1 = bf16[4,4,1152,4,176,256,1]{6,5,4,3,2,1,0} parameter(1),
sharding={devices=[2,2,1,2,1,1,1]<=[8]}
%convolution.3 = bf16[128,1024,4,176,256,1,1]{6,5,4,3,2,1,0}
convolution(bf16[128,1024,4,4,1152,1,1]{6,5,4,3,2,1,0} %parameter.0,
bf16[4,4,1152,4,176,256,1]{6,5,4,3,2,1,0} %parameter.1),
window={size=1x4x176x4x4 pad=0_0x3_3x175_175x0_0x0_0
rhs_reversal=0x1x1x0x0}, dim_labels=0b34f12_34i12o0->0b12f34,
sharding={devices=[4,1,2,1,1,1,1]<=[8]}
ROOT %reshape.3973 = bf16[128,1024,4,176,256]{4,3,2,1,0}
reshape(bf16[128,1024,4,176,256,1,1]{6,5,4,3,2,1,0} %convolution.3),
sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 8,
                           true,
                           true));
  const HloInstruction* while_inst = FindInstruction(module.get(), "while");
  EXPECT_NE(while_inst, nullptr);
  const HloComputation* cond_comp = while_inst->while_condition();
  const HloInstruction* root = cond_comp->root_instruction();
  EXPECT_THAT(root, op::Compare(_, op::Constant()));
  // The comparison constant in the while condition is the iteration count.
  const HloConstantInstruction* iterations =
      Cast<HloConstantInstruction>(root->operand(1));
  EXPECT_TRUE(iterations->literal().GetFirstInteger());
  EXPECT_EQ(*iterations->literal().GetFirstInteger(), 2);
}
// Larger 32-device windowed-einsum case: preferring fewer iterations should
// yield a while loop with trip count 4.
TEST_P(SpmdPartitioningTest, WindowedEinsumPreferNumberIterations2) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = bf16[512,1024,16,36,256]{4,3,2,1,0} parameter(0)
%lhs.copy = bf16[512,1024,16,36,256]{4,3,2,1,0} copy(%lhs),
sharding={devices=[8,1,4,1,1]<=[32]}
%rhs = bf16[512,1024,16,4,288]{4,3,2,1,0} parameter(1)
%rhs.copy = bf16[512,1024,16,4,288]{4,3,2,1,0} copy(%rhs),
sharding={devices=[8,1,4,1,1]<=[32]}
%reshape.2556 = bf16[512,1024,16,4,288,1,1]{6,5,4,3,2,1,0} reshape(
bf16[512,1024,16,4,288]{4,3,2,1,0} %rhs.copy), sharding={
devices=[8,1,4,1,1,1,1]<=[32]}
%reshape.2570 = bf16[512,1024,16,36,256,1,1]{6,5,4,3,2,1,0}
reshape(bf16[512,1024,16,36,256]{4,3,2,1,0} %lhs.copy), sharding={
devices=[8,1,4,1,1,1,1]<=[32]}
%convolution.10 = bf16[16,36,256,16,4,288,1]{6,5,4,3,2,1,0}
convolution(bf16[512,1024,16,36,256,1,1]{6,5,4,3,2,1,0} %reshape.2570,
bf16[512,1024,16,4,288,1,1]{6,5,4,3,2,1,0} %reshape.2556),
window={size=1x1x16x4x512 pad=0_0x0_0x15_15x3_3x0_0 rhs_reversal=0x0x1x1x0},
dim_labels=4f01b23_4i23o01->01b23f4,
sharding={devices=[4,1,1,4,2,1,1]<=[8,2,2]T(1,2,0)}
ROOT %output = bf16[16,36,256,16,4,288,1]{6,5,4,3,2,1,0}
copy(%convolution.10), sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 32,
                           true,
                           true));
  const HloInstruction* while_inst = FindInstruction(module.get(), "while");
  EXPECT_NE(while_inst, nullptr);
  const HloComputation* cond_comp = while_inst->while_condition();
  const HloInstruction* root = cond_comp->root_instruction();
  EXPECT_THAT(root, op::Compare(_, op::Constant()));
  // The comparison constant in the while condition is the iteration count.
  const HloConstantInstruction* iterations =
      Cast<HloConstantInstruction>(root->operand(1));
  EXPECT_TRUE(iterations->literal().GetFirstInteger());
  EXPECT_EQ(*iterations->literal().GetFirstInteger(), 4);
}
// Same 32-device module as WindowedEinsumPreferNumberIterations2, but
// preferring the smaller memory footprint: the while loop runs 8 iterations.
TEST_P(SpmdPartitioningTest, WindowedEinsumPreferMemoryFootprint2) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = bf16[512,1024,16,36,256]{4,3,2,1,0} parameter(0)
%lhs.copy = bf16[512,1024,16,36,256]{4,3,2,1,0} copy(%lhs),
sharding={devices=[8,1,4,1,1]<=[32]}
%rhs = bf16[512,1024,16,4,288]{4,3,2,1,0} parameter(1)
%rhs.copy = bf16[512,1024,16,4,288]{4,3,2,1,0} copy(%rhs),
sharding={devices=[8,1,4,1,1]<=[32]}
%reshape.2556 = bf16[512,1024,16,4,288,1,1]{6,5,4,3,2,1,0} reshape(
bf16[512,1024,16,4,288]{4,3,2,1,0} %rhs.copy), sharding={
devices=[8,1,4,1,1,1,1]<=[32]}
%reshape.2570 = bf16[512,1024,16,36,256,1,1]{6,5,4,3,2,1,0}
reshape(bf16[512,1024,16,36,256]{4,3,2,1,0} %lhs.copy), sharding={
devices=[8,1,4,1,1,1,1]<=[32]}
%convolution.10 = bf16[16,36,256,16,4,288,1]{6,5,4,3,2,1,0}
convolution(bf16[512,1024,16,36,256,1,1]{6,5,4,3,2,1,0} %reshape.2570,
bf16[512,1024,16,4,288,1,1]{6,5,4,3,2,1,0} %reshape.2556),
window={size=1x1x16x4x512 pad=0_0x0_0x15_15x3_3x0_0 rhs_reversal=0x0x1x1x0},
dim_labels=4f01b23_4i23o01->01b23f4,
sharding={devices=[4,1,1,4,2,1,1]<=[8,2,2]T(1,2,0)}
ROOT %output = bf16[16,36,256,16,4,288,1]{6,5,4,3,2,1,0}
copy(%convolution.10), sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 32,
                           true,
                           false));
  const HloInstruction* while_inst = FindInstruction(module.get(), "while");
  EXPECT_NE(while_inst, nullptr);
  const HloComputation* cond_comp = while_inst->while_condition();
  const HloInstruction* root = cond_comp->root_instruction();
  EXPECT_THAT(root, op::Compare(_, op::Constant()));
  // The comparison constant in the while condition is the iteration count.
  const HloConstantInstruction* iterations =
      Cast<HloConstantInstruction>(root->operand(1));
  EXPECT_TRUE(iterations->literal().GetFirstInteger());
  EXPECT_EQ(*iterations->literal().GetFirstInteger(), 8);
}
// Regression test for contracting-dimension partitioning: the dot's operands
// must end up with the per-shard shapes f32[4,2,4,4] and f32[2,4,4] (i.e.
// sliced consistently on the contracting dims).
TEST_P(SpmdPartitioningTest, ContractingPartitionDotOperandsSlicedWrong) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[8,2,15,4] parameter(0)
%lhs.copy = f32[8,2,15,4] copy(%lhs),
sharding={devices=[1,2,4,1]<=[8]}
%rhs = f32[2,15,4] parameter(1)
%rhs.copy = f32[2,15,4] copy(%rhs),
sharding={devices=[2,4,1]<=[8]}
%dot = f32[8,2,2] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={}, rhs_batch_dims={},
lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2},
operand_precision={HIGH,HIGH},
sharding={devices=[2,2,2]<=[8]}
ROOT %output = f32[8,2,2] copy(%dot), sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 8,
                           true,
                           true));
  const HloInstruction* dot_op = FindInstruction(module.get(), HloOpcode::kDot);
  auto op1 = op::Shape("f32[4,2,4,4]");
  auto op2 = op::Shape("f32[2,4,4]");
  EXPECT_THAT(dot_op, op::Dot(op1, op2));
}
// Dot grouped on batch + contracting dims: expects a per-shard dot followed
// by a reduce-scatter (dynamic-slice of an all-reduce) and a reshard via
// reshape/transpose/all-to-all to reach the requested output sharding.
TEST_P(SpmdPartitioningTest, PartitionDotGroupOnBatchContractingReshard) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,32,24,4096] parameter(0),
sharding={devices=[2,1,1,2]<=[4]}
%rhs = f32[32,4096,1024] parameter(1),
sharding={devices=[2,2,1]<=[4]}
ROOT %dot = f32[32,32,24,1024] dot(%lhs, %rhs),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={3}, rhs_contracting_dims={1},
sharding={devices=[1,2,1,2]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 4,
                           true,
                           true));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto dot = AllOf(op::Shape("f32[16,32,24,1024]"),
                   op::Dot(op::Parameter(0), op::Parameter(1)));
  auto reduce_scatter = AllOf(op::Shape("f32[16,32,24,512]"),
                              op::DynamicSlice(op::AllReduce(dot), _, _, _, _));
  EXPECT_THAT(root, AllOf(op::Reshape(op::Transpose(
                              op::AllToAll(op::Reshape(reduce_scatter)))),
                          op::Shape("f32[32,16,24,512]")));
}
// Operand-passthrough scatter: with operand and updates sharded on the same
// (last) dimension, the scatter stays local per shard with output shape
// bf16[2,512] and no cross-partition communication in the matcher.
TEST_P(SpmdPartitioningTest, PartitionPassthroughScatterCorrectOutputSharding) {
  absl::string_view hlo_string = R"(
HloModule module
%scatter_add (parameter.0: bf16[], parameter.1: bf16[]) -> bf16[] {
%parameter.0 = bf16[] parameter(0)
%parameter.1 = bf16[] parameter(1)
ROOT %add = bf16[] add(bf16[] %parameter.0, bf16[] %parameter.1)
}
ENTRY entry {
%operand = bf16[2,1024]{1,0} parameter(0),
sharding={devices=[1,2]0,1}
%indices = s32[8,512,1]{2,1,0} parameter(1),
sharding={replicated}
%updates = bf16[8,512,1024]{2,1,0} parameter(2),
sharding={devices=[1,1,2]0,1}
ROOT %scatter = bf16[2,1024]{1,0} scatter(bf16[2,1024]{1,0} %operand,
s32[8,512,1]{2,1,0} %indices,
bf16[8,512,1024]{2,1,0} %updates), update_window_dims={2},
inserted_window_dims={0}, scatter_dims_to_operand_dims={0},
index_vector_dim=2, to_apply=%scatter_add,
sharding={devices=[1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto scatter = AllOf(op::Shape("bf16[2,512]"), op::Scatter(_, _, _));
  EXPECT_THAT(root, scatter);
}
// Returns true iff `hlo` is a collective-permute that moves no data across
// devices: either its source/target pair list is empty, or every pair sends
// a device's data back to itself.
bool IsTrivialCollectivePermute(HloInstruction* hlo) {
  if (hlo->opcode() != HloOpcode::kCollectivePermute) {
    return false;
  }
  // An empty pair list is trivially a no-op; otherwise every pair must be a
  // self-loop.
  for (const std::pair<int64_t, int64_t>& pair : hlo->source_target_pairs()) {
    if (pair.first != pair.second) {
      return false;
    }
  }
  return true;
}
// A pad/slice/concatenate "rotation" pattern whose halo exchange would be an
// identity permute: after partitioning, no trivial (self-loop or empty)
// collective-permute may remain anywhere in the module.
TEST_P(SpmdPartitioningTest, CollectivePermuteSimplifyIdentity) {
  absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
%parameter.7 = f32[3,16] parameter(0), sharding={devices=[1,2]0,1}
%constant.7 = f32[] constant(0)
%pad.3 = f32[3,18] pad(f32[3,16] %parameter.7, f32[] %constant.7), padding=0_0x1_1, sharding={devices=[1,2]0,1}
%slice.8 = f32[3,16] slice(f32[3,18] %pad.3), slice={[0:3], [2:18]}, sharding={devices=[1,2]0,1}
%slice.9 = f32[3,2] slice(f32[3,18] %pad.3), slice={[0:3], [0:2]}, sharding={devices=[1,2]0,1}
ROOT %concatenate.6 = f32[3,18] concatenate(f32[3,16] %slice.8, f32[3,2] %slice.9), dimensions={1}, sharding={devices=[1,2]0,1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  // Scan every instruction in every computation for trivial permutes.
  for (HloComputation* computation : module->computations()) {
    for (HloInstruction* hlo : computation->instructions()) {
      EXPECT_FALSE(IsTrivialCollectivePermute(hlo)) << hlo->ToString();
    }
  }
}
// A slice+pad pattern where the halo region is entirely padding: the
// partitioner must not emit a trivial collective-permute for it.
TEST_P(SpmdPartitioningTest, CollectivePermuteSimplifyZero) {
  absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
%parameter = f32[3,16,16,16,16,132]{5,4,3,2,1,0} parameter(0), sharding={devices=[1,2,1,1,1,1]0,1}
%slice = f32[3,1,16,16,16,132]{5,4,3,2,1,0} slice(f32[3,16,16,16,16,132]{5,4,3,2,1,0} %parameter), slice={[0:3], [15:16], [0:16], [0:16], [0:16], [0:132]}, sharding={devices=[1,2,1,1,1,1]0,1}
%c0 = f32[] constant(0)
ROOT %pad = f32[3,18,16,16,16,132]{5,4,3,2,1,0} pad(f32[3,1,16,16,16,132]{5,4,3,2,1,0} %slice, f32[] %c0), padding=0_0x0_17x0_0x0_0x0_0x0_0, sharding={devices=[1,2,1,1,1,1]0,1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  for (HloComputation* computation : module->computations()) {
    for (HloInstruction* hlo : computation->instructions()) {
      EXPECT_FALSE(IsTrivialCollectivePermute(hlo)) << hlo->ToString();
    }
  }
}
// Wrap-around pad pattern (concat of edge slices with the operand): must be
// partitioned without any trivial collective-permute and without falling
// back to an all-reduce.
TEST_P(SpmdPartitioningTest, PadWithWrapPattern) {
  absl::string_view hlo_string = R"(
HloModule xla_computation_apply_fn__4.61
ENTRY %xla_computation_apply_fn__4.61 (parameter.7: f32[3,16,16,16,16,132]) -> f32[3,18,16,16,16,132] {
%parameter.7 = f32[3,16,16,16,16,132]{5,4,3,2,1,0} parameter(0), sharding={devices=[1,2,1,1,1,1]0,1}
%slice.2 = f32[3,1,16,16,16,132]{5,4,3,2,1,0} slice(f32[3,16,16,16,16,132]{5,4,3,2,1,0} %parameter.7), slice={[0:3], [15:16], [0:16], [0:16], [0:16], [0:132]}, sharding={devices=[1,2,1,1,1,1]0,1}
%slice.3 = f32[3,1,16,16,16,132]{5,4,3,2,1,0} slice(f32[3,16,16,16,16,132]{5,4,3,2,1,0} %parameter.7), slice={[0:3], [0:1], [0:16], [0:16], [0:16], [0:132]}, sharding={devices=[1,2,1,1,1,1]0,1}
ROOT %concatenate.3 = f32[3,18,16,16,16,132]{5,4,3,2,1,0} concatenate(f32[3,1,16,16,16,132]{5,4,3,2,1,0} %slice.2, f32[3,16,16,16,16,132]{5,4,3,2,1,0} %parameter.7, f32[3,1,16,16,16,132]{5,4,3,2,1,0} %slice.3), dimensions={1}, sharding={devices=[1,2,1,1,1,1]0,1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  for (HloComputation* computation : module->computations()) {
    for (HloInstruction* hlo : computation->instructions()) {
      EXPECT_FALSE(IsTrivialCollectivePermute(hlo)) << hlo->ToString();
      EXPECT_NE(hlo->opcode(), HloOpcode::kAllReduce) << hlo->ToString();
    }
  }
}
// Wrap-around pad where the wrapped edges are negated first: still must be
// handled by halo exchange — no trivial collective-permute, no all-reduce.
TEST_P(SpmdPartitioningTest, PadWrapWithNegatePattern) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%parameter.1 = f32[1,18] parameter(0), sharding={devices=[1,2]0,1}
%slice.16 = f32[1,2] slice(f32[1,18] %parameter.1), slice={[0:1], [16:18]}, sharding={devices=[1,2]0,1}
%negate.2 = f32[1,2] negate(f32[1,2] %slice.16), sharding={devices=[1,2]0,1}
%slice.17 = f32[1,2] slice(f32[1,18] %parameter.1), slice={[0:1], [0:2]}, sharding={devices=[1,2]0,1}
%negate.3 = f32[1,2] negate(f32[1,2] %slice.17), sharding={devices=[1,2]0,1}
ROOT %concatenate.13 = f32[1,22] concatenate(f32[1,2] %negate.2, f32[1,18] %parameter.1, f32[1,2] %negate.3), dimensions={1}, sharding={devices=[1,2]0,1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  for (HloComputation* computation : module->computations()) {
    for (HloInstruction* hlo : computation->instructions()) {
      EXPECT_FALSE(IsTrivialCollectivePermute(hlo)) << hlo->ToString();
      EXPECT_NE(hlo->opcode(), HloOpcode::kAllReduce) << hlo->ToString();
    }
  }
}
// Wrap-around pad with chains of elementwise modifiers (rsqrt+sin on one
// edge, convert+cos+convert on the other): checks no trivial permute / no
// all-reduce, and that each modifier chain keeps its original operand order
// after partitioning.
TEST_P(SpmdPartitioningTest, PadWrapWithMultipleModifiersPattern) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%parameter.1 = f32[1,18] parameter(0), sharding={devices=[1,2]0,1}
%slice.16 = f32[1,2] slice(f32[1,18] %parameter.1), slice={[0:1], [16:18]}, sharding={devices=[1,2]0,1}
%mod0.16 = f32[1,2] rsqrt(f32[1,2] %slice.16), sharding={devices=[1,2]0,1}
%mod1.16 = f32[1,2] sine(f32[1,2] %mod0.16), sharding={devices=[1,2]0,1}
%slice.17 = f32[1,2] slice(f32[1,18] %parameter.1), slice={[0:1], [0:2]}, sharding={devices=[1,2]0,1}
%mod0.17 = f16[1,2] convert(f32[1,2] %slice.17), sharding={devices=[1,2]0,1}
%mod1.17 = f16[1,2] cosine(f16[1,2] %mod0.17), sharding={devices=[1,2]0,1}
%mod2.17 = f32[1,2] convert(f16[1,2] %mod1.17), sharding={devices=[1,2]0,1}
ROOT %concatenate.13 = f32[1,22] concatenate(f32[1,2] %mod1.16, f32[1,18] %parameter.1, f32[1,2] %mod2.17), dimensions={1}, sharding={devices=[1,2]0,1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  for (HloComputation* computation : module->computations()) {
    for (HloInstruction* hlo : computation->instructions()) {
      const HloOpcode op = hlo->opcode();
      EXPECT_FALSE(IsTrivialCollectivePermute(hlo)) << hlo->ToString();
      EXPECT_NE(op, HloOpcode::kAllReduce) << hlo->ToString();
      // Only unary ops participate in the modifier chains below.
      if (hlo->operand_count() != 1) {
        continue;
      }
      const PrimitiveType type = hlo->shape().element_type();
      const HloOpcode child_op = hlo->operand(0)->opcode();
      const PrimitiveType child_type = hlo->operand(0)->shape().element_type();
      // sin(rsqrt(...)) chain must remain intact.
      if (op == HloOpcode::kSin) {
        EXPECT_EQ(child_op, HloOpcode::kRsqrt);
      } else if (op == HloOpcode::kConvert && type == F32) {
        // f32 convert must come from the f16 cosine.
        EXPECT_EQ(child_op, HloOpcode::kCos);
        EXPECT_EQ(child_type, F16);
      } else if (op == HloOpcode::kCos) {
        // Cosine must consume the f16 convert.
        EXPECT_EQ(child_op, HloOpcode::kConvert);
        EXPECT_EQ(child_type, F16);
      }
    }
  }
}
// Replicating a [1,1] tensor tiled over 4 devices: expect a masked
// all-reduce (select keeps only the owning shard's value) rather than
// gather-style resharding.
TEST_P(SpmdPartitioningTest, BroadcastAsReplicate) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[1,1] parameter(0), sharding={devices=[2,2]<=[4]}
ROOT %copy = f32[1,1] copy(%param0), sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto param0 = AllOf(op::Parameter(0), op::Shape("f32[1,1]"));
  EXPECT_THAT(root, AllOf(op::Copy(op::AllReduce(op::Select(_, param0, _))),
                          op::Shape("f32[1,1]")));
}
// Mixed case: one dim is degenerate ([1,..]) and one is truly tiled. The
// degenerate dim is replicated via select+all-reduce, the tiled dim via
// all-reduce of a dynamic-update-slice.
TEST_P(SpmdPartitioningTest, BroadcastAsReplicate2) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[1,2] parameter(0), sharding={devices=[2,2]<=[4]}
ROOT %copy = f32[1,2] copy(%param0), sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto param0 = AllOf(op::Parameter(0), op::Shape("f32[1,1]"));
  auto broadcast =
      AllOf(op::AllReduce(op::Select(_, param0, _)), op::Shape("f32[1,1]"));
  EXPECT_THAT(
      root,
      AllOf(op::Copy(op::AllReduce(op::DynamicUpdateSlice(_, broadcast, _, _))),
            op::Shape("f32[1,2]")));
}
// Same as BroadcastAsReplicate but the input is partially replicated
// (last_tile_dim_replicate): still expect the select + all-reduce pattern.
TEST_P(SpmdPartitioningTest, BroadcastAsReplicate3) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[1,1] parameter(0),
sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
ROOT %copy = f32[1,1] copy(%param0), sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  auto param0 = AllOf(op::Parameter(0), op::Shape("f32[1,1]"));
  EXPECT_THAT(root, AllOf(op::Copy(op::AllReduce(op::Select(_, param0, _))),
                          op::Shape("f32[1,1]")));
}
// A tuple mixing a replicated constant with a subgroup-manual-sharded tuple
// element: the root should simply tuple the constant and the GTE without
// extra resharding.
TEST_P(SpmdPartitioningTest, TupleWithSubgroupManual) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
constant = f32[6,3]{1,0}
constant({{1,3,7},{5,1,4},{1,2,8},{2,3,7},{5,2,4},{2,2,8}}),
sharding={replicated}
param = (f32[6,3]{1,0}, f32[]) parameter(0),
sharding={{devices=[2,1,2]<=[4] last_tile_dims={manual}},{replicated}}
gte = f32[6,3]{1,0} get-tuple-element(param), index=0,
sharding={devices=[2,1,2]<=[4] last_tile_dims={manual}}
ROOT tuple = (f32[6,3]{1,0}, f32[6,3]{1,0}) tuple(constant, gte),
sharding={{replicated},{devices=[2,1,2]<=[4] last_tile_dims={manual}}}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root,
              op::Tuple(op::Constant(), op::GetTupleElement(op::Parameter(0))));
}
// A subgroup-manual broadcast used twice by the same add: partitioning
// should keep the add over (possibly duplicated) broadcasts of the constant.
TEST_P(SpmdPartitioningTest, SubgroupManualSharedOperand) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
constant = f32[] constant(1), sharding={replicated}
broadcast = f32[2,2] broadcast(constant), dimensions={},
sharding={devices=[2,1,2]<=[4] last_tile_dims={manual}}
ROOT add = f32[2,2] add(broadcast, broadcast),
sharding={devices=[2,1,2]<=[4] last_tile_dims={manual}}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, op::Add(op::Broadcast(op::Constant()),
                            op::Broadcast(op::Constant())));
}
// All-reduce whose replica groups align with the manual subgroups: the
// all-reduce survives on the f32[1,2] shard and keeps 2 replica groups.
TEST_P(SpmdPartitioningTest, SubgroupManualAllReduce) {
  absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
param = f32[2,2] parameter(0),
sharding={devices=[2,1,2]0,2,1,3 last_tile_dims={manual}}
ROOT all-reduce = f32[2,2]{1,0} all-reduce(param), to_apply=sum,
replica_groups={{2,0},{1,3}}, use_global_device_ids=true, channel_id=1,
sharding={devices=[2,1,2]0,2,1,3 last_tile_dims={manual}}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root,
              AllOf(op::AllReduce(op::Parameter(0)), op::Shape("f32[1,2]")));
  EXPECT_EQ(root->replica_groups().size(), 2);
}
// Negative case: replica groups that cross manual subgroups are illegal —
// partitioning must fail with a specific error message.
TEST_P(SpmdPartitioningTest, SubgroupIllegalManualAllReduce) {
  absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
param = f32[2,2] parameter(0),
sharding={devices=[2,1,2]0,2,1,3 last_tile_dims={manual}}
ROOT all-reduce = f32[2,2]{1,0} all-reduce(param), to_apply=sum,
replica_groups={{1,0},{2,3}}, use_global_device_ids=true, channel_id=1,
sharding={devices=[2,1,2]0,2,1,3 last_tile_dims={manual}}
}
)";
  auto module_status = PartitionComputation(hlo_string, 4);
  EXPECT_FALSE(module_status.status().ok());
  EXPECT_THAT(module_status.status().ToString(),
              ::testing::HasSubstr("Manual all-reduce across devices that "
                                   "belong to different manual subgroups"));
}
// An all-reduce without a sharding annotation on the result: it should be
// kept whole (f32[2,2]) with a single replica group over all 4 devices.
TEST_P(SpmdPartitioningTest, AllReduceNoSharding) {
  absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
param = f32[2,2] parameter(0), sharding={devices=[2,2]<=[4]}
ROOT all-reduce = f32[2,2]{1,0} all-reduce(param), to_apply=sum,
replica_groups={{0,1,2,3}}, use_global_device_ids=true, channel_id=1
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::AllReduce(), op::Shape("f32[2,2]")));
  EXPECT_EQ(root->replica_groups().size(), 1);
}
// Reduce over the tiled dim under subgroup-manual sharding: expect a local
// reduce followed by an all-reduce with 2 replica groups.
TEST_P(SpmdPartitioningTest, SubgroupManualReduce) {
  absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
constant = f32[] constant(0),
sharding={devices=[2,2]<=[4] last_tile_dims={manual,replicated}}
param = f32[2,2] parameter(0),
sharding={devices=[2,1,2]0,2,1,3 last_tile_dims={manual}}
ROOT reduce = f32[2] reduce(param, constant), dimensions={0}, to_apply=sum,
sharding={devices=[1,2,2]<=[4] last_tile_dims={manual,replicated}}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root,
              op::AllReduce(op::Reduce(op::Parameter(0), op::Constant())));
  EXPECT_EQ(root->replica_groups().size(), 2);
}
// Scatter-add where sharding along the update/index dimension is cheaper:
// expect a per-shard scatter on bf16[50048,1020] with s32[512,1024,1]
// indices, then all-reduce + collective-permute + dynamic-update-slice to
// assemble the requested output sharding.
TEST_P(SpmdPartitioningTest, ScatterPreferUpdateIndexIfSmaller) {
  absl::string_view hlo_string = R"(
HloModule module
%scatter_add_reducer__33.191857 (parameter.191858: bf16[], parameter.191859: bf16[]) -> bf16[] {
%parameter.191858 = bf16[] parameter(0)
%parameter.191859 = bf16[] parameter(1)
ROOT %add.4425 = bf16[] add(bf16[] %parameter.191858, bf16[] %parameter.191859)
}
ENTRY entry {
p1 = s32[2048,1024,1]{2,1,0} parameter(0)
p2 = bf16[2048,1024,2040]{2,1,0} parameter(1)
%constant.8635 = bf16[] constant(0)
%broadcast.21781 = bf16[50048,2040]{1,0} broadcast(bf16[] %constant.8635), dimensions={},
sharding={devices=[1,2,4]<=[8] last_tile_dim_replicate}
%select.1954 = s32[2048,1024,1]{2,1,0} copy(%p1), sharding={devices=[4,1,1,2]<=[8] last_tile_dim_replicate}
%slice.1274 = bf16[2048,1024,2040]{2,1,0} copy(%p2),
sharding={devices=[4,1,1,2]<=[8] last_tile_dim_replicate}
%scatter.34 = bf16[50048,2040]{1,0} scatter(bf16[50048,2040]{1,0} %broadcast.21781,
s32[2048,1024,1]{2,1,0} %select.1954, bf16[2048,1024,2040]{2,1,0} %slice.1274),
update_window_dims={2}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0},
index_vector_dim=2, to_apply=%scatter_add_reducer__33.191857,
sharding={devices=[1,2,4]<=[8] last_tile_dim_replicate}
ROOT c = bf16[50048,2040]{1,0} copy(scatter.34),
sharding={replicated}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root, op::Copy(op::AllReduce(op::DynamicUpdateSlice(
                _,
                op::CollectivePermute(op::AllReduce(op::Scatter(
                    op::Shape("bf16[50048,1020]"), op::Shape("s32[512,1024,1]"),
                    op::Shape("bf16[512,1024,1020]")))),
                _, _))));
}
// Scatter where trivially partitioning the operand dims is cheaper than
// partitioning along indices: expect a local scatter on bf16[32,128,50001]
// with s32[32,256,3] indices and bf16[32,256] updates.
TEST_P(SpmdPartitioningTest, ScatterPreferTrivialIfSmallerThanIndices) {
  absl::string_view hlo_string = R"(
HloModule module
%scatter_add_reducer__33.191857 (parameter.191858: bf16[], parameter.191859: bf16[]) -> bf16[] {
%parameter.191858 = bf16[] parameter(0)
%parameter.191859 = bf16[] parameter(1)
ROOT %add.4425 = bf16[] add(bf16[] %parameter.191858, bf16[] %parameter.191859)
}
ENTRY entry {
p1 = s32[32,512,3]{2,1,0} parameter(0)
p2 = bf16[32,512]{1,0} parameter(1)
%constant.8635 = bf16[] constant(0)
%broadcast.21781 = bf16[32,512,50001]{2,1,0} broadcast(bf16[] %constant.8635), dimensions={},
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
%select.1954 = s32[32,512,3]{2,1,0} copy(%p1), sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
%slice.1274 = bf16[32,512]{1,0} copy(%p2),
sharding={devices=[1,4,2]<=[8] last_tile_dim_replicate}
%scatter.34 = bf16[32,512,50001]{2,1,0} scatter(bf16[32,512,50001]{2,1,0} %broadcast.21781,
s32[32,512,3]{2,1,0} %select.1954, bf16[32,512]{1,0} %slice.1274),
update_window_dims={}, inserted_window_dims={0,1,2}, scatter_dims_to_operand_dims={0,1,2},
index_vector_dim=2, to_apply=%scatter_add_reducer__33.191857,
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
ROOT c = bf16[32,512,50001]{2,1,0} copy(scatter.34),
sharding={replicated}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root,
              op::Copy(op::AllReduce(op::DynamicUpdateSlice(
                  _,
                  op::AllReduce(op::Scatter(op::Shape("bf16[32,128,50001]"),
                                            op::Shape("s32[32,256,3]"),
                                            op::Shape("bf16[32,256]"))),
                  _, _, _))));
}
// Gather combining operand-passthrough (non-index dim sharded) with
// index-passthrough (indices sharded): the per-shard gather should be
// f32[4,5] = gather(f32[2,5], s32[4]).
TEST_P(SpmdPartitioningTest, GatherOperandPassthroughIndexPassthrough) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[2,9] parameter(0), sharding={replicated}
%indices = s32[7] parameter(1), sharding={replicated}
%input.copy = f32[2,9] copy(%input), sharding={devices=[1,2,2]1,0,3,2 last_tile_dim_replicate}
%indices.copy = s32[7] copy(%indices), sharding={devices=[2,2]1,2,3,0 last_tile_dim_replicate}
%gather = f32[7,9] gather(%input.copy, %indices.copy), offset_dims={1},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
slice_sizes={1,9}, sharding={devices=[2,2]<=[4]}
ROOT %copy = f32[7,9] copy(%gather), sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  // The partitioned gather is emitted under the name "gather.1".
  const HloInstruction* gather = FindInstruction(module.get(), "gather.1");
  EXPECT_NE(gather, nullptr);
  EXPECT_THAT(gather,
              AllOf(op::Shape("f32[4,5]"),
                    op::Gather(op::Shape("f32[2,5]"), op::Shape("s32[4]"))));
}
// Index-passthrough gather with a trivially sliceable operand dim: the
// per-shard gather becomes f32[1,3,9] = gather(f32[9,9], s32[1,3]).
TEST_P(SpmdPartitioningTest, GatherIndexPassthroughTrivialSlice) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[17,9] parameter(0)
%indices = s32[2,3] parameter(1)
%input.copy = f32[17,9] copy(%input), sharding={devices=[2,1,2]3,2,1,0 last_tile_dim_replicate}
%indices.copy = s32[2,3] copy(%indices), sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
%gather = f32[2,3,9] gather(%input.copy, %indices.copy), offset_dims={2},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=2,
slice_sizes={1,9}, sharding={devices=[2,1,1,2]1,0,3,2 last_tile_dim_replicate}
ROOT %copy = f32[2,3,9] copy(%gather), sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  // The partitioned gather is emitted under the name "gather.1".
  const HloInstruction* gather = FindInstruction(module.get(), "gather.1");
  EXPECT_NE(gather, nullptr);
  EXPECT_THAT(gather,
              AllOf(op::Shape("f32[1,3,9]"),
                    op::Gather(op::Shape("f32[9,9]"), op::Shape("s32[1,3]"))));
}
// Gather whose output sharding partially replicates: checks only the root
// tuple's per-shard shape, (f32[4,2,10]) for 32 devices.
TEST_P(SpmdPartitioningTest, GatherReplicatedCorrectOutput) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[64,2,250112] parameter(0), sharding={devices=[16,1,2]<=[32]}
%indices = s32[10,1] parameter(1), sharding={replicated}
%input.copy = f32[64,2,250112] copy(%input), sharding={
devices=[16,1,2]<=[32]}
%indices.copy = s32[10,1] copy(%indices), sharding={replicated}
%gather = f32[64,2,10] gather(f32[64,2,250112] %input,
s32[10,1]{1,0} %indices.copy), offset_dims={0,1}, collapsed_slice_dims={2},
start_index_map={2}, index_vector_dim=1, slice_sizes={64,2,1},
sharding={devices=[16,1,1,2]<=[32] last_tile_dim_replicate}
ROOT %copy = (f32[64,2,10]) tuple(gather),
sharding={{devices=[16,1,1,2]<=[32] last_tile_dim_replicate}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 32));
  VLOG(1) << module->ToString();
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Shape("(f32[4,2,10])"));
}
// Gather over a 32-way sharded operand with replicated output: expect a
// per-shard gather over the bf16[7816,4096] shard, masked with a select and
// combined via all-reduce to restore the replicated result.
TEST_P(SpmdPartitioningTest, GatherTrivialRestoreSharding) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = bf16[250112,4096] parameter(0), sharding={replicated}
%cpy.input = bf16[250112,4096] copy(%input), sharding={devices=[32,1]<=[32]}
%indices = s32[64,1,1] parameter(1), sharding={replicated}
%cpy.indices = s32[64,1,1] copy(%indices), sharding={replicated}
%gather = bf16[64,1,4096] gather(bf16[250112,4096] %cpy.input, s32[64,1,1] %cpy.indices),
offset_dims={2}, collapsed_slice_dims={0}, start_index_map={0},
index_vector_dim=2, slice_sizes={1,4096}, sharding={replicated}
ROOT %copy = bf16[64,1,4096] copy(gather), sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 32));
  VLOG(1) << module->ToString();
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Shape("bf16[64,1,4096]"));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Copy(op::AllReduce(op::Select(
                  _, _, op::Gather(op::Shape("bf16[7816,4096]"), _)))));
}
// Slicing a 4-way sharded vector down to one element: the slice stays local
// on the parameter shard with result shape f32[1].
TEST_P(SpmdPartitioningTest, SliceTo1) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[512] parameter(0), sharding={devices=[4]<=[4]}
ROOT slice.134 = f32[1] slice(input), slice={[0:1]},
sharding={devices=[4]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Slice(op::Parameter()), op::Shape("f32[1]")));
}
// Slicing a [4,4] tensor to [1,4] with 8 shards: the slice on the first dim
// is absorbed and the root is just a copy of the f32[1,2] parameter shard.
TEST_P(SpmdPartitioningTest, SliceTo1_8Shards) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[4,4] parameter(0), sharding={devices=[4,2]<=[8]}
ROOT %slice = f32[1,4] slice(%input), slice={[0:1], [0:4]},
sharding={devices=[4,2]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Copy(op::Parameter()), op::Shape("f32[1,2]")));
}
// Same single-element slice as SliceTo1 but with partial replication: the
// slice still stays local with result shape f32[1].
TEST_P(SpmdPartitioningTest, SliceTo1PartialReplicate) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[16] parameter(0),
sharding={devices=[2,2]<=[4] last_tile_dim_replicate}
ROOT slice.134 = f32[1] slice(input), slice={[0:1]},
sharding={devices=[2,2]<=[4] last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Slice(op::Parameter()), op::Shape("f32[1]")));
}
// Slicing to two elements across 4 shards: each shard holds one output
// element, so a halo element arrives via collective-permute and the result
// is dynamic-sliced out of the concatenated halo + local slice.
TEST_P(SpmdPartitioningTest, SliceTo2) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[512] parameter(0), sharding={devices=[4]<=[4]}
ROOT slice.134 = f32[2] slice(input), slice={[0:2]},
sharding={devices=[4]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  auto slice1 = AllOf(op::Slice(op::Parameter()), op::Shape("f32[2]"));
  // One element comes from the neighboring shard (halo exchange).
  auto halo =
      op::CollectivePermute(AllOf(op::Slice(slice1), op::Shape("f32[1]")));
  auto slice_self = AllOf(op::Slice(slice1), op::Shape("f32[1]"));
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Copy(AllOf(op::DynamicSlice(op::Concatenate(halo, slice_self), _),
                     op::Shape("f32[1]"))));
}
// Slicing f32[2] out of the middle ([300:302]) of an 8-way sharded f32[512]:
// the result is assembled from collective-permuted one-element halos that are
// selected per partition.
TEST_P(SpmdPartitioningTest, SliceToMiddle2) {
  const char* const kHloText = R"(
HloModule module
ENTRY entry {
  %input = f32[512] parameter(0), sharding={devices=[8]<=[8]}
  ROOT %slice = f32[2] slice(input), slice={[300:302]},
    sharding={devices=[8]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(kHloText, 8));
  // A local 2-element slice, cut down to one element and shipped to the
  // partition that needs it.
  auto local_slice = AllOf(op::Slice(op::Parameter()), op::Shape("f32[2]"));
  auto shifted_piece =
      AllOf(op::CollectivePermute(
                AllOf(op::Slice(local_slice), op::Shape("f32[1]"))),
            op::Shape("f32[1]"));
  VLOG(1) << module->ToString();
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Copy(op::Select(_, shifted_piece, shifted_piece)));
}
// Same middle-slice pattern as SliceToMiddle2, but the input is 8-way sharded
// with 2-way replication across 16 devices: the lowering still selects
// between collective-permuted one-element halos.
TEST_P(SpmdPartitioningTest, SliceToMiddle2PartiallyReplicated) {
  const char* const kHloText = R"(
HloModule module
ENTRY entry {
  %input = f32[512] parameter(0),
    sharding={devices=[8,2]<=[16] last_tile_dim_replicate}
  ROOT %slice = f32[2] slice(input), slice={[300:302]},
    sharding={devices=[8,2]<=[16] last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(kHloText, 16));
  auto local_slice = AllOf(op::Slice(op::Parameter()), op::Shape("f32[2]"));
  auto shifted_piece =
      AllOf(op::CollectivePermute(
                AllOf(op::Slice(local_slice), op::Shape("f32[1]"))),
            op::Shape("f32[1]"));
  VLOG(1) << module->ToString();
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Copy(op::Select(_, shifted_piece, shifted_piece)));
}
// Slicing a 16-way sharded f32[32] down to its first half: data is realigned
// with collective-permutes and padding, then the final element is extracted
// with a dynamic-slice over the selected pieces.
TEST_P(SpmdPartitioningTest, SliceToHalfSize) {
  const char* const kHloText = R"(
HloModule module
ENTRY entry {
  %input = f32[32] parameter(0), sharding={devices=[16]<=[16]}
  ROOT %slice = f32[16] slice(input), slice={[0:16]},
    sharding={devices=[16]<=[16]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(kHloText, 16));
  VLOG(1) << module->ToString();
  // One candidate comes from a padded, permuted slice; the other selects
  // between the permuted and the local parameter.
  auto padded_halo =
      AllOf(op::Pad(op::CollectivePermute(op::Slice(op::Parameter())), _),
            op::Shape("f32[2]"));
  auto realigned =
      op::Select(_, op::CollectivePermute(op::Parameter()), op::Parameter());
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Copy(op::DynamicSlice(op::Select(_, padded_halo, realigned), _)));
}
// Padding a 16-way sharded f32[16] to f32[32] (all high-padding): each
// partition builds two one-element pieces from collective-permuted neighbors
// and selects the pad value wherever the original data has run out.
TEST_P(SpmdPartitioningTest, PadToDoubleSize) {
  const char* const kHloText = R"(
HloModule module
ENTRY entry {
  %input = f32[16] parameter(0), sharding={devices=[16]<=[16]}
  %pv = f32[] constant(-1)
  ROOT %pad = f32[32] pad(input, pv), padding=0_16,
    sharding={devices=[16]<=[16]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(kHloText, 16));
  VLOG(1) << module->ToString();
  auto shift_one = op::CollectivePermute(op::Parameter(0));
  auto shift_two = op::CollectivePermute(op::Parameter(0));
  auto low_piece = op::Select(_, shift_one, op::Parameter(0));
  auto high_piece = op::Select(_, shift_two, shift_one);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Select(_, op::Concatenate(low_piece, high_piece),
                         op::Broadcast(op::Constant())));
}
// padding=16_-16 shifts the data entirely out of the result window, so every
// output element is the pad value: expect a broadcast of the constant with
// per-partition shape f32[1] and no data movement.
TEST_P(SpmdPartitioningTest, PadAllPadvalue) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
  %input = f32[16] parameter(0), sharding={devices=[16]<=[16]}
  %pv = f32[] constant(-1)
  ROOT %pad = f32[16] pad(input, pv), padding=16_-16,
    sharding={devices=[16]<=[16]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 16));
  VLOG(1) << module->ToString();
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Broadcast(op::Constant()), op::Shape("f32[1]")));
}
// Padding an 8-way sharded f32[1] to f32[24] (3 low, 20 high): the single
// data element is routed via collective-permute to the partition whose window
// contains it; all other positions select the pad value.
TEST_P(SpmdPartitioningTest, PadFrom1To24) {
  const char* const kHloText = R"(
HloModule module
ENTRY entry {
  %input = f32[1] parameter(0), sharding={devices=[8]<=[8]}
  %pv = f32[] constant(-1)
  ROOT %pad = f32[24] pad(input, pv), padding=3_20,
    sharding={devices=[8]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(kHloText, 8));
  VLOG(1) << module->ToString();
  auto routed_input = op::CollectivePermute(op::Parameter(0));
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      AllOf(op::Shape("f32[3]"),
            op::Select(_,
                       op::Concatenate(routed_input,
                                       op::Broadcast(op::Constant())),
                       op::Broadcast(op::Constant()))));
}
// Slicing the first 6 rows of a 2-way row-sharded f32[100,2]: rows 3-5 of
// the result (partition 1's share) live on partition 0, so the lowering
// selects between a collective-permuted remote slice and the local slice.
TEST_P(SpmdPartitioningTest, SliceToLessThanHalf) {
  const char* const kHloText = R"(
HloModule module
ENTRY entry {
  %input = f32[100,2] parameter(0), sharding={devices=[2,1]0,1}
  ROOT slice.20 = f32[6,2] slice(input), slice={[0:6], [0:2]}, sharding={devices=[2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(kHloText, 2));
  VLOG(1) << module->ToString();
  auto remote_piece = op::CollectivePermute(op::Slice(op::Parameter(0)));
  auto local_piece = op::Slice(op::Parameter(0));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Copy(op::Select(_, remote_piece, local_piece)));
}
// Replicating an [8,2]-sharded f32[3,2] across 16 devices: valid local data
// is masked into a zero broadcast via dynamic-update-slice, then combined
// across both mesh dimensions with nested all-reduces.
TEST_P(SpmdPartitioningTest, PartialDusReplicate) {
  const char* const kHloText = R"(
HloModule module
ENTRY entry {
  %input = f32[3,2] parameter(0), sharding={devices=[8,2]<=[16]}
  ROOT %copy = f32[3,2] copy(input), sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(kHloText, 16));
  VLOG(1) << module->ToString();
  auto masked_update =
      AllOf(op::Shape("f32[3,2]"),
            op::DynamicUpdateSlice(op::Broadcast(),
                                   op::Select(_, op::Parameter(0), _), _, _));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Copy(AllOf(op::AllReduce(op::AllReduce(masked_update)))));
}
// A gather whose operand and output are both 4-way sharded on the same
// (second) dimension: the sharding passes through, so the partitioned root
// is a gather with per-partition shape f32[16,16,6,128,128] (64/4 = 16).
TEST_P(SpmdPartitioningTest, GatherPassthrough) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
  p = f32[16,64,768,768]{3,2,1,0} parameter(0), sharding={replicated}
  c = f32[16,64,768,768]{3,2,1,0} copy(p), sharding={devices=[1,4,1,1]<=[4]}
  constant.1669 = s32[] constant(0)
  iota.1012 = s32[6]{0} iota(), iota_dimension=0, sharding={replicated}
  constant.1748 = s32[] constant(128), sharding={replicated}
  broadcast.2642 = s32[6]{0} broadcast(constant.1748), dimensions={}, sharding={replicated}
  multiply.92 = s32[6]{0} multiply(iota.1012, broadcast.2642), sharding={replicated}
  broadcast.2643 = s32[2,6]{1,0} broadcast(multiply.92), dimensions={1}, sharding={replicated}
  transpose.542 = s32[6,2]{0,1} transpose(broadcast.2643), dimensions={1,0}, sharding={replicated}
  pad.19 = s32[6,4]{1,0} pad(transpose.542, constant.1669), padding=0_0x2_0, sharding={replicated}
  ROOT gather.1 = f32[16,64,6,128,128]{4,3,2,1,0} gather(c, pad.19), offset_dims={0,1,3,4}, collapsed_slice_dims={}, start_index_map={0,1,2,3}, index_vector_dim=1, slice_sizes={16,64,128,128}, sharding={devices=[1,4,1,1,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Gather(), op::Shape("f32[16,16,6,128,128]")));
}
// Resharding from a partially replicated sharding (dim 3 2-way sharded with
// 4-way replication) to dim 1 fully 8-way sharded: expect the reshard to be
// lowered as an all-to-all followed by transpose/reshapes (no replication
// round-trip).
TEST_P(SpmdPartitioningTest, ComplexReshardFromPartialReplicate) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
  %p = f32[4,15,4,16] parameter(0)
  %p.copy = f32[4,15,4,16] copy(p),
    sharding={devices=[1,1,1,2,4]<=[4,2]T(1,0) last_tile_dim_replicate}
  %a = f32[4,15,4,16] add(p.copy, p.copy),
    sharding={devices=[1,1,1,2,4]<=[4,2]T(1,0) last_tile_dim_replicate}
  ROOT %c2 = f32[4,15,4,16] copy(a), sharding={devices=[1,8,1,1]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Copy(op::Reshape(op::Reshape(op::Transpose(op::AllToAll(_))))));
}
// The inverse direction of ComplexReshardFromPartialReplicate: resharding
// from dims 1 and 2 tiled ([1,4,2,1]) to a partially replicated sharding is
// also expected to lower to an all-to-all plus transpose/reshape.
TEST_P(SpmdPartitioningTest, ComplexReshardToPartialReplicate) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
  %p = f32[4,15,4,16] parameter(0)
  %p.copy = f32[4,15,4,16] copy(p),
    sharding={devices=[1,4,2,1]<=[8]}
  %a = f32[4,15,4,16] add(p.copy, p.copy),
    sharding={devices=[1,4,2,1]<=[8]}
  ROOT %c2 = f32[4,15,4,16] copy(a), sharding={devices=[1,1,1,2,4]<=[4,2]T(1,0) last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Copy(op::Reshape(op::Transpose(op::AllToAll(_)))));
}
// Resharding [1,4,1,2] -> [1,1,1,8] (merging all tiling onto the last dim,
// with a transposed device order): lowered as all-to-all + transpose +
// reshape, with a slice because dim 3 (15) is not divisible by 8.
TEST_P(SpmdPartitioningTest, ComplexReshardMoveMergeDimensionRight) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
  %p = f32[4,15,4,15] parameter(0)
  %p.copy = f32[4,15,4,15] copy(p),
    sharding={devices=[1,4,1,2]<=[8]}
  %a = f32[4,15,4,15] add(p.copy, p.copy),
    sharding={devices=[1,4,1,2]<=[8]}
  ROOT %c2 = f32[4,15,4,15] copy(a), sharding={devices=[1,1,1,8]<=[4,2]T(1,0)}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Copy(op::Reshape(
                  op::Slice(op::Reshape(op::Transpose(op::AllToAll(_)))))));
}
// Resharding [1,4,1,2] -> [1,8,1,1] (merging the dim-3 tiling into dim 1):
// expect all-to-all + transpose + two reshapes, with no slice needed.
TEST_P(SpmdPartitioningTest, ComplexReshardMoveMergeDimensionLeft) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
  %p = f32[2,15,1,2] parameter(0)
  %p.copy = f32[2,15,1,2] copy(p),
    sharding={devices=[1,4,1,2]<=[8]}
  %a = f32[2,15,1,2] add(p.copy, p.copy),
    sharding={devices=[1,4,1,2]<=[8]}
  ROOT %c2 = f32[2,15,1,2] copy(a), sharding={devices=[1,8,1,1]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Copy(op::Reshape(op::Reshape(op::Transpose(op::AllToAll(_))))));
}
// Like ComplexReshardMoveMergeDimensionLeft, but the target device order is
// transposed ([4,2]T(1,0)), so an extra collective-permute is needed to
// reorder data between devices after the all-to-all.
TEST_P(SpmdPartitioningTest, ComplexReshardMoveMergeDimensionLeftReorder) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
  %p = f32[4,15,4,16] parameter(0)
  %p.copy = f32[4,15,4,16] copy(p),
    sharding={devices=[1,4,1,2]<=[8]}
  %a = f32[4,15,4,16] add(p.copy, p.copy),
    sharding={devices=[1,4,1,2]<=[8]}
  ROOT %c2 = f32[4,15,4,16] copy(a), sharding={devices=[1,8,1,1]<=[4,2]T(1,0)}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Copy(op::Reshape(op::CollectivePermute(
                  op::Reshape(op::Transpose(op::AllToAll(_)))))));
}
// A convolution with large window padding and rhs dilation on a [2,1,4,1]-
// sharded input: the partitioner should pad the operand and then
// dynamic-slice the per-partition window before the local convolution.
TEST_P(SpmdPartitioningTest, PaddedConvReshard) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
  %p = bf16[16,256,256,384]{3,2,1,0} parameter(0)
  %p2 = bf16[3,3,384,384]{3,2,1,0} parameter(1)
  %p.copy = bf16[16,256,256,384]{3,2,1,0} copy(%p), sharding={devices=[2,1,4,1]<=[8]}
  %p2.copy = bf16[3,3,384,384]{3,2,1,0} copy(%p2), sharding={replicated}
  ROOT %convolution.10115 = bf16[16,256,256,384]{3,2,1,0} convolution(%p.copy, %p2.copy), window={size=3x3 pad=128_128x128_128 rhs_dilate=128x128}, dim_labels=b01f_01io->b01f, sharding={devices=[2,1,4,1]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Convolution(
                  op::DynamicSlice(op::Pad(_, op::Constant()), _, _, _, _), _));
}
// Dynamic-update-slice into a larger buffer where dim 0 is partitioned but
// not sliced: the partitioner should keep dim 0 partitioned (per-partition
// shape bf16[8,512,512,384]) instead of replicating, and only dynamic-slice
// the result for the remaining dims.
TEST_P(SpmdPartitioningTest, KeepPartitionedNonSlicedDimension) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
  %p = bf16[16,128,128,384]{3,2,1,0} parameter(0), sharding={replicated}
  %constant.1165 = s32[] constant(0), sharding={replicated}
  constant.1151 = s32[] constant(192), sharding={replicated}
  broadcast.1152 = s32[2]{0} broadcast(constant.1151), dimensions={}, sharding={replicated}
  slice.1576 = s32[1]{0} slice(broadcast.1152), slice={[0:1]}, sharding={replicated}
  reshape.1888 = s32[] reshape(slice.1576), sharding={replicated}
  slice.1546 = s32[1]{0} slice(broadcast.1152), slice={[1:2]}, sharding={replicated}
  reshape.1890 = s32[] reshape(slice.1546), sharding={replicated}
  constant.861 = bf16[] constant(0), sharding={replicated}
  broadcast.862 = bf16[16,512,512,384]{3,2,1,0} broadcast(constant.861), dimensions={}, sharding={devices=[2,2,1,1]<=[4]}
  %c = bf16[16,128,128,384]{3,2,1,0} copy(p), sharding={devices=[2,2,1,1]<=[4]}
  add.228 = bf16[16,128,128,384]{3,2,1,0} add(c, c), sharding={devices=[2,2,1,1]<=[4]}
  ROOT dynamic-update-slice.111 = bf16[16,512,512,384]{3,2,1,0} dynamic-update-slice(broadcast.862, add.228, constant.1165, reshape.1888, reshape.1890, constant.1165), sharding={devices=[2,2,1,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::DynamicSlice(AllOf(op::DynamicUpdateSlice(),
                                     op::Shape("bf16[8,512,512,384]")),
                               _, _, _, _));
}
// Variant of KeepPartitionedNonSlicedDimension where the update indices are
// constants: dim 0 should again stay partitioned (per-partition shape
// bf16[8,224,224,384]) with a dynamic-slice applied afterwards.
TEST_P(SpmdPartitioningTest,
       KeepPartitionedNonSlicedDimensionWithConstantIndices) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
  p1 = bf16[16,192,192,384]{3,2,1,0} parameter(0), sharding={replicated}
  p2 = bf16[16,128,128,384]{3,2,1,0} parameter(1), sharding={replicated}
  c1 = bf16[16,192,192,384]{3,2,1,0} copy(p1), sharding={devices=[2,2,2,1]<=[8]}
  c2 = bf16[16,128,128,384]{3,2,1,0} copy(p2), sharding={devices=[2,2,2,1]<=[8]}
  constant.1163 = bf16[] constant(0), sharding={replicated}
  constant.1165 = s32[] constant(0), sharding={replicated}
  pad.179 = bf16[16,224,224,384]{3,2,1,0} pad(c1, constant.1163), padding=0_0x16_16x16_16x0_0, sharding={devices=[2,2,2,1]<=[8]}
  add.439 = bf16[16,128,128,384]{3,2,1,0} add(c2, c2), sharding={devices=[2,2,2,1]<=[8]}
  constant.1070 = s32[] constant(48), sharding={replicated}
  dynamic-update-slice.128 = bf16[16,224,224,384]{3,2,1,0} dynamic-update-slice(pad.179, add.439, constant.1165, constant.1070, constant.1070, constant.1165), sharding={devices=[2,2,2,1]<=[8]}
  ROOT c = bf16[16,224,224,384]{3,2,1,0} copy(dynamic-update-slice.128), sharding={devices=[2,2,2,1]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  XLA_VLOG_LINES(1, module->ToString());
  // NOTE(review): the expected shape string contains a stray space
  // ("224, 224"); the shape parser appears to tolerate it, but confirm
  // before "fixing" the literal.
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Copy(op::DynamicSlice(
          AllOf(op::DynamicUpdateSlice(), op::Shape("bf16[8,224, 224,384]")), _,
          _, _, _)));
}
// End-to-end manual-sharding region: SPMDFullToShardShape hands per-device
// shards to a manually sharded "dummy" custom-call, and SPMDShardToFullShape
// rebuilds the full tensors. With replicated outputs, each full result is
// expected to be assembled via all-reduce(dynamic-update-slice(...)).
TEST_P(SpmdPartitioningTest, CustomCallManualSharding) {
  const char* const hlo_string = R"(
HloModule pjit_xmap_dummy.5
ENTRY %main.21 (Arg_0.1: f32[4,4,8], Arg_1.2: f32[4,8]) -> (f32[4,4,8], f32[4]) {
  %Arg_0.1 = f32[4,4,8]{2,1,0} parameter(0), sharding={devices=[4,1,1]<=[4]}
  %copy.3 = f32[4,4,8]{2,1,0} copy(f32[4,4,8]{2,1,0} %Arg_0.1), sharding={devices=[4,1,1]<=[4]}
  %custom-call.4 = f32[1,4,8]{2,1,0} custom-call(f32[4,4,8]{2,1,0} %copy.3), custom_call_target="SPMDFullToShardShape", sharding={manual}
  %reshape.7 = f32[4,8]{1,0} reshape(f32[1,4,8]{2,1,0} %custom-call.4), sharding={manual}
  %Arg_1.2 = f32[4,8]{1,0} parameter(1), sharding={replicated}
  %copy.2 = f32[4,8]{1,0} copy(f32[4,8]{1,0} %Arg_1.2), sharding={replicated}
  %custom-call.6 = f32[4,8]{1,0} custom-call(f32[4,8]{1,0} %copy.2), custom_call_target="SPMDFullToShardShape", sharding={manual}
  %custom-call.8 = (f32[4,8]{1,0}, f32[1]{0}) custom-call(f32[4,8]{1,0} %reshape.7, f32[4,8]{1,0} %custom-call.6), custom_call_target="dummy", operand_layout_constraints={f32[4,8]{1,0}, f32[4,8]{1,0}}, api_version=API_VERSION_STATUS_RETURNING, sharding={{manual}, {manual}}
  %get-tuple-element.9 = f32[4,8]{1,0} get-tuple-element((f32[4,8]{1,0}, f32[1]{0}) %custom-call.8), index=0, sharding={manual}
  %reshape.11 = f32[1,4,8]{2,1,0} reshape(f32[4,8]{1,0} %get-tuple-element.9), sharding={manual}
  %copy.1 = f32[1,4,8]{2,1,0} copy(f32[1,4,8]{2,1,0} %reshape.11), sharding={manual}
  %custom-call.14 = f32[4,4,8]{2,1,0} custom-call(f32[1,4,8]{2,1,0} %copy.1), custom_call_target="SPMDShardToFullShape", sharding={devices=[4,1,1]<=[4]}
  %reshape.18 = f32[4,4,8]{2,1,0} reshape(f32[4,4,8]{2,1,0} %custom-call.14), sharding={devices=[4,1,1]<=[4]}
  %get-tuple-element.10 = f32[1]{0} get-tuple-element((f32[4,8]{1,0}, f32[1]{0}) %custom-call.8), index=1, sharding={manual}
  %reshape.12 = f32[1,1]{1,0} reshape(f32[1]{0} %get-tuple-element.10), sharding={manual}
  %copy = f32[1,1]{1,0} copy(f32[1,1]{1,0} %reshape.12), sharding={manual}
  %custom-call.16 = f32[4,1]{1,0} custom-call(f32[1,1]{1,0} %copy), custom_call_target="SPMDShardToFullShape", sharding={devices=[4,1]<=[4]}
  %reshape.17 = f32[4]{0} reshape(f32[4,1]{1,0} %custom-call.16), sharding={devices=[4]<=[4]}
  %reshape.19 = f32[4]{0} reshape(f32[4]{0} %reshape.17), sharding={devices=[4]<=[4]}
  ROOT %tuple.20 = (f32[4,4,8]{2,1,0}, f32[4]{0}) tuple(f32[4,4,8]{2,1,0} %reshape.18, f32[4]{0} %reshape.19), sharding={{replicated}, {replicated}}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Tuple(op::AllReduce(op::DynamicUpdateSlice(
                            _, op::Shape("f32[1,4,8]"), _, _, _)),
                        op::AllReduce(op::DynamicUpdateSlice(
                            _, op::Shape("f32[1]"), _))));
}
// Resharding an unevenly partitioned f32[19,19] (19 not divisible by 4)
// between [4,2] and its transposed [2,4] order: the all-to-all based reshard
// should need only a single collective-permute in the whole module.
TEST_P(SpmdPartitioningTest, UnevenPadAllToAllReshard) {
  const char* const kHloText = R"(
HloModule pjit_xmap_dummy.5
ENTRY %main.21 {
  %Arg_0.1 = f32[19,19]{1,0} parameter(0), sharding={devices=[4,2]<=[8]}
  %add.3171 = f32[19,19]{1,0} add(%Arg_0.1, %Arg_0.1), sharding={devices=[4,2]<=[8]}
  %transpose.3172 = f32[19,19]{0,1} transpose(%add.3171), dimensions={1,0}, sharding={devices=[2,4]<=[4,2]T(1,0)}
  ROOT %add.3173 = f32[19,19]{1,0} add(%add.3171, %transpose.3172), sharding={devices=[4,2]<=[8]}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(kHloText, 8));
  XLA_VLOG_LINES(1, module->ToString());
  int64_t num_collective_permutes = 0;
  for (const HloInstruction* instr :
       module->entry_computation()->instructions()) {
    if (instr->opcode() == HloOpcode::kCollectivePermute) {
      ++num_collective_permutes;
    }
  }
  EXPECT_EQ(num_collective_permutes, 1);
}
// Same transposed-mesh reshard as UnevenPadAllToAllReshard, but with a much
// smaller f32[5,5] input: here the lowering is expected to use exactly three
// collective-permutes.
TEST_P(SpmdPartitioningTest, UnevenPadAllToAllReshard2) {
  const char* const kHloText = R"(
HloModule pjit_xmap_dummy.5
ENTRY %main.21 {
  %Arg_0.1 = f32[5,5]{1,0} parameter(0), sharding={devices=[4,2]<=[8]}
  add.3171 = f32[5,5]{1,0} add(Arg_0.1, Arg_0.1), sharding={devices=[4,2]<=[8]}
  transpose.3172 = f32[5,5]{0,1} transpose(add.3171), dimensions={1,0}, sharding={devices=[2,4]<=[4,2]T(1,0)}
  ROOT add.3173 = f32[5,5]{1,0} add(add.3171, transpose.3172), sharding={devices=[4,2]<=[8]}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(kHloText, 8));
  XLA_VLOG_LINES(1, module->ToString());
  int64_t num_collective_permutes = 0;
  for (const HloInstruction* instr :
       module->entry_computation()->instructions()) {
    if (instr->opcode() == HloOpcode::kCollectivePermute) {
      ++num_collective_permutes;
    }
  }
  EXPECT_EQ(num_collective_permutes, 3);
}
// Registers a custom-call partitioner for target "BatchableCustomCall" and
// verifies the SPMD pass dispatches to it: the partitioner keeps batch dims
// partitioned while replicating the last two dims, so the partitioned
// custom-call has shape f32[51,128,128] followed by a dynamic-slice that
// restores the requested output sharding.
TEST_P(SpmdPartitioningTest, CustomCallShardingRegistration) {
  class BatchableCustomCallPartitioner : public CustomCallPartitioner {
   public:
    // Forward the user's sharding onto the custom-call unchanged.
    HloSharding PropagateUserSharding(
        const HloInstruction* instruction, const HloInstruction* user,
        const HloSharding& sharding) const override {
      return sharding;
    }
    // Mirror operand 0's sharding when it has one; otherwise no opinion.
    std::optional<HloSharding> InferShardingFromOperands(
        const HloInstruction* instruction) const override {
      if (instruction->operand(0)->has_sharding()) {
        return instruction->operand(0)->sharding();
      }
      return std::nullopt;
    }
    bool IsCustomCallShardable(
        const HloInstruction* instruction) const override {
      return true;
    }
    // Partition by replicating the last two (non-batch) dims, re-emitting
    // the custom-call on the partitioned operand, then resharding the
    // result back to the instruction's own sharding.
    absl::Status Partition(spmd::SpmdPartitioningVisitor* partitioner,
                           HloInstruction* hlo) const override {
      if (hlo->shape().rank() <= 2) {
        // Rank <= 2 has no batch dims to keep partitioned; fall back.
        return partitioner->DefaultAction(hlo);
      }
      const int first_non_batch_dim = hlo->shape().rank() - 2;
      HloInstruction* operand = hlo->mutable_operand(0);
      HloSharding target_sharding =
          hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(
              hlo->sharding(), {first_non_batch_dim, first_non_batch_dim + 1});
      spmd::PartitionedHlo operand_partitioned =
          partitioner->GetPartitionedHlo(operand).Reshard(target_sharding);
      HloCustomCallInstruction* custom_call =
          Cast<HloCustomCallInstruction>(hlo);
      // Carry over the original operand layout constraint onto the
      // partitioned operand shape.
      Shape partitioned_shape_with_layout_constraint =
          operand_partitioned.hlo()->shape();
      (*partitioned_shape_with_layout_constraint.mutable_layout()) =
          custom_call->operand_shapes_with_layout()[0].layout();
      HloInstruction* partitioned_hlo = partitioner->builder()->AddInstruction(
          HloInstruction::CreateCustomCall(
              operand_partitioned.hlo()->shape(), {operand_partitioned.hlo()},
              "BatchableCustomCall",
              {partitioned_shape_with_layout_constraint}));
      partitioned_hlo->set_sharding(target_sharding);
      spmd::PartitionedHlo result_partitioned =
          spmd::PartitionedHlo(partitioned_hlo,
                               operand_partitioned.base_shape(),
                               operand_partitioned.state())
              .Reshard(hlo->sharding());
      partitioner->SetPartitionedHlo(hlo, result_partitioned);
      return absl::OkStatus();
    }
  };
  // NOTE(review): registration is global and persists across tests in this
  // process.
  RegisterCustomCallPartitioner(
      "BatchableCustomCall",
      std::make_unique<BatchableCustomCallPartitioner>());
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
  %p = f32[102,128,128]{2,1,0:T(8,128)} parameter(0), sharding={devices=[2,1,2]<=[4]}
  ROOT custom-call = f32[102,128,128]{2,1,0:T(8,128)} custom-call(p), custom_call_target="BatchableCustomCall", operand_layout_constraints={f32[102,128,128]{2,1,0}}, sharding={devices=[2,1,2]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 4));
  VLOG(1) << module->ToString();
  auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, op::DynamicSlice(
                        AllOf(op::CustomCall(_), op::Shape("f32[51,128,128]")),
                        _, _, _));
}
// A variadic (tuple-output) reduce and its get-tuple-element, all under
// manual sharding: the partitioner should leave the reduce/GTE structure
// intact rather than trying to repartition it.
TEST_P(SpmdPartitioningTest, ManualGetTupleElement) {
  const char* const hlo_string = R"(
HloModule pjit
orclone {
  lhs.1 = u32[] parameter(0)
  rhs.1 = u32[] parameter(2)
  or.2 = u32[] or(lhs.1, rhs.1)
  lhs.0 = u32[] parameter(1)
  rhs.0 = u32[] parameter(3)
  or.3 = u32[] or(lhs.0, rhs.0)
  ROOT tuple.4 = (u32[], u32[]) tuple(or.2, or.3)
}
ENTRY %main.21 {
  select.104 = u32[2,2]{1,0} parameter(0), sharding={manual}
  shift-left.5 = u32[2,2]{1,0} parameter(1), sharding={manual}
  constant.4183 = u32[] constant(0), sharding={manual}
  reduce.1 = (u32[2]{0}, u32[2]{0}) reduce(shift-left.5, select.104, constant.4183, constant.4183), dimensions={1}, sharding={{manual},{manual}}, to_apply=orclone
  ROOT get-tuple-element.13 = u32[2]{0} get-tuple-element(reduce.1), index=0, sharding={manual}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::GetTupleElement(op::Reduce(_, _, _, _)));
}
// Scatter whose operand, updates, and output share the same [2,1,2,4,1]
// tiling (indices_are_sorted + unique_indices): the scatter should be
// partitioned directly, with per-partition shape bf16[1,8,6144,48,64] and no
// all-reduce anywhere in the module.
// NOTE(review): the test name has a typo ("Partitiong"); kept as-is since
// renaming would change the test's identity for filters/sharding configs.
TEST_P(SpmdPartitioningTest, CombiningScatterPartitiong) {
  const char* const hlo_string = R"(
HloModule pjit
region_110.8267 {
  Arg_0.8268 = bf16[] parameter(0)
  Arg_1.8269 = bf16[] parameter(1)
  ROOT add.8270 = bf16[] add(Arg_0.8268, Arg_1.8269)
}
ENTRY %main.21 {
  broadcast.8659 = bf16[2,8,12288,192,64]{4,3,2,1,0} parameter(0), sharding={devices=[2,1,2,4,1]<=[16]}
  reshape.9796 = bf16[2,1,12288,192,64]{4,3,2,1,0} parameter(1), sharding={devices=[2,1,2,4,1]<=[16]}
  iota.50 = s32[2,1]{1,0} iota(), iota_dimension=0, sharding={devices=[2,1,8]<=[16] last_tile_dim_replicate}
  constant.1585 = s32[] constant(0), sharding={replicated}
  broadcast.3764 = s32[2,1]{1,0} broadcast(constant.1585), dimensions={}, sharding={devices=[2,1,8]<=[16] last_tile_dim_replicate}
  reshape_idx = s32[2,1]{1,0} parameter(2), sharding={devices=[2,1,8]<=[16] last_tile_dim_replicate}
  concatenate.8907 = s32[2,5]{1,0} concatenate(iota.50, reshape_idx, broadcast.3764, broadcast.3764, broadcast.3764), dimensions={1}, sharding={devices=[2,1,8]<=[16] last_tile_dim_replicate}
  scatter.9797 = bf16[2,8,12288,192,64]{4,3,2,1,0} scatter(broadcast.8659, concatenate.8907, reshape.9796), update_window_dims={1,2,3,4}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0,1,2,3,4}, index_vector_dim=1, indices_are_sorted=true, unique_indices=true, to_apply=region_110.8267, sharding={devices=[2,1,2,4,1]<=[16]}
  ROOT c = bf16[2,8,12288,192,64]{4,3,2,1,0} copy(scatter.9797), sharding={devices=[2,1,2,4,1]<=[16]}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 16));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Copy(AllOf(op::Shape("bf16[1,8,6144,48,64]"), op::Scatter(_, _, _))));
  EXPECT_EQ(FindInstruction(module.get(), HloOpcode::kAllReduce), nullptr);
}
// A dot where the operand/output device assignments already line up on the
// non-contracting dimension: the partitioner should not need any
// collective-permute to realign the output.
TEST_P(SpmdPartitioningTest, MatchOutputAlignmentNonContractingDot) {
  const char* const hlo_string = R"(
HloModule pjit
ENTRY %main.21 {
  multiply.3535 = f32[4,4]{1,0} parameter(0), sharding={devices=[2,4,2]0,8,1,9,2,10,3,11,4,12,5,13,6,14,7,15 last_tile_dim_replicate}
  reshape.4221 = f32[4,4]{1,0} parameter(1), sharding={devices=[4,1,4]0,8,4,12,1,9,5,13,2,10,6,14,3,11,7,15 last_tile_dim_replicate}
  dot.11597 = f32[4,4]{1,0} dot(multiply.3535, reshape.4221), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1,8]0,8,1,9,2,10,3,11,4,12,5,13,6,14,7,15 last_tile_dim_replicate}
  ROOT copy.1 = f32[4,4]{1,0} copy(dot.11597), sharding={devices=[2,1,8]0,8,1,9,2,10,3,11,4,12,5,13,6,14,7,15 last_tile_dim_replicate}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 16));
  EXPECT_EQ(FindInstruction(module.get(), HloOpcode::kCollectivePermute),
            nullptr);
}
// Resharding [2,1,2] + 2-way replication into [1,2,1] + 4-way replication:
// the partial sharding groups must be merged, which should be done with an
// all-to-all somewhere in the lowered module.
TEST_P(SpmdPartitioningTest, ComplexReshardPartialMerging) {
  const char* const hlo_string = R"(
HloModule pjit
ENTRY %main.21 {
  multiply.3535 = f32[256,256,256]{2,1,0} parameter(0), sharding={devices=[2,1,2,2]<=[8] last_tile_dim_replicate}
  ROOT copy.1 = f32[256,256,256]{2,1,0} copy(multiply.3535), sharding={devices=[1,2,1,4]<=[8] last_tile_dim_replicate}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_NE(FindInstruction(module.get(), HloOpcode::kAllToAll), nullptr);
}
// Regression test: resharding between these two partially replicated
// shardings ([4,1,1]x2-replicated -> [2,2,1]x2-replicated) used to loop; the
// only assertion is that partitioning itself succeeds.
TEST_P(SpmdPartitioningTest, PartialReshardingInfiniteLoops) {
  const char* const hlo_string = R"(
HloModule pjit
ENTRY %main.21 {
  multiply.3535 = f32[256,256,256]{2,1,0} parameter(0), sharding={devices=[4,1,1,2]<=[8] last_tile_dim_replicate}
  ROOT copy.1 = f32[256,256,256]{2,1,0} copy(multiply.3535), sharding={devices=[2,2,1,2]<=[8] last_tile_dim_replicate}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  XLA_VLOG_LINES(1, module->ToString());
}
// The gather's output sharding ([8,2]) does not match what operand
// passthrough would produce: the cost model should still pick a partitioned
// gather whose per-partition shape is bf16[2048,64].
TEST_P(SpmdPartitioningTest, GatherCostModelForUnmatchedSharding) {
  const char* const kHloText = R"(
HloModule pjit
region_10.581.clone {
  Arg_0.53 = bf16[] parameter(0)
  Arg_1.53 = bf16[] parameter(1)
  ROOT add.1294 = bf16[] add(Arg_0.53, Arg_1.53)
}
ENTRY %main.21 {
  p0 = bf16[8192,128]{1,0} parameter(0), sharding={devices=[2,4,2]<=[2,4,2]T(2,1,0) last_tile_dim_replicate}
  p1 = s32[16384,1]{1,0} parameter(1), sharding={devices=[8,1,2]<=[16] last_tile_dim_replicate}
  gather.0 = bf16[16384,128]{1,0} gather(p0, p1), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,128}, sharding={devices=[8,2]<=[16]}
  constant.2467 = bf16[] constant(0)
  reduce.1749 = bf16[16384]{0} reduce(gather.0, constant.2467), dimensions={1}, to_apply=region_10.581.clone, sharding={devices=[8,2]<=[16] last_tile_dim_replicate}
  ROOT copy.1 = bf16[16384]{0} copy(reduce.1749), sharding={devices=[8,2]<=[16] last_tile_dim_replicate}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(kHloText, 16));
  XLA_VLOG_LINES(1, module->ToString());
  const HloInstruction* gather_inst =
      FindInstruction(module.get(), HloOpcode::kGather);
  EXPECT_NE(gather_inst, nullptr);
  EXPECT_THAT(gather_inst, op::Shape("bf16[2048,64]"));
}
// Scatter analogue of GatherCostModelForUnmatchedSharding: the updates
// operand's sharding does not match the operand passthrough choice, but the
// cost model should still shard the updates down to bf16[4096,64].
TEST_P(SpmdPartitioningTest, ScatterCostModelForUnmatchedSharding) {
  const char* const kHloText = R"(
HloModule pjit
%region_335.4575 {
  %Arg_0.4576 = bf16[] parameter(0)
  %Arg_1.4577 = bf16[] parameter(1)
  ROOT %add.4578 = bf16[] add(%Arg_0.4576, %Arg_1.4577)
}
ENTRY %main.21 {
  %p0 = bf16[8192,128]{1,0} parameter(0), sharding={devices=[2,4,2]<=[2,4,2]T(2,1,0) last_tile_dim_replicate}
  %p1 = s32[32768,1]{1,0} parameter(1), sharding={devices=[8,1,2]<=[16] last_tile_dim_replicate}
  %p2 = bf16[32768,128]{1,0} parameter(2), sharding={devices=[8,2]<=[16]}
  %scatter.0 = bf16[8192,128]{1,0} scatter(%p0, %p1, %p2), update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%region_335.4575, sharding={devices=[2,4,2]<=[2,4,2]T(2,1,0) last_tile_dim_replicate}
  ROOT %convert.427 = f32[8192,128]{1,0} convert(%scatter.0), sharding={devices=[2,4,2]<=[2,4,2]T(2,1,0) last_tile_dim_replicate}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(kHloText, 16));
  XLA_VLOG_LINES(1, module->ToString());
  const HloInstruction* scatter_inst =
      FindInstruction(module.get(), HloOpcode::kScatter);
  EXPECT_NE(scatter_inst, nullptr);
  // Operand 2 of the scatter is the updates tensor.
  EXPECT_THAT(scatter_inst->operand(2), op::Shape("bf16[4096,64]"));
}
// Unmerging the dim-3 tiling ([1,1,2,8] -> [1,4,2,2]) across 16 devices:
// expect the reshard to use an all-to-all and to avoid any all-reduce
// (i.e. no full rematerialization through replication).
TEST_P(SpmdPartitioningTest, ComplexReshardUnmerge) {
  const char* const kHloText = R"(
HloModule Test
ENTRY main.4 {
  Arg_0.1 = f32[8,8,8,8]{3,2,1,0} parameter(0), sharding={devices=[1,1,2,8]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
  tuple.2 = (f32[8,8,8,8]{3,2,1,0}) tuple(Arg_0.1), sharding={{devices=[1,4,2,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}}
  ROOT get-tuple-element.3 = f32[8,8,8,8]{3,2,1,0} get-tuple-element(tuple.2), index=0, sharding={devices=[1,4,2,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(kHloText, 16));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_EQ(FindInstruction(module.get(), HloOpcode::kAllReduce), nullptr);
  EXPECT_NE(FindInstruction(module.get(), HloOpcode::kAllToAll), nullptr);
}
// Unmerging an 8-way dim-0 tiling into [2,4] (moving sharding rightwards)
// on a transposed device order: expect all-to-all, no all-reduce.
TEST_P(SpmdPartitioningTest, ComplexReshardUnmergeToRight) {
  const char* const kHloText = R"(
HloModule Test
ENTRY main.4 {
  Arg_0.1 = f32[8,32]{1,0} parameter(0), sharding={devices=[8,1]<=[4,2]T(1,0)}
  tuple.2 = (f32[8,32]{1,0}) tuple(Arg_0.1), sharding={{devices=[2,4]<=[4,2]T(1,0)}}
  ROOT get-tuple-element.3 = f32[8,32]{1,0} get-tuple-element(tuple.2), index=0, sharding={devices=[2,4]<=[4,2]T(1,0)}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(kHloText, 8));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_EQ(FindInstruction(module.get(), HloOpcode::kAllReduce), nullptr);
  EXPECT_NE(FindInstruction(module.get(), HloOpcode::kAllToAll), nullptr);
}
// Unmerging an 8-way dim-1 tiling into [2,4] (moving sharding leftwards) on
// a transposed device order: expect all-to-all, no all-reduce.
TEST_P(SpmdPartitioningTest, ComplexReshardUnmergeToLeft) {
  const char* const kHloText = R"(
HloModule Test
ENTRY main.4 {
  Arg_0.1 = f32[8,32]{1,0} parameter(0), sharding={devices=[1,8]<=[4,2]T(1,0)}
  tuple.2 = (f32[8,32]{1,0}) tuple(Arg_0.1), sharding={{devices=[2,4]<=[4,2]T(1,0)}}
  ROOT get-tuple-element.3 = f32[8,32]{1,0} get-tuple-element(tuple.2), index=0, sharding={devices=[2,4]<=[4,2]T(1,0)}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(kHloText, 8));
  XLA_VLOG_LINES(1, module->ToString())
  ;
  EXPECT_EQ(FindInstruction(module.get(), HloOpcode::kAllReduce), nullptr);
  EXPECT_NE(FindInstruction(module.get(), HloOpcode::kAllToAll), nullptr);
}
// Same shape of reshard as ComplexReshardUnmergeToLeft, but dim 1 is 33
// (not divisible): the all-to-all path is not taken, and the reshard falls
// back to a replication-based path with an all-reduce instead.
TEST_P(SpmdPartitioningTest, NoComplexReshardUnmergeToLeft) {
  const char* const kHloText = R"(
HloModule Test
ENTRY main.4 {
  Arg_0.1 = f32[8,33]{1,0} parameter(0), sharding={devices=[1,8]<=[4,2]T(1,0)}
  tuple.2 = (f32[8,33]{1,0}) tuple(Arg_0.1), sharding={{devices=[2,4]<=[4,2]T(1,0)}}
  ROOT get-tuple-element.3 = f32[8,33]{1,0} get-tuple-element(tuple.2), index=0, sharding={devices=[2,4]<=[4,2]T(1,0)}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(kHloText, 8));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_NE(FindInstruction(module.get(), HloOpcode::kAllReduce), nullptr);
  EXPECT_EQ(FindInstruction(module.get(), HloOpcode::kAllToAll), nullptr);
}
// Regression test: this [4,2,1] -> [2,2,2] reshard (with a permuted device
// list) used to crash; it should now succeed and lower via an all-to-all.
TEST_P(SpmdPartitioningTest, ReshardCrash) {
  const char* const kHloText = R"(
HloModule Test
ENTRY main.6 {
  Arg_0.1 = f32[8,32,4] parameter(0), sharding={devices=[4,2,1]0,2,1,3,4,6,5,7}
  ROOT copy = copy(Arg_0.1), sharding={devices=[2,2,2]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(kHloText, 8));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_NE(FindInstruction(module.get(), HloOpcode::kAllToAll), nullptr);
}
// Uneven reshard (dim 0 is 6, tiled 4 ways) with a compatible device order:
// only a partial rematerialization is needed — a 2-group all-reduce and no
// collective-permute.
TEST_P(SpmdPartitioningTest, ReshardNoFullRematCompatible) {
  const char* const kHloText = R"(
HloModule Test
ENTRY main.6 {
  Arg_0.1 = f32[6,32,4] parameter(0), sharding={devices=[4,2,1]0,2,1,3,4,6,5,7}
  ROOT copy = copy(Arg_0.1), sharding={devices=[2,2,2]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(kHloText, 8));
  XLA_VLOG_LINES(1, module->ToString());
  const HloInstruction* all_reduce =
      FindInstruction(module.get(), HloOpcode::kAllReduce);
  EXPECT_NE(all_reduce, nullptr);
  EXPECT_EQ(all_reduce->replica_groups().size(), 2);
  EXPECT_EQ(FindInstruction(module.get(), HloOpcode::kCollectivePermute),
            nullptr);
}
// Same uneven reshard as ReshardNoFullRematCompatible, but the target device
// order is incompatible: still a 2-group all-reduce, plus an additional
// collective-permute to fix up the device assignment.
TEST_P(SpmdPartitioningTest, ReshardNoFullRematIncompatible) {
  const char* const kHloText = R"(
HloModule Test
ENTRY main.6 {
  Arg_0.1 = f32[6,32,4] parameter(0), sharding={devices=[4,2,1]0,2,1,3,4,6,5,7}
  ROOT copy = copy(Arg_0.1), sharding={devices=[2,2,2]0,1,3,4,2,6,5,7}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(kHloText, 8));
  XLA_VLOG_LINES(1, module->ToString());
  const HloInstruction* all_reduce =
      FindInstruction(module.get(), HloOpcode::kAllReduce);
  EXPECT_NE(all_reduce, nullptr);
  EXPECT_EQ(all_reduce->replica_groups().size(), 2);
  EXPECT_NE(FindInstruction(module.get(), HloOpcode::kCollectivePermute),
            nullptr);
}
// Chained outfeeds under manual sharding: the manually-sharded outfeed
// operand keeps its full (unpartitioned) shape (u32[2]{0}).
TEST_P(SpmdPartitioningTest, OutfeedChainedManualPartitioned) {
  const char* const hlo_string = R"(
HloModule Test
ENTRY %entry (p0: f32[8], p1: f32[1]) -> (f32[1], token[]) {
%p1 = f32[1]{0} parameter(1), sharding={replicated}
%p0 = f32[8]{0} parameter(0), sharding={manual}
%tuple.1 = (f32[8]{0}) tuple(f32[8]{0} %p0), sharding={{manual}}
%constant.8 = u32[2]{0} constant({3, 12})
%tuple.10 = (u32[2]{0}) tuple(u32[2]{0} %constant.8), sharding={{manual}}
%aa.1 = token[] after-all()
%outfeed.1 = token[] outfeed((u32[2]{0}) %tuple.10, token[] %aa.1), outfeed_shape=(u32[2]{0}), sharding={{manual}, {manual}}
%outfeed.2 = token[] outfeed((f32[8]{0}) %tuple.1, token[] %outfeed.1), outfeed_shape=(f32[8]{0}), sharding={{manual}, {manual}}
ROOT %tuple.15 = (f32[1]{0}, token[]) tuple(f32[1]{0} %p1, token[] %outfeed.2), sharding={{replicated}, {manual}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  XLA_VLOG_LINES(1, module->ToString());
  auto* outfeed = FindInstruction(module.get(), HloOpcode::kOutfeed);
  EXPECT_NE(outfeed, nullptr);
  EXPECT_THAT(outfeed->operand(0), op::Shape("(u32[2]{0})"));
}
// Pad on a dimension sharded over 2 devices (13 -> 14): the partitioned root
// is a select with per-shard shape f32[128,7,257].
TEST_P(SpmdPartitioningTest, PadUneven) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[128,13,257] parameter(0), sharding={devices=[1,2,1]0,1}
%const = f32[] constant(0)
ROOT %pad = f32[128,14,257] pad(%param0, %const), padding=0_0x0_1x0_0,
sharding={devices=[1,2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Select(), op::Shape("f32[128,7,257]")));
}
// The dot's RHS (which has a sharded contracting dimension) should be
// resharded to match the output partitioning instead of being all-reduced.
TEST_P(SpmdPartitioningTest, MatchOutputPartitioningForContractingRHS) {
  absl::string_view hlo_string = R"(
HloModule extracted_module
ENTRY %extracted_computation {
%param = bf16[256,1,114688]{2,1,0} parameter(0)
%reshape.788 = bf16[256,114688]{1,0} reshape(bf16[256,1,114688]{2,1,0} %param), sharding={devices=[1,4,2]<=[2,4]T(1,0) last_tile_dim_replicate}
%param.1 = bf16[1,114688,14336]{2,1,0} parameter(1)
%reshape.747 = bf16[114688,14336]{1,0} reshape(bf16[1,114688,14336]{2,1,0} %param.1), sharding={devices=[4,2]<=[2,4]T(1,0)}
%dot.89 = bf16[256,14336]{1,0} dot(bf16[256,114688]{1,0} %reshape.788, bf16[114688,14336]{1,0} %reshape.747), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[1,8]<=[8]}
%reshape.789 = bf16[256,1,14336]{2,1,0} reshape(bf16[256,14336]{1,0} %dot.89), sharding={devices=[1,1,8]<=[8]}
ROOT %copy = bf16[256,1,14336]{2,1,0} copy(bf16[256,1,14336]{2,1,0} %reshape.789)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  auto* dot = FindInstruction(module.get(), HloOpcode::kDot);
  EXPECT_NE(dot, nullptr);
  // The RHS operand must not be produced by an all-reduce.
  EXPECT_NE(dot->operand(1)->opcode(), HloOpcode::kAllReduce);
}
// Mirror of MatchOutputPartitioningForContractingRHS: the LHS (sharded
// contracting dimension) should match the output partitioning instead of
// being all-reduced.
TEST_P(SpmdPartitioningTest, MatchOutputPartitioningForContractingLHS) {
  absl::string_view hlo_string = R"(
HloModule extracted_module
ENTRY %extracted_computation {
%param = bf16[256,1,114688]{2,1,0} parameter(0)
%reshape.788 = bf16[256,114688]{1,0} reshape(bf16[256,1,114688]{2,1,0} %param), sharding={devices=[2,4]<=[8]}
%param.1 = bf16[1,114688,14336]{2,1,0} parameter(1)
%reshape.747 = bf16[114688,14336]{1,0} reshape(bf16[1,114688,14336]{2,1,0} %param.1), sharding={devices=[4,1,2]<=[2,4]T(1,0) last_tile_dim_replicate}
%dot.89 = bf16[256,14336]{1,0} dot(bf16[256,114688]{1,0} %reshape.788, bf16[114688,14336]{1,0} %reshape.747), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[8,1]<=[8]}
%reshape.789 = bf16[256,1,14336]{2,1,0} reshape(bf16[256,14336]{1,0} %dot.89), sharding={devices=[8,1,1]<=[8]}
ROOT %copy = bf16[256,1,14336]{2,1,0} copy(bf16[256,1,14336]{2,1,0} %reshape.789)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 8));
  VLOG(1) << module->ToString();
  auto* dot = FindInstruction(module.get(), HloOpcode::kDot);
  EXPECT_NE(dot, nullptr);
  // The LHS operand must not be produced by an all-reduce.
  EXPECT_NE(dot->operand(0)->opcode(), HloOpcode::kAllReduce);
}
// TopK custom call with the top-k dimension sharded over 2 devices: each
// shard computes a local top-k on bf16[64,128000], the partial results are
// merged via a sorted (bf16[64,80], s32[64,80]) pair, and the final TopK
// keeps the original output shape.
TEST_P(SpmdPartitioningTest, TopKCustomCallTopKDimSharded) {
  absl::string_view hlo_string = R"(
HloModule module
region_695.22546 {
Arg_2.22549 = s32[] parameter(2)
Arg_3.22550 = s32[] parameter(3)
Arg_0.22547 = bf16[] parameter(0)
Arg_1.22548 = bf16[] parameter(1)
ROOT compare.22551 = pred[] compare(Arg_0.22547, Arg_1.22548), direction=GT, type=TOTALORDER
}
ENTRY %entry {
%multiply.43401 = bf16[64,256000]{1,0} parameter(0), sharding={devices=[1,2]0,1}
%custom-call = (bf16[64,40]{1,0}, s32[64,40]{1,0}) custom-call(bf16[64,256000]{1,0} %multiply.43401), custom_call_target="TopK", called_computations={%region_695.22546}, sharding={{devices=[1,2]0,1}, {devices=[1,2]0,1}}
%get-tuple-element.336 = bf16[64,40]{1,0} get-tuple-element((bf16[64,40]{1,0}, s32[64,40]{1,0}) %custom-call), index=0
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  auto sort_instruction = FindInstruction(module.get(), HloOpcode::kSort);
  EXPECT_THAT(sort_instruction,
              op::Shape("(bf16[64,80]{1,0}, s32[64,80]{1,0})"));
  auto topk_instruction = FindInstruction(module.get(), HloOpcode::kCustomCall);
  auto topk_operand = topk_instruction->operand(0);
  EXPECT_EQ(topk_instruction->custom_call_target(), "TopK");
  EXPECT_THAT(topk_instruction,
              op::Shape("(bf16[64,40]{1,0}, s32[64,40]{1,0})"));
  EXPECT_THAT(topk_operand, op::Shape("bf16[64,128000]{1,0}"));
}
// TopK custom call with a non-top-k (batch) dimension sharded: the TopK is
// partitioned along the batch dimension (bf16[32,...]) and the top-k
// dimension stays whole (256000).
TEST_P(SpmdPartitioningTest, TopKCustomCallNonTopKDimSharded) {
  absl::string_view hlo_string = R"(
HloModule module
region_695.22546 {
Arg_2.22549 = s32[] parameter(2)
Arg_3.22550 = s32[] parameter(3)
Arg_0.22547 = bf16[] parameter(0)
Arg_1.22548 = bf16[] parameter(1)
ROOT compare.22551 = pred[] compare(Arg_0.22547, Arg_1.22548), direction=GT, type=TOTALORDER
}
ENTRY %entry {
%multiply.43401 = bf16[64,256000]{1,0} parameter(0), sharding={devices=[2,1]0,1}
%custom-call = (bf16[64,40]{1,0}, s32[64,40]{1,0}) custom-call(bf16[64,256000]{1,0} %multiply.43401), custom_call_target="TopK", called_computations={%region_695.22546}, sharding={{devices=[1,2]0,1}, {devices=[2,1]0,1}}
%get-tuple-element.336 = bf16[64,40]{1,0} get-tuple-element((bf16[64,40]{1,0}, s32[64,40]{1,0}) %custom-call), index=0
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  auto sort_instruction = FindInstruction(module.get(), HloOpcode::kSort);
  CHECK_NE(sort_instruction, nullptr);
  auto topk_instruction = FindInstruction(module.get(), HloOpcode::kCustomCall);
  auto topk_operand = topk_instruction->operand(0);
  EXPECT_EQ(topk_instruction->custom_call_target(), "TopK");
  EXPECT_THAT(topk_instruction,
              op::Shape("(bf16[32,40]{1,0}, s32[32,40]{1,0})"));
  EXPECT_THAT(topk_operand, op::Shape("bf16[32,256000]{1,0}"));
}
// Replicated TopK output with a batch-sharded operand: the TopK is computed
// on the batch-partitioned input (bf16[32,256000]) and the sort merging the
// partial results also runs on the batch-partitioned shape.
TEST_P(SpmdPartitioningTest,
       TopKCustomCallTopkReplicatedOperandNonTopKDimSharded) {
  absl::string_view hlo_string = R"(
HloModule module
region_695.22546 {
Arg_2.22549 = s32[] parameter(2)
Arg_3.22550 = s32[] parameter(3)
Arg_0.22547 = bf16[] parameter(0)
Arg_1.22548 = bf16[] parameter(1)
ROOT compare.22551 = pred[] compare(Arg_0.22547, Arg_1.22548), direction=GT, type=TOTALORDER
}
ENTRY %entry {
%multiply.43401 = bf16[64,256000]{1,0} parameter(0), sharding={devices=[2,1]0,1}
%custom-call = (bf16[64,40]{1,0}, s32[64,40]{1,0}) custom-call(bf16[64,256000]{1,0} %multiply.43401), custom_call_target="TopK", called_computations={%region_695.22546}, sharding={{replicated}, {replicated}}
%get-tuple-element.336 = bf16[64,40]{1,0} get-tuple-element((bf16[64,40]{1,0}, s32[64,40]{1,0}) %custom-call), index=0
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  auto sort_instruction = FindInstruction(module.get(), HloOpcode::kSort);
  EXPECT_THAT(sort_instruction,
              op::Shape("(bf16[32,40]{1,0}, s32[32,40]{1,0})"));
  auto topk_instruction = FindInstruction(module.get(), HloOpcode::kCustomCall);
  auto topk_operand = topk_instruction->operand(0);
  EXPECT_EQ(topk_instruction->custom_call_target(), "TopK");
  EXPECT_THAT(topk_instruction,
              op::Shape("(bf16[32,40]{1,0}, s32[32,40]{1,0})"));
  EXPECT_THAT(topk_operand, op::Shape("bf16[32,256000]{1,0}"));
}
// Replicated TopK output with the top-k dimension sharded: each shard
// computes a local top-k on bf16[64,128000] and the partial results are
// merged by sorting a (bf16[64,80], s32[64,80]) pair.
TEST_P(SpmdPartitioningTest,
       TopKCustomCallTopkReplicatedOperandTopKDimSharded) {
  absl::string_view hlo_string = R"(
HloModule module
region_695.22546 {
Arg_2.22549 = s32[] parameter(2)
Arg_3.22550 = s32[] parameter(3)
Arg_0.22547 = bf16[] parameter(0)
Arg_1.22548 = bf16[] parameter(1)
ROOT compare.22551 = pred[] compare(Arg_0.22547, Arg_1.22548), direction=GT, type=TOTALORDER
}
ENTRY %entry {
%multiply.43401 = bf16[64,256000]{1,0} parameter(0), sharding={devices=[1,2]0,1}
%custom-call = (bf16[64,40]{1,0}, s32[64,40]{1,0}) custom-call(bf16[64,256000]{1,0} %multiply.43401), custom_call_target="TopK", called_computations={%region_695.22546}, sharding={{replicated}, {replicated}}
%get-tuple-element.336 = bf16[64,40]{1,0} get-tuple-element((bf16[64,40]{1,0}, s32[64,40]{1,0}) %custom-call), index=0
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  auto sort_instruction = FindInstruction(module.get(), HloOpcode::kSort);
  EXPECT_THAT(sort_instruction,
              op::Shape("(bf16[64,80]{1,0}, s32[64,80]{1,0})"));
  auto topk_instruction = FindInstruction(module.get(), HloOpcode::kCustomCall);
  auto topk_operand = topk_instruction->operand(0);
  EXPECT_EQ(topk_instruction->custom_call_target(), "TopK");
  EXPECT_THAT(topk_instruction,
              op::Shape("(bf16[64,40]{1,0}, s32[64,40]{1,0})"));
  EXPECT_THAT(topk_operand, op::Shape("bf16[64,128000]{1,0}"));
}
// TopK under manual sharding: the partitioner must leave the custom call
// untouched (full shapes, no merge sort inserted).
TEST_P(SpmdPartitioningTest, TopKCustomCallManualSharding) {
  absl::string_view hlo_string = R"(
HloModule module
region {
Arg_2.22549 = s32[] parameter(2)
Arg_3.22550 = s32[] parameter(3)
Arg_0.22547 = bf16[] parameter(0)
Arg_1.22548 = bf16[] parameter(1)
ROOT compare.22551 = pred[] compare(Arg_0.22547, Arg_1.22548), direction=GT, type=TOTALORDER
}
ENTRY %entry {
%p0 = bf16[64,256000]{1,0} parameter(0), sharding={manual}
%custom-call = (bf16[64,40]{1,0}, s32[64,40]{1,0}) custom-call(bf16[64,256000]{1,0} %p0), custom_call_target="TopK", called_computations={%region}, sharding={{manual}, {manual}}
%get-tuple-element.336 = bf16[64,40]{1,0} get-tuple-element((bf16[64,40]{1,0}, s32[64,40]{1,0}) %custom-call), index=0, sharding={manual}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(hlo_string, 2));
  VLOG(1) << module->ToString();
  // No merging sort is inserted under manual sharding.
  EXPECT_EQ(FindInstruction(module.get(), HloOpcode::kSort), nullptr);
  auto topk_instruction = FindInstruction(module.get(), HloOpcode::kCustomCall);
  EXPECT_EQ(topk_instruction->custom_call_target(), "TopK");
  EXPECT_THAT(topk_instruction->operand(0), op::Shape("bf16[64,256000]{1,0}"));
  EXPECT_THAT(topk_instruction,
              op::Shape("(bf16[64,40]{1,0}, s32[64,40]{1,0})"));
}
// Regression test for b/305313406: the windowed-einsum loop must be set up to
// match the LHS partitioning -- the root is a get-tuple-element of the while
// loop carrying the collective-permuted LHS shard and the all-reduced RHS.
TEST_P(SpmdPartitioningTest, WindowedEinsumShouldMatchLhs_b305313406) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %entry {
%copy.11 = bf16[64,2048,20480]{2,1,0} parameter(0), sharding={devices=[8,1,4]<=[32]}
%reshape.44 = bf16[20480,65536]{1,0} parameter(1), sharding={devices=[4,4,2]0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23,8,24,9,25,10,26,11,27,12,28,13,29,14,30,15,31 last_tile_dim_replicate}
ROOT %dot.339 = bf16[64,2048,65536]{2,1,0} dot(bf16[64,2048,20480]{2,1,0} %copy.11, bf16[20480,65536]{1,0} %reshape.44), lhs_contracting_dims={2}, rhs_contracting_dims={0}, sharding={devices=[8,1,4]<=[32]}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 32,
                           true,
                           true,
                           false,
                           true,
                           -1));
  XLA_VLOG_LINES(1, module->ToString());
  const auto collective_permute =
      AllOf(op::CollectivePermute(), op::Shape("bf16[8,2048,1,5120]"));
  const auto broadcast =
      AllOf(op::Broadcast(), op::Shape("bf16[8,2048,16384]"));
  const auto all_reduce =
      AllOf(op::AllReduce(), op::Shape("bf16[20480,16384]"));
  const auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::GetTupleElement(op::While(op::Tuple(
                              op::Reshape(), all_reduce, op::Broadcast(),
                              collective_permute, op::Constant()))),
                          op::Shape("bf16[8,2048,16384]")));
}
// A reshape whose input and output shardings differ in a complex way across
// 128 devices should be resharded with an all-to-all.
TEST_P(SpmdPartitioningTest, ComplexReshapeReshard) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %extracted_computation (param: f32[13,128,312,16,312]) -> f32[13,39936,4992] {
%param = f32[13,128,312,16,312]{4,2,3,1,0} parameter(0)
%copy.1261 = f32[13,128,312,16,312]{4,3,2,1,0} copy(f32[13,128,312,16,312]{4,2,3,1,0} %param), sharding={devices=[1,32,1,2,1,2]<=[2,64]T(1,0) last_tile_dim_replicate}
%reshape.27217 = f32[13,39936,4992]{2,1,0} reshape(f32[13,128,312,16,312]{4,3,2,1,0} %copy.1261), sharding={devices=[1,2,32,2]<=[2,32,2]T(2,1,0) last_tile_dim_replicate}
%copy.1260 = f32[13,39936,4992]{2,1,0} copy(f32[13,39936,4992]{2,1,0} %reshape.27217), sharding={devices=[1,2,32,2]<=[2,32,2]T(2,1,0) last_tile_dim_replicate}
ROOT %copy = f32[13,39936,4992]{2,1,0} copy(f32[13,39936,4992]{2,1,0} %copy.1260)
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 128,
                           true,
                           true,
                           false,
                           true,
                           -1));
  XLA_VLOG_LINES(1, module->ToString());
  auto all_to_all = FindInstruction(module.get(), HloOpcode::kAllToAll);
  EXPECT_NE(all_to_all, nullptr);
}
// Sort along a sharded dimension that cannot be moved: the sort dimension is
// all-gathered (via all-reduce) so each shard sorts the full 4096 extent,
// while the other dimensions stay partitioned.
TEST_P(SpmdPartitioningTest, SortAllGatherNonMovableDimension) {
  const char* const hlo_string = R"(
HloModule module
top_k_gt_f32_comparator_64.35303 {
Arg_2.35306 = s32[] parameter(2)
Arg_3.35307 = s32[] parameter(3)
Arg_0.35304 = f32[] parameter(0)
Arg_1.35305 = f32[] parameter(1)
ROOT compare.35308 = pred[] compare(Arg_0.35304, Arg_1.35305), direction=GT
}
ENTRY entry {
param.0 = f32[4,16384,4096]{2,1,0} parameter(0), sharding={devices=[4,4,4]<=[64]}
param.1 = s32[4,16384,4096]{2,1,0} parameter(1), sharding={devices=[4,4,4]<=[64]}
ROOT sort.209 = (f32[4,16384,4096]{2,1,0}, s32[4,16384,4096]{2,1,0}) sort(param.0, param.1), dimensions={2}, to_apply=top_k_gt_f32_comparator_64.35303, sharding={{devices=[4,4,4]<=[64]}, {devices=[4,4,4]<=[64]}}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(
          hlo_string, 64,
          true,
          true));
  XLA_VLOG_LINES(1, module->ToString());
  auto* root = module->entry_computation()->root_instruction();
  auto* sort = FindInstruction(module.get(), HloOpcode::kSort);
  EXPECT_THAT(
      root,
      AllOf(op::Tuple(),
            op::Shape("(f32[1,4096,1024]{2,1,0}, s32[1,4096,1024]{2,1,0})")));
  EXPECT_THAT(
      sort,
      AllOf(op::Sort(
                AllOf(op::AllReduce(), op::Shape("f32[1,4096,4096]{2,1,0}")),
                AllOf(op::AllReduce(), op::Shape("s32[1,4096,4096]{2,1,0}"))),
            op::Shape("(f32[1,4096,4096]{2,1,0}, s32[1,4096,4096]{2,1,0})")));
}
// Host-memory offloading (MoveToHost/MoveToDevice custom calls) must be
// partitioned like regular ops: both custom calls end up with the per-shard
// shape f32[1,256,32] and stay wired to the dynamic-update-slice /
// dynamic-slice pair.
TEST_P(SpmdPartitioningTest, PartitionOffloading) {
  const char* const hlo_string = R"(
HloModule module, entry_computation_layout={(f32[1,256,128]{2,1,0})->f32[1,256,128]{2,1,0}}
ENTRY offloading (param0: f32[1,256,128]) -> f32[1,256,128] {
zero = f32[] constant(0), sharding={replicated}
broadcast = f32[256,256,128]{2,1,0} broadcast(zero), dimensions={}, sharding={devices=[1,1,4]0,1,2,3}
param0 = f32[1,256,128]{2,1,0} parameter(0), sharding={devices=[1,1,4]0,1,2,3}
move-to-host = f32[1,256,128]{2,1,0} custom-call(param0), custom_call_target="MoveToHost", sharding={devices=[1,1,4]0,1,2,3}
izero = s32[] constant(0)
dynamic-update-slice = f32[256,256,128]{2,1,0} dynamic-update-slice(broadcast, move-to-host, izero, izero, izero), sharding={devices=[1,1,4]0,1,2,3}
dynamic-slice = f32[1,256,128]{2,1,0} dynamic-slice(dynamic-update-slice, izero, izero, izero), dynamic_slice_sizes={1,256,128}, sharding={devices=[1,1,4]0,1,2,3}
move-to-device = f32[1,256,128]{2,1,0} custom-call(dynamic-slice), custom_call_target="MoveToDevice", sharding={devices=[1,4,1]0,1,2,3}
ROOT copy = f32[1,256,128]{2,1,0} copy(move-to-device), sharding={devices=[1,4,1]0,1,2,3}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(
          hlo_string, 4,
          true,
          true));
  XLA_VLOG_LINES(1, module->ToString());
  auto move_to_host = FindInstruction(module.get(), "move-to-host.1");
  auto move_to_device = FindInstruction(module.get(), "move-to-device.1");
  EXPECT_EQ(
      FindInstruction(module.get(), HloOpcode::kDynamicUpdateSlice)->operand(1),
      move_to_host);
  EXPECT_EQ(move_to_device->operand(0)->opcode(), HloOpcode::kDynamicSlice);
  EXPECT_THAT(move_to_host, op::Shape("f32[1,256,32]"));
  EXPECT_THAT(move_to_device, op::Shape("f32[1,256,32]"));
}
}
}
}
#include "xla/service/gpu/model/coalescing_analysis.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <optional>
#include <stack>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/MathExtras.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/LLVM.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/affine_map_evaluator.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/model/tiled_hlo_instruction.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
// Heuristically estimates whether the reads of a (possibly fused)
// producer/consumer pair are coalesced. `consumer` may be null.
bool IsReadCoalescedHeuristic(HloFusionAnalysis::EmitterFusionKind fusion_kind,
                              const HloInstruction* producer,
                              const HloInstruction* consumer) {
  // Transposing the minor dimension breaks coalescing, unless the emitter is
  // the dedicated transpose emitter.
  if (fusion_kind != HloFusionAnalysis::EmitterFusionKind::kTranspose) {
    // Walks down a chain of unary elementwise/bitcast ops and reports whether
    // it bottoms out in a broadcast or iota.
    auto is_broadcast = [&](const HloInstruction* hlo) {
      for (;;) {
        const HloOpcode opcode = hlo->opcode();
        if (opcode == HloOpcode::kBroadcast || opcode == HloOpcode::kIota) {
          return true;
        }
        const bool unary_passthrough =
            hlo->operand_count() == 1 &&
            (opcode == HloOpcode::kBitcast || hlo->IsElementwise());
        if (!unary_passthrough) {
          return false;
        }
        hlo = hlo->operand(0);
      }
    };
    // Reports whether `hlo` (or, for a fusion, any fused instruction)
    // transposes the minor dimension of a non-broadcast input.
    auto is_bad_transpose = [&](const HloInstruction* hlo) {
      if (hlo->opcode() != HloOpcode::kFusion) {
        return TransposesMinorDimension(hlo) && !is_broadcast(hlo->operand(0));
      }
      for (auto* fused : hlo->fused_instructions()) {
        if (TransposesMinorDimension(fused) &&
            !is_broadcast(fused->operand(0))) {
          return true;
        }
      }
      return false;
    };
    if (is_bad_transpose(producer) ||
        (consumer != nullptr && is_bad_transpose(consumer))) {
      return false;
    }
  }
  // Fusing two input-fusible reductions is treated as uncoalesced.
  if (fusion_kind == HloFusionAnalysis::EmitterFusionKind::kReduction &&
      IsInputFusibleReduction(*producer) && consumer != nullptr &&
      IsInputFusibleReduction(*consumer)) {
    return false;
  }
  return true;
}
// Heuristically estimates whether reading one tile of `operand` is coalesced
// on `device_info`.
bool IsTiledReadCoalescedHeuristic(const TiledHloInstruction& operand,
                                   const se::DeviceDescription& device_info) {
  const Shape& shape = operand.hlo()->shape();

  // Count the elements of the tile that are read back-to-back, walking the
  // physical layout from the minormost dimension outwards.
  int64_t contiguous_elements = 1;
  for (const auto dim_idx : shape.layout().minor_to_major()) {
    // A non-unit stride ends the contiguous run.
    if (operand.tile_stride(dim_idx) != 1) {
      break;
    }
    const int64_t tile_size = operand.tile_size(dim_idx);
    const int64_t dim_size = shape.dimensions(dim_idx);
    contiguous_elements *= std::min(tile_size, dim_size);
    // If the tile does not cover the whole dimension, more major dimensions
    // cannot extend the run.
    if (tile_size < dim_size) {
      break;
    }
  }

  const int64_t contiguous_bytes =
      contiguous_elements *
      ShapeUtil::ByteSizeOfPrimitiveType(operand.hlo()->shape().element_type());

  // Reads count as coalesced when each contiguous run covers at least one
  // full DRAM->L2 transaction.
  return contiguous_bytes >= device_info.dram_to_l2_transaction_size_bytes();
}
namespace {
using ::mlir::AffineBinaryOpExpr;
using ::mlir::AffineConstantExpr;
using ::mlir::AffineExpr;
using ::mlir::AffineExprKind;
using ::mlir::AffineMap;
using ::mlir::getAffineConstantExpr;
using ::mlir::MLIRContext;
// Estimates whether accessing the given intervals of `element_type` values is
// coalesced by comparing the number of 128-byte memory transactions actually
// needed against the minimum possible for the same element count.
bool EstimateCoalescingViaMemoryTransactionsCount(
    absl::Span<const Interval> intervals, PrimitiveType element_type) {
  constexpr int64_t kBytesPerMemoryTransaction = 128;
  const int64_t type_size = ShapeUtil::ByteSizeOfPrimitiveType(element_type);

  int actual_transactions = 0;
  int total_elements = 0;
  for (const Interval& interval : intervals) {
    const int64_t elements_in_interval = interval.upper - interval.lower + 1;
    actual_transactions += llvm::divideCeilSigned(
        elements_in_interval * type_size, kBytesPerMemoryTransaction);
    total_elements += elements_in_interval;
  }
  if (actual_transactions == 0) {
    return true;
  }

  // The fewest transactions a fully contiguous access would need.
  const int min_transactions = llvm::divideCeilSigned(
      total_elements * type_size, kBytesPerMemoryTransaction);

  // Accept some wasted transactions before declaring the access uncoalesced.
  constexpr float kIsCoalescedThreshold = 0.9;
  return min_transactions > actual_transactions * kIsCoalescedThreshold;
}
// Returns a rank-1 shape with the same element type and total element count
// as `shape`, with a default layout. Scalars are returned unchanged.
Shape GetLinearizedShape(const Shape& shape) {
  if (shape.rank() == 0) {
    return shape;
  }
  std::vector<int64_t> dims{ShapeUtil::ElementsIn(shape)};
  auto result = Shape(shape.element_type(), dims,
                      absl::InlinedVector<bool, 4>(dims.size(), false), {});
  *result.mutable_layout() = xla::Layout({0});
  return result;
}
// For each (fusion root, hero operand) pair, composes
//   thread id -> hero operand index -> fusion operand logical index
//   -> linearized physical offset
// and collects the resulting maps per fusion operand. Returns std::nullopt if
// the thread-id indexing is unknown for any hero operand.
std::optional<GroupedByOpIndexingMap> GetThreadIdToInputMemoryLayoutsMaps(
    const HloFusionAdaptor& fusion_adaptor,
    absl::Span<const HloInstruction* const> operands,
    const HloFusionAnalysis& fusion_analysis,
    KernelFusionInterface* fusion_interface, MLIRContext* mlir_context) {
  GroupedByOpIndexingMap result;
  for (const auto& [root_index, hero] :
       llvm::enumerate(fusion_analysis.fusion_heroes())) {
    for (const auto& [hero_operand_index, hero_operand] :
         llvm::enumerate(hero.GetOperands())) {
      // Scalar operands have no access pattern to analyze.
      if (hero_operand.shape().rank() == 0) {
        continue;
      }
      std::optional<IndexingMap> thread_id_to_hero_operand_map =
          fusion_interface->ComputeThreadIdToInputIndexing(
              root_index, hero_operand_index, mlir_context);
      if (!thread_id_to_hero_operand_map.has_value()) {
        return std::nullopt;
      }
      // Indexing from the hero operand to every instruction it transitively
      // reads, keyed by that instruction.
      GroupedByOpIndexingMap instr_indexing_keyed_by_operands =
          ComputeGroupedOutputToInputIndexing(fusion_adaptor, hero_operand,
                                              mlir_context);
      for (const HloInstruction* operand : operands) {
        auto operand_indexing_maps_it =
            instr_indexing_keyed_by_operands.find(operand);
        if (operand_indexing_maps_it ==
            instr_indexing_keyed_by_operands.end()) {
          continue;
        }
        const Shape& operand_shape = operand->shape();
        // Logical index -> physical index within the operand's layout.
        IndexingMap operand_logical_to_physical_map =
            GetIndexingMapFromLogicalToPhysicalLayout(operand_shape,
                                                      mlir_context);
        // Physical index -> offset into the flattened 1D buffer.
        IndexingMap operand_physical_to_linearized_shape = GetBitcastMap(
            ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(
                operand_shape),
            GetLinearizedShape(operand_shape), mlir_context);
        IndexingMap operand_logical_to_linearized_physical_shape =
            operand_logical_to_physical_map *
            operand_physical_to_linearized_shape;
        operand_logical_to_linearized_physical_shape.Simplify();
        for (const IndexingMap& operand_indexing_map :
             operand_indexing_maps_it->second) {
          // An undefined map means the access pattern cannot be analyzed;
          // record only the undefined map for this operand.
          if (operand_indexing_map.IsUndefined()) {
            result[operand] = {operand_indexing_map};
            break;
          }
          IndexingMap logical_output_to_linearized_physical_input_map =
              operand_indexing_map *
              operand_logical_to_linearized_physical_shape;
          IndexingMap thread_id_to_linearized_physical_input_map =
              *thread_id_to_hero_operand_map *
              logical_output_to_linearized_physical_input_map;
          thread_id_to_linearized_physical_input_map.Simplify();
          result[operand].insert(thread_id_to_linearized_physical_input_map);
        }
      }
    }
  }
  return result;
}
// Replaces every runtime variable (RTVar) of `indexing_map` with the midpoint
// of its bounds and drops the RT vars from the map. No-op when the map has no
// RT vars.
void AssignValuesToRTVars(IndexingMap* indexing_map) {
  if (indexing_map->GetRTVarsCount() == 0) {
    return;
  }
  MLIRContext* mlir_context = indexing_map->GetMLIRContext();
  llvm::SmallVector<AffineExpr, 2> symbol_replacements;
  // Range vars keep their symbol ids; the RT vars that follow them are
  // substituted with constants.
  for (int64_t symbol_id = 0; symbol_id < indexing_map->GetRangeVarsCount();
       ++symbol_id) {
    symbol_replacements.push_back(
        mlir::getAffineSymbolExpr(symbol_id, mlir_context));
  }
  for (const IndexingMap::Variable& rt_var : indexing_map->GetRTVars()) {
    // Use the midpoint of the feasible range as a representative value.
    symbol_replacements.push_back(getAffineConstantExpr(
        (rt_var.bounds.lower + rt_var.bounds.upper) / 2, mlir_context));
  }
  AffineMap thread_x_to_input_no_dim_symbols =
      indexing_map->GetAffineMap().replaceDimsAndSymbols(
          {}, symbol_replacements, indexing_map->GetDimVarsCount(),
          indexing_map->GetRangeVarsCount());
  *indexing_map = IndexingMap{thread_x_to_input_no_dim_symbols,
                              indexing_map->GetDimVars(),
                              indexing_map->GetRangeVars(),
                              {}};
  indexing_map->Simplify();
  indexing_map->RemoveUnusedSymbols();
}
// Fixes all range variables except the innermost (last) one to their lower
// bounds, so that only a single loop symbol remains. No-op when the map has
// at most one range var.
void AssignValuesToOuterLoopIVs(IndexingMap* indexing_map) {
  if (indexing_map->GetRangeVarsCount() <= 1) {
    return;
  }
  MLIRContext* mlir_context = indexing_map->GetMLIRContext();
  llvm::SmallVector<AffineExpr, 2> symbol_replacements;
  // All outer loop induction variables become constants (their lower bound).
  for (int64_t symbol_id = 0; symbol_id < indexing_map->GetRangeVarsCount() - 1;
       ++symbol_id) {
    symbol_replacements.push_back(getAffineConstantExpr(
        indexing_map->GetRangeVar(symbol_id).bounds.lower, mlir_context));
  }
  // The innermost loop variable is renumbered to symbol 0.
  symbol_replacements.push_back(mlir::getAffineSymbolExpr(0, mlir_context));
  AffineMap thread_x_to_input_no_dim_symbols =
      indexing_map->GetAffineMap().replaceDimsAndSymbols(
          {}, symbol_replacements, indexing_map->GetDimVarsCount(), 1);
  *indexing_map = IndexingMap{thread_x_to_input_no_dim_symbols,
                              indexing_map->GetDimVars(),
                              {indexing_map->GetRangeVars().back()},
                              {}};
  indexing_map->Simplify();
  indexing_map->RemoveUnusedSymbols();
}
// An affine expression split into a summand that references only dimension 0
// (thread x) and a summand that references only symbol 0 (the inner-loop
// variable). Both parts start out as the zero constant.
struct PartitionedExpr {
  explicit PartitionedExpr(MLIRContext* mlir_context) {
    AffineExpr zero = getAffineConstantExpr(0, mlir_context);
    func_of_d0 = zero;
    func_of_s0 = zero;
  }
  AffineExpr func_of_d0;  // Part depending only on d0 (thread x).
  AffineExpr func_of_s0;  // Part depending only on s0.
};
// Splits `expr` into func(d0) + func(s0). Summands that reference neither
// variable are dropped. Returns std::nullopt if any summand mixes d0 and s0.
std::optional<PartitionedExpr> Partition(AffineExpr expr) {
  PartitionedExpr result(expr.getContext());

  // Iterative DFS over the sum tree: additions are split, everything else is
  // treated as a summand.
  std::stack<AffineExpr> worklist;
  worklist.push(expr);
  while (!worklist.empty()) {
    AffineExpr summand = worklist.top();
    worklist.pop();
    if (auto add = mlir::dyn_cast<AffineBinaryOpExpr>(summand);
        add && add.getKind() == AffineExprKind::Add) {
      worklist.push(add.getLHS());
      worklist.push(add.getRHS());
      continue;
    }
    const bool uses_d0 = summand.isFunctionOfDim(0);
    const bool uses_s0 = summand.isFunctionOfSymbol(0);
    // A summand that couples d0 and s0 cannot be partitioned.
    if (uses_d0 && uses_s0) {
      return std::nullopt;
    }
    if (uses_d0) {
      result.func_of_d0 = summand + result.func_of_d0;
    }
    if (uses_s0) {
      result.func_of_s0 = summand + result.func_of_s0;
    }
  }
  return result;
}
// Recursively enumerates every point of the cartesian product of
// `dimension_ranges` x `symbol_ranges`, evaluates `expr` at each point and
// appends the result to `indices`. `dimensions` and `symbols` hold the
// partially-built point during recursion.
void FindAllIndices(AffineExpr expr, int dim_id, int symbol_id,
                    const std::vector<Interval>& dimension_ranges,
                    const std::vector<Interval>& symbol_ranges,
                    std::vector<int64_t>* dimensions,
                    std::vector<int64_t>* symbols,
                    std::vector<int64_t>* indices) {
  // First fix all dimension values ...
  if (dim_id < dimension_ranges.size()) {
    Interval dim_range = dimension_ranges[dim_id];
    for (int64_t dim_value = dim_range.lower; dim_value <= dim_range.upper;
         ++dim_value) {
      dimensions->push_back(dim_value);
      FindAllIndices(expr, dim_id + 1, symbol_id, dimension_ranges,
                     symbol_ranges, dimensions, symbols, indices);
      dimensions->pop_back();
    }
    return;
  }
  // ... then all symbol values ...
  if (symbol_id < symbol_ranges.size()) {
    Interval symbol_range = symbol_ranges[symbol_id];
    for (int64_t symbol_value = symbol_range.lower;
         symbol_value <= symbol_range.upper; ++symbol_value) {
      symbols->push_back(symbol_value);
      FindAllIndices(expr, dim_id, symbol_id + 1, dimension_ranges,
                     symbol_ranges, dimensions, symbols, indices);
      symbols->pop_back();
    }
    return;
  }
  // ... and finally evaluate the expression at the fully-specified point.
  indices->push_back(EvaluateAffineExpr(expr, *dimensions, *symbols));
}
// Evaluates `expr` over the full cartesian product of `dimension_ranges` and
// `symbol_ranges` and returns the resulting values grouped into maximal
// intervals of consecutive integers.
std::vector<Interval> FindIntervals(
    AffineExpr expr, const std::vector<Interval>& dimension_ranges,
    const std::vector<Interval>& symbol_ranges = {}) {
  // Evaluate the expression at every point of the domain.
  std::vector<int64_t> dimensions, symbols;
  std::vector<int64_t> linear_indices;
  FindAllIndices(expr, 0, 0, dimension_ranges, symbol_ranges, &dimensions,
                 &symbols, &linear_indices);

  // Sort and deduplicate before grouping into runs.
  std::sort(linear_indices.begin(), linear_indices.end());
  linear_indices.erase(
      std::unique(linear_indices.begin(), linear_indices.end()),
      linear_indices.end());

  // Group adjacent values into contiguous [start, end] intervals.
  // NOTE: the bounds must be int64_t; the previous version used `int`, which
  // could truncate 64-bit linearized offsets of large shapes.
  std::vector<Interval> intervals;
  for (size_t i = 0; i < linear_indices.size();) {
    int64_t start = linear_indices[i++];
    int64_t end = start;
    while (i < linear_indices.size() && linear_indices[i] == end + 1) {
      ++end;
      ++i;
    }
    intervals.push_back(Interval{start, end});
  }
  return intervals;
}
std::vector<Interval> ExtendIntervals(const std::vector<Interval>& intervals,
int64_t length) {
std::vector<Interval> overlapped_intervals;
for (int i = 0; i < intervals.size();) {
int64_t lower = intervals[i].lower;
int64_t upper = intervals[i].upper + length;
++i;
while (i < intervals.size() && upper >= intervals[i].lower - 1) {
upper = std::max(upper, intervals[i].upper + length);
++i;
}
overlapped_intervals.push_back(Interval{lower, upper});
}
return overlapped_intervals;
}
// Computes the intervals of linearized offsets accessed by one warp of 32
// threads, given the offset expression split as func(d0) + func(s0).
std::vector<Interval> FindContiguousIntervals(
    const PartitionedExpr& partitioned_expr, const IndexingMap& indexing_map) {
  constexpr int64_t kNumThreadsPerWarp = 32;
  MLIRContext* mlir_context = indexing_map.GetMLIRContext();
  AffineExpr thread_x = mlir::getAffineDimExpr(0, mlir_context);
  AffineExpr range = mlir::getAffineSymbolExpr(0, mlir_context);
  // Case 1: offset == d0 -- consecutive threads access consecutive elements.
  if (partitioned_expr.func_of_d0 == thread_x) {
    return {Interval{0, kNumThreadsPerWarp - 1}};
  }
  // Case 2: offset == d0 * multiplier (+ func(s0)).
  if (auto mul =
          mlir::dyn_cast<AffineBinaryOpExpr>(partitioned_expr.func_of_d0);
      mul && mul.getKind() == AffineExprKind::Mul) {
    if (auto multiplier = mlir::dyn_cast<AffineConstantExpr>(mul.getRHS());
        multiplier) {
      // d0 * -1: reversed but still one contiguous interval per warp.
      if (multiplier.getValue() == -1) {
        return {Interval{0, kNumThreadsPerWarp - 1}};
      }
      if (partitioned_expr.func_of_s0 == range) {
        Interval range_interval = indexing_map.GetSymbolBound(0);
        int64_t num_elems = range_interval.GetLoopTripCount();
        // If the inner loop covers at least |multiplier| elements, the
        // per-thread strided accesses merge into one interval.
        if (num_elems >= std::abs(multiplier.getValue())) {
          return {Interval{0, multiplier.getValue() * (kNumThreadsPerWarp - 1) +
                                  num_elems - 1}};
        }
        // Otherwise each thread contributes its own disjoint interval.
        std::vector<Interval> intervals;
        for (int i = 0, dm = 0; i < kNumThreadsPerWarp;
             ++i, dm += multiplier.getValue()) {
          intervals.push_back(
              {range_interval.lower + dm, range_interval.upper + dm});
        }
        return intervals;
      }
      // No inner-loop contribution: each thread accesses a single element.
      std::vector<Interval> intervals;
      for (int i = 0, dm = 0; i < kNumThreadsPerWarp;
           ++i, dm += multiplier.getValue()) {
        intervals.push_back({dm, dm});
      }
      return intervals;
    }
  }
  // General case: enumerate the d0 part exhaustively, then extend by the
  // inner-loop trip count if offset also depends linearly on s0.
  auto intervals = FindIntervals(partitioned_expr.func_of_d0,
                                 {indexing_map.GetDimVars(0).bounds});
  if (partitioned_expr.func_of_s0 != range) {
    return intervals;
  }
  Interval range_interval = indexing_map.GetSymbolBound(0);
  return ExtendIntervals(intervals, range_interval.GetLoopTripCount() - 1);
}
// Decides whether the memory accesses described by
// `thread_x_to_linearized_input` (thread id -> linearized physical offset)
// are coalesced for elements of `element_type`.
bool IsIndexingCoalesced(IndexingMap& thread_x_to_linearized_input,
                         PrimitiveType element_type) {
  // Undefined indexing cannot be analyzed; treat it as uncoalesced.
  if (thread_x_to_linearized_input.IsUndefined()) {
    return false;
  }
  // A map with no results reads nothing.
  if (thread_x_to_linearized_input.GetAffineMap().getNumResults() == 0) {
    return true;
  }
  // Substitute runtime variables with representative constants.
  AssignValuesToRTVars(&thread_x_to_linearized_input);
  // Restrict to the first warp: thread x in [0, 31], all other dims zero.
  MLIRContext* mlir_context = thread_x_to_linearized_input.GetMLIRContext();
  AffineExpr thread_x_dim = mlir::getAffineDimExpr(
      KernelFusionInterface::kIndexingMapThreadIdxDims[0], mlir_context);
  AffineExpr c0 = getAffineConstantExpr(0, mlir_context);
  IndexingMap thread_x_first_32_elements{
      AffineMap::get(1, 0, {thread_x_dim, c0, c0, c0, c0, c0}, mlir_context),
      {IndexingMap::Variable{{0, 31}}},
      {},
      {}};
  IndexingMap thread_x_to_input_sample =
      thread_x_first_32_elements * thread_x_to_linearized_input;
  thread_x_to_input_sample.Simplify();
  thread_x_to_input_sample.RescaleSymbols();
  thread_x_to_input_sample.RemoveUnusedSymbols();
  // An empty domain means the first warp performs no reads.
  if (thread_x_to_input_sample.IsKnownEmpty()) {
    return true;
  }
  // Fix outer loop induction variables so only one symbol remains.
  AssignValuesToOuterLoopIVs(&thread_x_to_input_sample);
  // Split the offset into func(d0) + func(s0); a coupled expression cannot be
  // analyzed and counts as uncoalesced.
  auto partitioned_expr =
      Partition(thread_x_to_input_sample.GetAffineMap().getResult(0));
  if (!partitioned_expr.has_value()) {
    return false;
  }
  // Only a single constraint on exactly the partitioned expression itself is
  // supported; anything else is treated as uncoalesced.
  if (thread_x_to_input_sample.GetConstraintsCount() > 1 ||
      (thread_x_to_input_sample.GetConstraintsCount() == 1 &&
       thread_x_to_input_sample.GetConstraints().begin()->first !=
           partitioned_expr->func_of_d0 + partitioned_expr->func_of_s0)) {
    return false;
  }
  return EstimateCoalescingViaMemoryTransactionsCount(
      FindContiguousIntervals(*partitioned_expr, thread_x_to_input_sample),
      element_type);
}
}
// Analyzes read coalescing for a standalone fusion instruction. Runs the
// precise per-operand analysis unless the caller requests (or the analysis
// falls back to) the cheap heuristic.
CoalescingAnalysis::CoalescingAnalysis(
    const HloInstruction* instr,
    absl::Span<const HloInstruction* const> operands,
    const HloFusionAnalysis& fusion_analysis,
    KernelFusionInterface* fusion_interface, MLIRContext* mlir_context,
    bool use_heuristic) {
  auto fusion_adaptor = HloFusionAdaptor::ForInstruction(instr);
  const bool precise_analysis_succeeded =
      !use_heuristic &&
      ComputeCoalescingForAllOperands(*fusion_adaptor, operands,
                                      fusion_analysis, fusion_interface,
                                      mlir_context);
  if (precise_analysis_succeeded) return;
  // Fall back to a single heuristic verdict for the whole fusion.
  is_coalesced_computed_by_heuristic_ =
      IsReadCoalescedHeuristic(fusion_analysis.GetEmitterFusionKind(), instr);
}
// Analyzes read coalescing for a producer/consumer fusion pair, mirroring the
// single-instruction constructor above.
CoalescingAnalysis::CoalescingAnalysis(
    const HloInstruction* producer, const HloInstruction* consumer,
    absl::Span<const HloInstruction* const> operands,
    const HloFusionAnalysis& fusion_analysis,
    KernelFusionInterface* fusion_interface, MLIRContext* mlir_context,
    bool use_heuristic) {
  auto fusion_adaptor =
      HloFusionAdaptor::ForProducerConsumer(producer, consumer);
  const bool precise_analysis_succeeded =
      !use_heuristic &&
      ComputeCoalescingForAllOperands(*fusion_adaptor, operands,
                                      fusion_analysis, fusion_interface,
                                      mlir_context);
  if (precise_analysis_succeeded) return;
  // Fall back to a single heuristic verdict for the fused pair.
  is_coalesced_computed_by_heuristic_ = IsReadCoalescedHeuristic(
      fusion_analysis.GetEmitterFusionKind(), producer, consumer);
}
// Computes a per-operand coalescing verdict and stores it in
// `coalescing_per_operand_`. Returns false (leaving the map possibly empty)
// if the thread-id-to-memory-layout maps could not be derived, in which case
// the caller falls back to the heuristic.
bool CoalescingAnalysis::ComputeCoalescingForAllOperands(
    const HloFusionAdaptor& fusion_adaptor,
    absl::Span<const HloInstruction* const> operands,
    const HloFusionAnalysis& fusion_analysis,
    KernelFusionInterface* fusion_interface, MLIRContext* mlir_context) {
  std::optional<GroupedByOpIndexingMap> thread_id_to_input_memory_layouts =
      GetThreadIdToInputMemoryLayoutsMaps(fusion_adaptor, operands,
                                          fusion_analysis, fusion_interface,
                                          mlir_context);
  if (!thread_id_to_input_memory_layouts.has_value()) {
    return false;
  }
  for (const HloInstruction* operand : operands) {
    // Scalar operands are trivially coalesced.
    if (operand->shape().rank() == 0) {
      coalescing_per_operand_.insert({operand, true});
      continue;
    }
    auto operand_indexing_maps =
        thread_id_to_input_memory_layouts->find(operand);
    // Operands with no indexing map are never read from memory, e.g. unused
    // parameters; treat them as coalesced.
    if (operand_indexing_maps == thread_id_to_input_memory_layouts->end()) {
      coalescing_per_operand_.insert({operand, true});
      continue;
    }
    for (IndexingMap operand_indexing_map : operand_indexing_maps->second) {
      bool is_coalesced = IsIndexingCoalesced(operand_indexing_map,
                                              operand->shape().element_type());
      // AND the verdicts across all indexing maps of the same operand: one
      // uncoalesced access pattern makes the whole operand uncoalesced.
      auto [it, inserted] =
          coalescing_per_operand_.insert({operand, is_coalesced});
      if (!inserted) {
        it->second &= is_coalesced;
      }
      if (!is_coalesced) break;
    }
  }
  return true;
}
// Returns the per-operand verdict when the precise analysis produced one,
// otherwise the heuristic verdict computed for the whole fusion.
bool CoalescingAnalysis::IsReadCoalesced(const HloInstruction* operand) const {
  const auto lookup = coalescing_per_operand_.find(operand);
  return lookup == coalescing_per_operand_.end()
             ? is_coalesced_computed_by_heuristic_
             : lookup->second;
}
}
} | #include "xla/service/gpu/model/coalescing_analysis.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/fusions/fusions.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/symbolic_tile_analysis.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/gpu/model/tiled_hlo_instruction.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
// Test fixture that runs CoalescingAnalysis on a fusion and reports the
// per-operand coalescing verdicts, or the coarse heuristic verdict.
class CoalescingTest : public HloTestBase {
 public:
  // Parses `hlo_string` and returns one bool per root operand.
  std::vector<bool> IsReadCoalescedPerOperand(absl::string_view hlo_string) {
    auto module = ParseAndReturnVerifiedModule(hlo_string).value();
    HloInstruction* root = module->entry_computation()->root_instruction();
    return IsReadCoalescedPerOperand(root);
  }
  // Runs the precise CoalescingAnalysis (use_heuristic=false) on `root`.
  std::vector<bool> IsReadCoalescedPerOperand(const HloInstruction* root) {
    auto fusion_adaptor = HloFusionAdaptor::ForInstruction(root);
    auto analysis = HloFusionAnalysis::Create(*root, device_info_);
    auto emitter = GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis});
    auto fusion = dynamic_cast<KernelFusionInterface*>(emitter.get());
    EXPECT_NE(fusion, nullptr);
    CoalescingAnalysis coalescing_analysis(root, root->operands(), analysis,
                                           fusion, &mlir_context_,
                                           false);
    std::vector<bool> results;
    for (const HloInstruction* operand : root->operands()) {
      results.push_back(coalescing_analysis.IsReadCoalesced(operand));
    }
    return results;
  }
  // Evaluates only the heuristic for producer (root's operand 0) + consumer.
  bool IsReadCoalescedHeuristic(absl::string_view hlo_string) {
    auto module = ParseAndReturnVerifiedModule(hlo_string).value();
    HloInstruction* root = module->entry_computation()->root_instruction();
    auto analysis = HloFusionAnalysis::Create(*root, device_info_);
    return xla::gpu::IsReadCoalescedHeuristic(analysis.GetEmitterFusionKind(),
                                              root->operand(0), root);
  }
 protected:
  // Fixed device description so verdicts are deterministic across hosts.
  stream_executor::DeviceDescription device_info_ =
      TestGpuDeviceInfo::RTXA6000DeviceInfo();
  mlir::MLIRContext mlir_context_;
};
// Both operands share the default row-major layout: reads are coalesced.
TEST_F(CoalescingTest, IdentityLayout) {
  absl::string_view ir = R"(
    HloModule m
    fusion {
      p0 = f32[100, 200] parameter(0)
      p1 = f32[100, 200] parameter(1)
      ROOT adthread_x = f32[100, 200] add(p0, p1)
    }
    ENTRY e {
      p0 = f32[100, 200] parameter(0)
      p1 = f32[100, 200] parameter(1)
      ROOT fusion = f32[100, 200] fusion(p0, p1), kind=kInput, calls=fusion
    }
  )";
  EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true, true));
}
// Column-major RHS is read with a large stride: only the LHS is coalesced.
TEST_F(CoalescingTest, RhsTransposedLayout) {
  absl::string_view ir = R"(
    HloModule m
    fusion {
      p0 = f32[100, 200]{1, 0} parameter(0)
      p1 = f32[100, 200]{0, 1} parameter(1)
      ROOT exp = f32[100, 200]{1, 0} add(p0, p1)
    }
    ENTRY e {
      p0 = f32[100, 200]{1, 0} parameter(0)
      p1 = f32[100, 200]{0, 1} parameter(1)
      ROOT fusion = f32[100, 200]{1, 0} fusion(p0, p1), kind=kInput, calls=fusion
    }
  )";
  EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true, false));
}
// The column-major output dictates iteration order, making both row-major
// input reads strided.
TEST_F(CoalescingTest, OutputTransposedLayout) {
  absl::string_view ir = R"(
    HloModule m
    fusion {
      p0 = f32[100, 200]{1, 0} parameter(0)
      p1 = f32[100, 200]{1, 0} parameter(1)
      ROOT exp = f32[100, 200]{0, 1} add(p0, p1)
    }
    ENTRY e {
      p0 = f32[100, 200]{1, 0} parameter(0)
      p1 = f32[100, 200]{1, 0} parameter(1)
      ROOT fusion = f32[100, 200]{0, 1} fusion(p0, p1), kind=kInput, calls=fusion
    }
  )";
  EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(false, false));
}
// Output layout matches the LHS, so only the column-major RHS is uncoalesced.
TEST_F(CoalescingTest, OutputAndLhsTransposedLayout) {
  absl::string_view ir = R"(
    HloModule m
    fusion {
      p0 = f32[100, 200]{1, 0} parameter(0)
      p1 = f32[100, 200]{0, 1} parameter(1)
      ROOT add = f32[100, 200]{1, 0} add(p0, p1)
    }
    ENTRY e {
      p0 = f32[100, 200]{1, 0} parameter(0)
      p1 = f32[100, 200]{0, 1} parameter(1)
      ROOT fusion = f32[100, 200]{1, 0} fusion(p0, p1), kind=kInput, calls=fusion
    }
  )";
  EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true, false));
}
// A transpose fusion is expected to keep its input read coalesced.
TEST_F(CoalescingTest, Transpose) {
  absl::string_view ir = R"(
    HloModule module
    fusion {
      %input = f32[1, 6400, 32] parameter(0)
      ROOT transpose = f32[1, 32, 6400] transpose(%input), dimensions={0, 2, 1}
    }
    ENTRY entry {
      %input = f32[1, 6400, 32] parameter(0)
      ROOT %fusion = f32[1, 32, 6400] fusion(%input), kind=kLoop, calls=fusion
    })";
  EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true));
}
// Heuristic: a transpose whose producer is a broadcast still counts as
// coalesced.
TEST_F(CoalescingTest, TransposeOfBroadcastHeuristic) {
  absl::string_view ir = R"(
    HloModule module
    fusion {
      input = f32[1, 32, 6400] parameter(0)
      ROOT slice = f32[1, 32, 100] slice(input), slice={[0:1:1], [0:32:1], [0:6400:64]}
    }
    ENTRY entry {
      p0 = f32[32] parameter(0)
      broadcast = f32[1, 6400, 32] broadcast(p0), dimensions={2}
      transpose = f32[1, 32, 6400] transpose(broadcast), dimensions={0, 2, 1}
      ROOT %fusion = f32[1, 32, 100] fusion(transpose), kind=kLoop, calls=fusion
    })";
  EXPECT_TRUE(IsReadCoalescedHeuristic(ir));
}
// Heuristic: a transpose of iota (no real memory read) counts as coalesced.
TEST_F(CoalescingTest, TransposeOfIotaHeuristic) {
  absl::string_view ir = R"(
    HloModule module
    fusion {
      p0 = f32[32, 100, 64] parameter(0)
      ROOT slice = f32[32, 100, 1] slice(p0), slice={[0:32:1], [0:100:1], [0:1:1]}
    }
    ENTRY entry {
      iota = f32[100, 64, 32] iota(), iota_dimension=1
      transpose = f32[32, 100, 64] transpose(iota), dimensions={2, 0, 1}
      ROOT %fusion = f32[32, 100, 1] fusion(transpose), kind=kLoop, calls=fusion
    })";
  EXPECT_TRUE(IsReadCoalescedHeuristic(ir));
}
// Heuristic: a transpose of a real computation (add) reads a materialized
// parameter and is treated as uncoalesced.
TEST_F(CoalescingTest, TransposeOfAddHeuristic) {
  absl::string_view ir = R"(
    HloModule module
    fusion {
      p0 = f32[32, 100, 64] parameter(0)
      ROOT slice = f32[32, 100, 1] slice(p0), slice={[0:32:1], [0:100:1], [0:1:1]}
    }
    ENTRY entry {
      input = f32[100, 64, 32] parameter(0)
      add = f32[100, 64, 32] add(input, input)
      transpose = f32[32, 100, 64] transpose(add), dimensions={2, 0, 1}
      ROOT %fusion = f32[32, 100, 1] fusion(transpose), kind=kLoop, calls=fusion
    })";
  EXPECT_FALSE(IsReadCoalescedHeuristic(ir));
}
// Transposing only the outer dims keeps the contiguous minor dimension
// intact, so the read stays coalesced.
TEST_F(CoalescingTest, TransposeOnlyOuterDims) {
  absl::string_view ir = R"(
    HloModule module
    fusion {
      %input = f32[100, 32, 64] parameter(0)
      ROOT transpose = f32[32, 100, 64] transpose(%input), dimensions={1, 0, 2}
    }
    ENTRY entry {
      %input = f32[100, 32, 64] parameter(0)
      ROOT %fusion = f32[32, 100, 64] fusion(%input), kind=kLoop, calls=fusion
    })";
  EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true));
}
// Pad: both the padded operand and the scalar padding value are read
// coalesced.
TEST_F(CoalescingTest, PadOp) {
  absl::string_view ir = R"(
    HloModule module
    fusion {
      p0 = f32[997, 436] parameter(0)
      p1 = f32[] parameter(1)
      ROOT pad = f32[1024, 512] pad(p0, p1), padding=10_17x24_52
    }
    ENTRY entry {
      p0 = f32[997, 436] parameter(0)
      p1 = f32[] parameter(1)
      ROOT %fusion = f32[1024, 512] fusion(p0, p1), kind=kLoop, calls=fusion
    })";
  EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true, true));
}
// Row reduction over the minor dimension reads the input coalesced.
TEST_F(CoalescingTest, RowReduction) {
  absl::string_view ir = R"(
    HloModule module
    add {
      p0 = f32[] parameter(0)
      p1 = f32[] parameter(1)
      ROOT add = f32[] add(p0, p1)
    }
    fusion {
      %input = f32[100,64,512] parameter(0)
      %c0 = f32[] constant(0)
      ROOT reduce = f32[100,64] reduce(%input, %c0), dimensions={2}, to_apply=add
    }
    ENTRY entry {
      %input = f32[100,64,512] parameter(0)
      ROOT %fusion = f32[100,64] fusion(%input), kind=kInput, calls=fusion
    })";
  EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true));
}
// Multi-row reduction (tiny minor dimension): the read is still coalesced.
TEST_F(CoalescingTest, MultiRowReduction) {
  absl::string_view ir = R"(
    HloModule module
    add {
      p0 = f32[] parameter(0)
      p1 = f32[] parameter(1)
      ROOT add = f32[] add(p0, p1)
    }
    fusion {
      %input = f32[100,64,4] parameter(0)
      %c0 = f32[] constant(0)
      ROOT reduce = f32[100,64] reduce(%input, %c0), dimensions={2}, to_apply=add
    }
    ENTRY entry {
      %input = f32[100,64,4] parameter(0)
      ROOT %fusion = f32[100,64] fusion(%input), kind=kInput, calls=fusion
    })";
  EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true));
}
// Column reduction keeps the minor dimension intact, so the read is
// coalesced.
TEST_F(CoalescingTest, ColumnReduction) {
  absl::string_view ir = R"(
    HloModule module
    add {
      p0 = f32[] parameter(0)
      p1 = f32[] parameter(1)
      ROOT add = f32[] add(p0, p1)
    }
    fusion {
      %input = f32[100,64,32] parameter(0)
      %c0 = f32[] constant(0)
      ROOT reduce = f32[100,32] reduce(%input, %c0),
        dimensions={1}, to_apply=add
    }
    ENTRY entry {
      %input = f32[100,64,32] parameter(0)
      ROOT %fusion = f32[100,32] fusion(%input), kind=kInput, calls=fusion
    })";
  EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true));
}
// Variadic reduce lowered via the loop emitter: the big array inputs are
// read uncoalesced, while the scalar init values are coalesced.
TEST_F(CoalescingTest, VariadicReduceViaLoopEmitter) {
  absl::string_view ir = R"(
    HloModule module
    max {
      p0 = s32[] parameter(0)
      p1 = s32[] parameter(1)
      p2 = s32[] parameter(2)
      p3 = s32[] parameter(3)
      max01 = s32[] maximum(p0, p1)
      max23 = s32[] maximum(p2, p3)
      ROOT max = (s32[], s32[]) tuple(max01, max23)
    }
    fusion {
      p0 = s32 [5696,10,4] parameter(0)
      p1 = s32 [5696,10,4] parameter(1)
      p2 = s32[] parameter(2)
      p3 = s32[] parameter(3)
      ROOT reduce = (s32[5696,4], s32[5696,4]) reduce(s32[5696,10,4] p0,
          s32[5696,10,4] p1, s32[] p2, s32[] p3), dimensions={1}, to_apply=max
    }
    ENTRY entry {
      p0 = s32 [5696,10,4] parameter(0)
      p1 = s32 [5696,10,4] parameter(1)
      p2 = s32[] parameter(2)
      p3 = s32[] parameter(3)
      ROOT f = (s32[5696,4], s32[5696,4]) fusion(p0, p1, p2, p3),
          kind=kInput, calls=fusion
    })";
  EXPECT_THAT(IsReadCoalescedPerOperand(ir),
              ElementsAre(false, false, true, true));
}
// The same variadic reduce shape handled by the reduction emitter reads
// every operand coalesced.
TEST_F(CoalescingTest, VariadicReduceViaReductionEmitter) {
  absl::string_view ir = R"(
    HloModule module
    max {
      p0 = s32[] parameter(0)
      p1 = s32[] parameter(1)
      p2 = s32[] parameter(2)
      p3 = s32[] parameter(3)
      max01 = s32[] maximum(p0, p1)
      max23 = s32[] maximum(p2, p3)
      ROOT max = (s32[], s32[]) tuple(max01, max23)
    }
    fusion {
      p0 = s32[32,40] parameter(0)
      p1 = s32[32,40] parameter(1)
      p2 = s32[] parameter(2)
      p3 = s32[] parameter(3)
      ROOT reduce = (s32[32], s32[32])
        reduce(s32[32,40] p0, s32[32,40] p1, s32[] p2, s32[] p3),
        dimensions={1}, to_apply=max
    }
    ENTRY entry {
      p0 = s32[32,40] parameter(0)
      p1 = s32[32,40] parameter(1)
      p2 = s32[] parameter(2)
      p3 = s32[] parameter(3)
      ROOT f = (s32[32], s32[32]) fusion(p0, p1, p2, p3),
          kind=kInput, calls=fusion
    })";
  EXPECT_THAT(IsReadCoalescedPerOperand(ir),
              ElementsAre(true, true, true, true));
}
// Gather: random access into the operand is uncoalesced, while the indices
// array itself is read coalesced.
TEST_F(CoalescingTest, Gather) {
  absl::string_view ir = R"(
    HloModule module
    fusion {
      operand = f32[33, 76, 70] parameter(0)
      indices = s32[1806, 2] parameter(1)
      ROOT gather = f32[1806, 7, 8, 4] gather(operand, indices),
        offset_dims={1,2,3}, collapsed_slice_dims={}, start_index_map={0,1},
        index_vector_dim=1, slice_sizes={7,8,4}
    }
    ENTRY entry {
      p0 = f32[33, 76, 70] parameter(0)
      p1 = s32[1806, 2] parameter(1)
      ROOT %fusion = f32[1806, 7, 8, 4] fusion(p0, p1), kind=kLoop, calls=fusion
    })";
  EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(false, true));
}
// Dynamic-slice: the source and all scalar offsets are read coalesced.
TEST_F(CoalescingTest, DynamicSlice) {
  absl::string_view ir = R"(
    HloModule module
    fusion {
      %src = s32[2,2,258] parameter(0)
      %of1 = s32[] parameter(1)
      %of2 = s32[] parameter(2)
      %of3 = s32[] parameter(3)
      ROOT %ds = s32[1,2,32] dynamic-slice(s32[2,2,258] %src,
        s32[] %of1, s32[] %of2, s32[] %of3),
        dynamic_slice_sizes={1, 2, 32}
    }
    ENTRY entry {
      %p0 = s32[2,2,258] parameter(0)
      %p1 = s32[] parameter(1)
      %p2 = s32[] parameter(2)
      %p3 = s32[] parameter(3)
      ROOT %fusion = s32[1,2,32] fusion(p0, p1, p2, p3), kind=kLoop, calls=fusion
    })";
  EXPECT_THAT(IsReadCoalescedPerOperand(ir),
              ElementsAre(true, true, true, true));
}
// Builds a fusion by hand whose second parameter is never read; unused
// parameters default to coalesced.
TEST_F(CoalescingTest, UnusedParameter) {
  Shape shape = ShapeUtil::MakeShape(F32, {100000});
  auto module = std::make_unique<HloModule>("m", HloModuleConfig{});
  HloComputation::Builder b("b");
  auto p0 = b.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
  auto p1 = b.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
  HloComputation::Builder sub_builder("subcomp");
  HloInstruction* p0f = sub_builder.AddInstruction(
      HloInstruction::CreateParameter(0, shape, "p0f"));
  // p1f is intentionally unused inside the fused computation.
  HloInstruction* p1f = sub_builder.AddInstruction(
      HloInstruction::CreateParameter(1, shape, "p1f"));
  ASSERT_NE(p1f, nullptr);
  sub_builder.AddInstruction(
      HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0f));
  HloComputation* subcomp = module->AddEmbeddedComputation(sub_builder.Build());
  auto fusion = HloInstruction::CreateFusion(
      shape, HloInstruction::FusionKind::kLoop, {p0, p1}, subcomp);
  b.AddInstruction(std::move(fusion));
  module->AddEntryComputation(b.Build());
  EXPECT_THAT(IsReadCoalescedPerOperand(
                  module->entry_computation()->root_instruction()),
              ElementsAre(true, true));
}
// Concatenate + shift fusion: all three parameters are read coalesced.
TEST_F(CoalescingTest, Param) {
  absl::string_view ir = R"(
    HloModule module
    fusion {
      %p0 = u32[48,2,1280] parameter(0)
      %p1 = u32[48,1,1280] parameter(1)
      %p2 = u32[48,1,1280] parameter(2)
      %concat = u32[48,2,1280] concatenate(u32[48,1,1280] %p1,
        u32[48,1,1280] %p2), dimensions={1}
      ROOT %shift = u32[48,2,1280] shift-right-logical(
        u32[48,2,1280] %concat, u32[48,2,1280] %p0)
    }
    ENTRY entry {
      %p0 = u32[48,2,1280] parameter(0)
      %p1 = u32[48,1,1280] parameter(1)
      %p2 = u32[48,1,1280] parameter(2)
      ROOT %fusion = u32[48,2,1280] fusion(p0, p1, p2), kind=kLoop, calls=fusion
    })";
  EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true, true, true));
}
// Fixture for the tile-based coalescing heuristic: tiles the fusion with the
// given tile sizes and evaluates IsTiledReadCoalescedHeuristic per operand.
class CoalescingForTiledHloTest : public CoalescingTest {
 public:
  // Returns one heuristic verdict per operand of the tiled root.
  std::vector<bool> IsTiledReadCoalescedPerOperand(
      const HloInstruction* root, absl::Span<int64_t const> tile_sizes) {
    auto fusion_adaptor = HloFusionAdaptor::ForInstruction(root);
    SymbolicTileAnalysis symbolic_tile_analysis =
        std::get<SymbolicTileAnalysis>(SymbolicTileAnalysis::AnalyzeFusion(
            *fusion_adaptor, &mlir_context_));
    TiledHloComputation tiled_hlo_computation =
        *symbolic_tile_analysis.ComputeTiledHloInstructions(
            tile_sizes, true,
            true);
    const TiledHloInstruction* tiled_hlo_root = tiled_hlo_computation.GetRoot();
    std::vector<bool> result;
    for (const TiledHloInstruction* operand : tiled_hlo_root->operands()) {
      result.push_back(IsTiledReadCoalescedHeuristic(*operand, device_info_));
    }
    return result;
  }
};
// Tile shape decides coalescing for a transpose input: a 1-row tile strides
// through memory, a wide tile reads contiguously.
TEST_F(CoalescingForTiledHloTest, TiledReadCoalescedHeuristic_Transpose) {
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
ENTRY main {
  p0 = f32[2048, 48] parameter(0)
  ROOT transpose = f32[48, 2048] transpose(p0), dimensions={1, 0}
})"));
  const HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {1, 2048}),
              ElementsAre(false));
  EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {48, 32}),
              ElementsAre(true));
}
// Partially masked tiles (minor dim smaller than a DRAM-to-L2 transaction)
// must still be judged correctly.
TEST_F(CoalescingForTiledHloTest,
       TiledReadCoalescedHeuristic_MaskingIsHandledCorrectly) {
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
ENTRY main {
  p0 = f32[2048, 12] parameter(0)
  ROOT transpose = f32[12, 2048] transpose(p0), dimensions={1, 0}
})"));
  const HloInstruction* root = module->entry_computation()->root_instruction();
  constexpr int kNumBytesPerParamRow = 12 * 4;
  // The scenario only makes sense if a transaction spans more than one row.
  ASSERT_GT(device_info_.dram_to_l2_transaction_size_bytes(),
            kNumBytesPerParamRow);
  EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {16, 4}), ElementsAre(true));
  EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {1024, 1}),
              ElementsAre(false));
}
// Each operand's verdict depends on the tile shape relative to its physical
// layout (row-major p0 vs column-major p1).
TEST_F(CoalescingForTiledHloTest, RhsTransposedLayout) {
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
ENTRY main {
  p0 = f32[256, 512]{1,0} parameter(0)
  p1 = f32[256, 512]{0,1} parameter(1)
  ROOT add = f32[256, 512]{1,0} add(p0, p1)
})"));
  const HloInstruction* root = module->entry_computation()->root_instruction();
  // The expectations below assume a 64-byte transaction size.
  constexpr int kExpectedDramToL2TransactionSize = 64;
  ASSERT_EQ(device_info_.dram_to_l2_transaction_size_bytes(),
            kExpectedDramToL2TransactionSize);
  EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {1, 16}),
              ElementsAre(true, false));
  EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {16, 1}),
              ElementsAre(false, true));
  EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {16, 16}),
              ElementsAre(true, true));
  EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {8, 8}),
              ElementsAre(false, false));
}
// s8 elements need a minor tile dimension of at least 64 elements to fill a
// 64-byte DRAM-to-L2 transaction.
TEST_F(CoalescingForTiledHloTest, SmallDataTypes) {
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
ENTRY main {
  p0 = s8[256, 512] parameter(0)
  p1 = s8[256, 512] parameter(1)
  ROOT add = s8[256, 512] add(p0, p1)
})"));
  const HloInstruction* root = module->entry_computation()->root_instruction();
  constexpr int kExpectedDramToL2TransactionSize = 64;
  ASSERT_EQ(device_info_.dram_to_l2_transaction_size_bytes(),
            kExpectedDramToL2TransactionSize);
  EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {16, 16}),
              ElementsAre(false, false));
  EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {16, 32}),
              ElementsAre(false, false));
  EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {16, 64}),
              ElementsAre(true, true));
  EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {16, 128}),
              ElementsAre(true, true));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/coalescing_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/coalescing_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bf3e0057-6b6b-4aa4-8a31-26a70b5a6de1 | cpp | tensorflow/tensorflow | batch_scheduler | tensorflow/core/kernels/batching_util/batch_scheduler.cc | tensorflow/core/kernels/batching_util/batch_scheduler_test.cc | #include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
namespace tensorflow {
namespace serving {
// Maps the string value of the mixed-priority-batching attribute onto the
// corresponding MixedPriorityBatchingPolicy enumerator. Returns
// InvalidArgumentError for any unrecognized string.
absl::StatusOr<MixedPriorityBatchingPolicy> GetMixedPriorityBatchingPolicy(
    absl::string_view attr_value) {
  if (attr_value == kLowPriorityPaddingWithMaxBatchSizeAttrValue) {
    return MixedPriorityBatchingPolicy::kLowPriorityPaddingWithMaxBatchSize;
  }
  if (attr_value == kLowPriorityPaddingWithNextAllowedBatchSizeAttrValue) {
    return MixedPriorityBatchingPolicy::
        kLowPriorityPaddingWithNextAllowedBatchSize;
  }
  if (attr_value == kPriorityIsolationAttrValue) {
    return MixedPriorityBatchingPolicy::kPriorityIsolation;
  }
  // Anything else is a configuration error surfaced to the caller.
  return absl::InvalidArgumentError(absl::StrFormat(
      "Unknown mixed priority batching policy: %s", attr_value));
}
}
} | #include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/criticality.h"
namespace tensorflow {
namespace serving {
namespace {
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::Pointer;
using ::testing::Property;
// An unrecognized attribute string yields InvalidArgument with the offending
// value in the message.
TEST(MixedPriorityBatchingPolicyTest, InvalidAttrValueError) {
  EXPECT_THAT(
      GetMixedPriorityBatchingPolicy("invalid_attr_value"),
      testing::StatusIs(
          absl::StatusCode::kInvalidArgument,
          ::testing::HasSubstr(
              "Unknown mixed priority batching policy: invalid_attr_value")));
}
// Parameterized suite: each (attr string, expected policy) pair must round
// trip through GetMixedPriorityBatchingPolicy.
using MixedPriorityBatchingPolicyParameterizedTest = ::testing::TestWithParam<
    std::tuple<std::string, MixedPriorityBatchingPolicy>>;
TEST_P(MixedPriorityBatchingPolicyParameterizedTest,
       GetMixedPriorityBatchingPolicySuccess) {
  auto [attr_name, policy] = GetParam();
  EXPECT_THAT(GetMixedPriorityBatchingPolicy(attr_name),
              testing::IsOkAndHolds(Eq(policy)));
}
// One tuple per recognized attribute value.
INSTANTIATE_TEST_SUITE_P(
    Parameter, MixedPriorityBatchingPolicyParameterizedTest,
    ::testing::Values(
        std::make_tuple(
            kLowPriorityPaddingWithMaxBatchSizeAttrValue,
            MixedPriorityBatchingPolicy::
                kLowPriorityPaddingWithMaxBatchSize),
        std::make_tuple(
            kLowPriorityPaddingWithNextAllowedBatchSizeAttrValue,
            MixedPriorityBatchingPolicy::
                kLowPriorityPaddingWithNextAllowedBatchSize),
        std::make_tuple(
            kPriorityIsolationAttrValue,
            MixedPriorityBatchingPolicy::kPriorityIsolation)));
// Minimal BatchTask whose only state is a fixed size; used to exercise the
// scheduler containers in the tests below.
class FakeTask : public BatchTask {
 public:
  explicit FakeTask(size_t size) : task_size_(size) {}
  ~FakeTask() override = default;
  size_t size() const override { return task_size_; }

 private:
  const size_t task_size_;
  // Non-copyable: a task is owned by exactly one queue/batch at a time.
  FakeTask(const FakeTask&) = delete;
  void operator=(const FakeTask&) = delete;
};
// A task that does not override criticality() reports kCritical.
TEST(TaskCriticalityTest, CriticalityDefaultsToCritical) {
  FakeTask fake_task(0);
  EXPECT_EQ(fake_task.criticality(), tsl::criticality::Criticality::kCritical);
}
// A freshly constructed queue is empty with zero tasks and zero total size.
TEST(TaskQueueTest, EmptyTaskQueue) {
  TaskQueue<FakeTask> task_queue;
  EXPECT_TRUE(task_queue.empty());
  EXPECT_EQ(0, task_queue.num_tasks());
  EXPECT_EQ(0, task_queue.size());
}
// Adding one task updates emptiness, task count, and total size.
TEST(TaskQueueTest, AddTaskToTaskQueue) {
  TaskQueue<FakeTask> task_queue;
  task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
  EXPECT_FALSE(task_queue.empty());
  EXPECT_EQ(1, task_queue.num_tasks());
  EXPECT_EQ(1, task_queue.size());
}
// size() accumulates the sizes of all added tasks.
TEST(TaskQueueTest, AddTasksToTaskQueue) {
  TaskQueue<FakeTask> task_queue;
  task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
  EXPECT_FALSE(task_queue.empty());
  EXPECT_EQ(1, task_queue.num_tasks());
  EXPECT_EQ(1, task_queue.size());
  task_queue.AddTask(std::make_unique<FakeTask>(2), 2);
  EXPECT_FALSE(task_queue.empty());
  EXPECT_EQ(2, task_queue.num_tasks());
  EXPECT_EQ(3, task_queue.size());
  task_queue.AddTask(std::make_unique<FakeTask>(3), 3);
  EXPECT_FALSE(task_queue.empty());
  EXPECT_EQ(3, task_queue.num_tasks());
  EXPECT_EQ(6, task_queue.size());
}
// Removing the only task returns it and leaves the queue empty.
TEST(TaskQueueTest, RemoveTaskFromTaskQueueWithSingleTask) {
  TaskQueue<FakeTask> task_queue;
  task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
  EXPECT_FALSE(task_queue.empty());
  EXPECT_EQ(1, task_queue.num_tasks());
  EXPECT_EQ(1, task_queue.size());
  EXPECT_THAT(task_queue.RemoveTask(),
              Pointee(Property(&FakeTask::size, Eq(1))));
  EXPECT_TRUE(task_queue.empty());
  EXPECT_EQ(0, task_queue.num_tasks());
  EXPECT_EQ(0, task_queue.size());
}
// RemoveTask() pops tasks in FIFO order and updates the accounting.
TEST(TaskQueueTest, RemoveTaskFromTaskQueueWithMultipleTasks) {
  TaskQueue<FakeTask> task_queue;
  task_queue.AddTask(std::make_unique<FakeTask>(2), 1);
  EXPECT_FALSE(task_queue.empty());
  EXPECT_EQ(1, task_queue.num_tasks());
  EXPECT_EQ(2, task_queue.size());
  task_queue.AddTask(std::make_unique<FakeTask>(1), 2);
  EXPECT_FALSE(task_queue.empty());
  EXPECT_EQ(2, task_queue.num_tasks());
  EXPECT_EQ(3, task_queue.size());
  EXPECT_THAT(task_queue.RemoveTask(),
              Pointee(Property(&FakeTask::size, Eq(2))));
  EXPECT_FALSE(task_queue.empty());
  EXPECT_EQ(1, task_queue.num_tasks());
  EXPECT_EQ(1, task_queue.size());
}
// RemoveTask(size) pops whole tasks from the front up to the size budget.
TEST(TaskQueueTest, RemoveTasksFromTaskQueue) {
  TaskQueue<FakeTask> task_queue;
  task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
  EXPECT_FALSE(task_queue.empty());
  EXPECT_EQ(1, task_queue.num_tasks());
  EXPECT_EQ(1, task_queue.size());
  task_queue.AddTask(std::make_unique<FakeTask>(2), 2);
  EXPECT_FALSE(task_queue.empty());
  EXPECT_EQ(2, task_queue.num_tasks());
  EXPECT_EQ(3, task_queue.size());
  task_queue.AddTask(std::make_unique<FakeTask>(3), 3);
  EXPECT_FALSE(task_queue.empty());
  EXPECT_EQ(3, task_queue.num_tasks());
  EXPECT_EQ(6, task_queue.size());
  EXPECT_THAT(task_queue.RemoveTask(3),
              ElementsAre(Pointee(Property(&FakeTask::size, Eq(1))),
                          Pointee(Property(&FakeTask::size, Eq(2)))));
  EXPECT_FALSE(task_queue.empty());
  EXPECT_EQ(1, task_queue.num_tasks());
  EXPECT_EQ(3, task_queue.size());
}
// RemoveTask(size) never splits a task: it stops before exceeding the budget.
TEST(TaskQueueTest, RemoveTasksFewerThanArgFromTaskQueue) {
  TaskQueue<FakeTask> task_queue;
  task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
  EXPECT_FALSE(task_queue.empty());
  EXPECT_EQ(1, task_queue.num_tasks());
  EXPECT_EQ(1, task_queue.size());
  task_queue.AddTask(std::make_unique<FakeTask>(2), 2);
  EXPECT_FALSE(task_queue.empty());
  EXPECT_EQ(2, task_queue.num_tasks());
  EXPECT_EQ(3, task_queue.size());
  task_queue.AddTask(std::make_unique<FakeTask>(3), 3);
  EXPECT_FALSE(task_queue.empty());
  EXPECT_EQ(3, task_queue.num_tasks());
  EXPECT_EQ(6, task_queue.size());
  EXPECT_THAT(task_queue.RemoveTask(5),
              ElementsAre(Pointee(Property(&FakeTask::size, Eq(1))),
                          Pointee(Property(&FakeTask::size, Eq(2)))));
  EXPECT_FALSE(task_queue.empty());
  EXPECT_EQ(1, task_queue.num_tasks());
  EXPECT_EQ(3, task_queue.size());
}
// A budget larger than the queue's total size drains the whole queue.
TEST(TaskQueueTest, RemoveAllTasksWhenArgGreaterThanTaskSize) {
  TaskQueue<FakeTask> task_queue;
  task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
  EXPECT_FALSE(task_queue.empty());
  EXPECT_EQ(1, task_queue.num_tasks());
  EXPECT_EQ(1, task_queue.size());
  task_queue.AddTask(std::make_unique<FakeTask>(2), 2);
  EXPECT_FALSE(task_queue.empty());
  EXPECT_EQ(2, task_queue.num_tasks());
  EXPECT_EQ(3, task_queue.size());
  task_queue.AddTask(std::make_unique<FakeTask>(3), 3);
  EXPECT_FALSE(task_queue.empty());
  EXPECT_EQ(3, task_queue.num_tasks());
  EXPECT_EQ(6, task_queue.size());
  EXPECT_THAT(task_queue.RemoveTask(8),
              ElementsAre(Pointee(Property(&FakeTask::size, Eq(1))),
                          Pointee(Property(&FakeTask::size, Eq(2))),
                          Pointee(Property(&FakeTask::size, Eq(3)))));
  EXPECT_TRUE(task_queue.empty());
  EXPECT_EQ(0, task_queue.num_tasks());
  EXPECT_EQ(0, task_queue.size());
}
// An empty queue has no earliest task start time.
TEST(TaskQueueTest, EarliestStartTimeWithEmptyQueue) {
  TaskQueue<FakeTask> task_queue;
  EXPECT_FALSE(task_queue.EarliestTaskStartTime().has_value());
}
// The earliest start time is that of the oldest (front) task.
TEST(TaskQueueTest, EarliestStartTimeWithMultipleTasksInQueue) {
  TaskQueue<FakeTask> task_queue;
  task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
  task_queue.AddTask(std::make_unique<FakeTask>(2), 2);
  std::optional<uint64_t> result = task_queue.EarliestTaskStartTime();
  EXPECT_TRUE(result.has_value());
  EXPECT_EQ(*result, 1);
}
// After removing the front tasks, the earliest start time advances to the
// new front task's start time.
TEST(TaskQueueTest, EarliestStartTimeAfterTaskRemoval) {
  TaskQueue<FakeTask> task_queue;
  task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
  task_queue.AddTask(std::make_unique<FakeTask>(2), 2);
  task_queue.AddTask(std::make_unique<FakeTask>(3), 3);
  std::optional<uint64_t> result = task_queue.EarliestTaskStartTime();
  EXPECT_TRUE(result.has_value());
  EXPECT_EQ(*result, 1);
  EXPECT_THAT(task_queue.RemoveTask(3),
              ElementsAre(Pointee(Property(&FakeTask::size, Eq(1))),
                          Pointee(Property(&FakeTask::size, Eq(2)))));
  result = task_queue.EarliestTaskStartTime();
  EXPECT_TRUE(result.has_value());
  EXPECT_EQ(*result, 3);
}
// End-to-end exercise of Batch: add, inspect, close, and LIFO RemoveTask.
TEST(BatchTest, Basic) {
  Batch<FakeTask> batch;
  EXPECT_EQ(0, batch.num_tasks());
  EXPECT_TRUE(batch.empty());
  EXPECT_EQ(0, batch.size());
  EXPECT_FALSE(batch.IsClosed());
  auto task0 = new FakeTask(3);
  batch.AddTask(std::unique_ptr<FakeTask>(task0));
  EXPECT_EQ(1, batch.num_tasks());
  EXPECT_FALSE(batch.empty());
  EXPECT_EQ(task0->size(), batch.size());
  EXPECT_EQ(task0->size(), batch.task(0).size());
  EXPECT_FALSE(batch.IsClosed());
  auto task1 = new FakeTask(7);
  batch.AddTask(std::unique_ptr<FakeTask>(task1));
  EXPECT_EQ(2, batch.num_tasks());
  EXPECT_FALSE(batch.empty());
  EXPECT_EQ(task0->size() + task1->size(), batch.size());
  EXPECT_EQ(task1->size(), batch.task(1).size());
  EXPECT_EQ(task1->size(), batch.mutable_task(1)->size());
  EXPECT_FALSE(batch.IsClosed());
  batch.Close();
  EXPECT_TRUE(batch.IsClosed());
  EXPECT_EQ(2, batch.num_tasks());
  EXPECT_FALSE(batch.empty());
  EXPECT_EQ(task0->size() + task1->size(), batch.size());
  EXPECT_EQ(task0->size(), batch.task(0).size());
  EXPECT_EQ(task1->size(), batch.task(1).size());
  // RemoveTask pops from the back: the size-7 task comes out first.
  EXPECT_EQ(7, batch.RemoveTask()->size());
  EXPECT_EQ(3, batch.size());
  EXPECT_EQ(3, batch.RemoveTask()->size());
  EXPECT_EQ(0, batch.size());
  EXPECT_TRUE(batch.empty());
}
// WaitUntilClosed() blocks until another thread calls Close().
TEST(BatchTest, WaitUntilClosed) {
  Batch<FakeTask> batch;
  batch.AddTask(std::unique_ptr<FakeTask>(new FakeTask(3)));
  EXPECT_FALSE(batch.IsClosed());
  std::unique_ptr<Thread> close_thread(
      Env::Default()->StartThread(ThreadOptions(), "test", [&batch]() {
        Env::Default()->SleepForMicroseconds(100);
        batch.Close();
      }));
  batch.WaitUntilClosed();
  EXPECT_TRUE(batch.IsClosed());
}
// Deleting a Batch blocks until the batch is closed: the deleter thread must
// not finish before Close() is called.
TEST(BatchTest, DeletionBlocksUntilClosed) {
  Batch<FakeTask>* batch = new Batch<FakeTask>;
  batch->AddTask(std::unique_ptr<FakeTask>(new FakeTask(3)));
  EXPECT_FALSE(batch->IsClosed());
  Notification do_delete, deleted;
  std::unique_ptr<Thread> delete_thread(Env::Default()->StartThread(
      ThreadOptions(), "test", [&batch, &do_delete, &deleted]() {
        do_delete.WaitForNotification();
        delete batch;
        deleted.Notify();
      }));
  do_delete.Notify();
  // Give the deleter a chance to (incorrectly) finish; it must still be
  // blocked because the batch is open.
  Env::Default()->SleepForMicroseconds(10 * 1000 );
  EXPECT_FALSE(deleted.HasBeenNotified());
  batch->Close();
  deleted.WaitForNotification();
}
// RemoveAllTasks() hands back every task in insertion order and is safe to
// call repeatedly (subsequent calls return empty).
TEST(BatchTest, RemoveAllTasks) {
  Batch<FakeTask> batch;
  auto task0 = new FakeTask(3);
  batch.AddTask(std::unique_ptr<FakeTask>(task0));
  auto task1 = new FakeTask(7);
  batch.AddTask(std::unique_ptr<FakeTask>(task1));
  batch.Close();
  EXPECT_TRUE(batch.IsClosed());
  std::vector<std::unique_ptr<FakeTask>> tasks_in_batch =
      batch.RemoveAllTasks();
  EXPECT_EQ(2, tasks_in_batch.size());
  EXPECT_TRUE(batch.empty());
  EXPECT_EQ(task0, tasks_in_batch[0].get());
  EXPECT_EQ(task1, tasks_in_batch[1].get());
  EXPECT_THAT(batch.RemoveAllTasks(), ::testing::IsEmpty());
  EXPECT_THAT(batch.RemoveAllTasks(), ::testing::IsEmpty());
}
// TryTrimToNewSize removes whole tasks from the back until the batch fits
// the new size, returning the trimmed tasks in their original order.
TEST(BatchTest, TryTrimToNewSizeTrimsAndReturnsTrimmedElementsInOrder) {
  Batch<FakeTask> batch;
  auto task0 = new FakeTask(3);
  batch.AddTask(std::unique_ptr<FakeTask>(task0));
  auto task1 = new FakeTask(5);
  batch.AddTask(std::unique_ptr<FakeTask>(task1));
  auto task2 = new FakeTask(7);
  batch.AddTask(std::unique_ptr<FakeTask>(task2));
  auto task3 = new FakeTask(9);
  batch.AddTask(std::unique_ptr<FakeTask>(task3));
  std::vector<std::unique_ptr<FakeTask>> trimmed_tasks;
  batch.TryTrimToNewSize( 8,
                          trimmed_tasks);
  EXPECT_EQ(batch.size(), 8);
  EXPECT_EQ(batch.num_tasks(), 2);
  EXPECT_THAT(trimmed_tasks, ElementsAre(Pointer(task2), Pointer(task3)));
  batch.Close();
}
// Trimming to a size that would fall in the middle of a task is a no-op:
// TryTrimToNewSize never splits a task.
TEST(BatchTest, TryTrimToNewSizeDoesNotTrimWhenItWouldNeedToSplitATask) {
  Batch<FakeTask> batch;
  batch.AddTask(std::make_unique<FakeTask>(3));
  batch.AddTask(std::make_unique<FakeTask>(5));
  std::vector<std::unique_ptr<FakeTask>> trimmed_tasks;
  // Size 4 lands inside the second task (3 + 5), so nothing is trimmed.
  batch.TryTrimToNewSize(/*new_size=*/4, trimmed_tasks);
  EXPECT_EQ(batch.size(), 8);
  EXPECT_EQ(batch.num_tasks(), 2);
  EXPECT_TRUE(trimmed_tasks.empty());
  batch.Close();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/batch_scheduler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/batch_scheduler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
54d68a94-04d4-4a1e-9f32-a3aab696534c | cpp | abseil/abseil-cpp | gaussian_distribution | absl/random/gaussian_distribution.cc | absl/random/gaussian_distribution_test.cc | #include "absl/random/gaussian_distribution.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
const gaussian_distribution_base::Tables
gaussian_distribution_base::zg_ = {
{3.7130862467425505, 3.442619855899000214, 3.223084984581141565,
3.083228858216868318, 2.978696252647779819, 2.894344007021528942,
2.82312535054891045, 2.761169372387176857, 2.706113573121819549,
2.656406411261359679, 2.610972248431847387, 2.56903362592493778,
2.530009672388827457, 2.493454522095372106, 2.459018177411830486,
2.426420645533749809, 2.395434278011062457, 2.365871370117638595,
2.337575241339236776, 2.310413683698762988, 2.284274059677471769,
2.25905957386919809, 2.234686395590979036, 2.21108140887870297,
2.188180432076048731, 2.165926793748921497, 2.144270182360394905,
2.123165708673976138, 2.102573135189237608, 2.082456237992015957,
2.062782274508307978, 2.043521536655067194, 2.02464697337738464,
2.006133869963471206, 1.987959574127619033, 1.970103260854325633,
1.952545729553555764, 1.935269228296621957, 1.918257300864508963,
1.901494653105150423, 1.884967035707758143, 1.868661140994487768,
1.852564511728090002, 1.836665460258444904, 1.820952996596124418,
1.805416764219227366, 1.790046982599857506, 1.77483439558606837,
1.759770224899592339, 1.744846128113799244, 1.730054160563729182,
1.71538674071366648, 1.700836618569915748, 1.686396846779167014,
1.6720607540975998, 1.657821920954023254, 1.643674156862867441,
1.629611479470633562, 1.615628095043159629, 1.601718380221376581,
1.587876864890574558, 1.574098216022999264, 1.560377222366167382,
1.546708779859908844, 1.533087877674041755, 1.519509584765938559,
1.505969036863201937, 1.492461423781352714, 1.478981976989922842,
1.465525957342709296, 1.452088642889222792, 1.438665316684561546,
1.425251254514058319, 1.411841712447055919, 1.398431914131003539,
1.385017037732650058, 1.371592202427340812, 1.358152454330141534,
1.34469275175354519, 1.331207949665625279, 1.317692783209412299,
1.304141850128615054, 1.290549591926194894, 1.27691027356015363,
1.263217961454619287, 1.249466499573066436, 1.23564948326336066,
1.221760230539994385, 1.207791750415947662, 1.193736707833126465,
1.17958738466398616, 1.165335636164750222, 1.150972842148865416,
1.136489852013158774, 1.121876922582540237, 1.107123647534034028,
1.092218876907275371, 1.077150624892893482, 1.061905963694822042,
1.046470900764042922, 1.030830236068192907, 1.014967395251327842,
0.9988642334929808131, 0.9825008035154263464, 0.9658550794011470098,
0.9489026255113034436, 0.9316161966151479401, 0.9139652510230292792,
0.8959153525809346874, 0.8774274291129204872, 0.8584568431938099931,
0.8389522142975741614, 0.8188539067003538507, 0.7980920606440534693,
0.7765839878947563557, 0.7542306644540520688, 0.7309119106424850631,
0.7064796113354325779, 0.6807479186691505202, 0.6534786387399710295,
0.6243585973360461505, 0.5929629424714434327, 0.5586921784081798625,
0.5206560387620546848, 0.4774378372966830431, 0.4265479863554152429,
0.3628714310970211909, 0.2723208648139477384, 0},
{0.001014352564120377413, 0.002669629083880922793, 0.005548995220771345792,
0.008624484412859888607, 0.01183947865788486861, 0.01516729801054656976,
0.01859210273701129151, 0.02210330461592709475, 0.02569329193593428151,
0.02935631744000685023, 0.03308788614622575758, 0.03688438878665621645,
0.04074286807444417458, 0.04466086220049143157, 0.04863629585986780496,
0.05266740190305100461, 0.05675266348104984759, 0.06089077034804041277,
0.06508058521306804567, 0.06932111739357792179, 0.07361150188411341722,
0.07795098251397346301, 0.08233889824223575293, 0.08677467189478028919,
0.09125780082683036809, 0.095787849121731522, 0.1003644410286559929,
0.1049872554094214289, 0.1096560210148404546, 0.1143705124488661323,
0.1191305467076509556, 0.1239359802028679736, 0.1287867061959434012,
0.1336826525834396151, 0.1386237799845948804, 0.1436100800906280339,
0.1486415742423425057, 0.1537183122081819397, 0.1588403711394795748,
0.1640078546834206341, 0.1692208922373653057, 0.1744796383307898324,
0.1797842721232958407, 0.1851349970089926078, 0.1905320403191375633,
0.1959756531162781534, 0.2014661100743140865, 0.2070037094399269362,
0.2125887730717307134, 0.2182216465543058426, 0.2239026993850088965,
0.229632325232116602, 0.2354109422634795556, 0.2412389935454402889,
0.2471169475123218551, 0.2530452985073261551, 0.2590245673962052742,
0.2650553022555897087, 0.271138079138385224, 0.2772735029191887857,
0.2834622082232336471, 0.2897048604429605656, 0.2960021568469337061,
0.3023548277864842593, 0.3087636380061818397, 0.3152293880650116065,
0.3217529158759855901, 0.3283350983728509642, 0.3349768533135899506,
0.3416791412315512977, 0.3484429675463274756, 0.355269384847918035,
0.3621594953693184626, 0.3691144536644731522, 0.376135469510563536,
0.3832238110559021416, 0.3903808082373155797, 0.3976078564938743676,
0.404906420807223999, 0.4122780401026620578, 0.4197243320495753771,
0.4272469983049970721, 0.4348478302499918513, 0.4425287152754694975,
0.4502916436820402768, 0.458138716267873114, 0.4660721526894572309,
0.4740943006930180559, 0.4822076463294863724, 0.4904148252838453348,
0.4987186354709807201, 0.5071220510755701794, 0.5156282382440030565,
0.5242405726729852944, 0.5329626593838373561, 0.5417983550254266145,
0.5507517931146057588, 0.5598274127040882009, 0.5690299910679523787,
0.5783646811197646898, 0.5878370544347081283, 0.5974531509445183408,
0.6072195366251219584, 0.6171433708188825973, 0.6272324852499290282,
0.6374954773350440806, 0.6479418211102242475, 0.6585820000500898219,
0.6694276673488921414, 0.6804918409973358395, 0.6917891434366769676,
0.7033360990161600101, 0.7151515074105005976, 0.7272569183441868201,
0.7396772436726493094, 0.7524415591746134169, 0.7655841738977066102,
0.7791460859296898134, 0.7931770117713072832, 0.8077382946829627652,
0.8229072113814113187, 0.8387836052959920519, 0.8555006078694531446,
0.873243048910072206, 0.8922816507840289901, 0.9130436479717434217,
0.9362826816850632339, 0.9635996931270905952, 1}};
}
ABSL_NAMESPACE_END
} | #include "absl/random/gaussian_distribution.h"
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <ios>
#include <iterator>
#include <random>
#include <string>
#include <type_traits>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/macros.h"
#include "absl/log/log.h"
#include "absl/numeric/internal/representation.h"
#include "absl/random/internal/chi_square.h"
#include "absl/random/internal/distribution_test_util.h"
#include "absl/random/internal/sequence_urbg.h"
#include "absl/random/random.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/strip.h"
namespace {
using absl::random_internal::kChiSquared;
template <typename RealType>
class GaussianDistributionInterfaceTest : public ::testing::Test {};
using RealTypes =
std::conditional<absl::numeric_internal::IsDoubleDouble(),
::testing::Types<float, double>,
::testing::Types<float, double, long double>>::type;
TYPED_TEST_SUITE(GaussianDistributionInterfaceTest, RealTypes);
TYPED_TEST(GaussianDistributionInterfaceTest, SerializeTest) {
using param_type =
typename absl::gaussian_distribution<TypeParam>::param_type;
const TypeParam kParams[] = {
1,
std::nextafter(TypeParam(1), TypeParam(0)),
std::nextafter(TypeParam(1), TypeParam(2)),
TypeParam(1e-8), TypeParam(1e-4), TypeParam(2), TypeParam(1e4),
TypeParam(1e8), TypeParam(1e20), TypeParam(2.5),
std::numeric_limits<TypeParam>::infinity(),
std::numeric_limits<TypeParam>::max(),
std::numeric_limits<TypeParam>::epsilon(),
std::nextafter(std::numeric_limits<TypeParam>::min(),
TypeParam(1)),
std::numeric_limits<TypeParam>::min(),
std::numeric_limits<TypeParam>::denorm_min(),
std::numeric_limits<TypeParam>::min() / 2,
std::nextafter(std::numeric_limits<TypeParam>::min(),
TypeParam(0)),
};
constexpr int kCount = 1000;
absl::InsecureBitGen gen;
for (const auto mod : {0, 1, 2, 3}) {
for (const auto x : kParams) {
if (!std::isfinite(x)) continue;
for (const auto y : kParams) {
const TypeParam mean = (mod & 0x1) ? -x : x;
const TypeParam stddev = (mod & 0x2) ? -y : y;
const param_type param(mean, stddev);
absl::gaussian_distribution<TypeParam> before(mean, stddev);
EXPECT_EQ(before.mean(), param.mean());
EXPECT_EQ(before.stddev(), param.stddev());
{
absl::gaussian_distribution<TypeParam> via_param(param);
EXPECT_EQ(via_param, before);
EXPECT_EQ(via_param.param(), before.param());
}
auto sample_min = before.max();
auto sample_max = before.min();
for (int i = 0; i < kCount; i++) {
auto sample = before(gen);
if (sample > sample_max) sample_max = sample;
if (sample < sample_min) sample_min = sample;
EXPECT_GE(sample, before.min()) << before;
EXPECT_LE(sample, before.max()) << before;
}
if (!std::is_same<TypeParam, long double>::value) {
LOG(INFO) << "Range{" << mean << ", " << stddev << "}: " << sample_min
<< ", " << sample_max;
}
std::stringstream ss;
ss << before;
if (!std::isfinite(mean) || !std::isfinite(stddev)) {
continue;
}
absl::gaussian_distribution<TypeParam> after(-0.53f, 2.3456f);
EXPECT_NE(before.mean(), after.mean());
EXPECT_NE(before.stddev(), after.stddev());
EXPECT_NE(before.param(), after.param());
EXPECT_NE(before, after);
ss >> after;
EXPECT_EQ(before.mean(), after.mean());
EXPECT_EQ(before.stddev(), after.stddev())
<< ss.str() << " "
<< (ss.good() ? "good " : "")
<< (ss.bad() ? "bad " : "")
<< (ss.eof() ? "eof " : "")
<< (ss.fail() ? "fail " : "");
}
}
}
}
// Closed-form moments of a normal distribution N(mean, stddev^2), used as
// the reference model when validating sampled data.
class GaussianModel {
 public:
  GaussianModel(double mean, double stddev) : mean_(mean), stddev_(stddev) {}

  // First and second moments.
  double mean() const { return mean_; }
  double stddev() const { return stddev_; }
  double variance() const { return stddev() * stddev(); }

  // A normal distribution is symmetric (zero skew) and has kurtosis 3.
  double skew() const { return 0; }
  double kurtosis() const { return 3.0; }

  // Quantile function: the x such that CDF(x) == p, for p in [0, 1).
  double InverseCDF(double p) {
    ABSL_ASSERT(p >= 0.0);
    ABSL_ASSERT(p < 1.0);
    return mean() + stddev() * -absl::random_internal::InverseNormalSurvival(p);
  }

 private:
  const double mean_;
  const double stddev_;
};
// One distribution-test configuration.
struct Param {
  double mean;    // mean of the gaussian under test
  double stddev;  // standard deviation of the gaussian under test
  double p_fail;  // admissible probability that a single trial fails
  int trials;     // number of independent trials to run
};
// Parameterized fixture: runs statistical checks of absl::gaussian_distribution
// against the closed-form GaussianModel for the Param under test.
class GaussianDistributionTests : public testing::TestWithParam<Param>,
                                  public GaussianModel {
 public:
  GaussianDistributionTests()
      : GaussianModel(GetParam().mean, GetParam().stddev) {}

  // Draws `samples` values and applies a z-test at significance `p`; returns
  // true when the sample mean is consistent with the model.
  template <typename D>
  bool SingleZTest(const double p, const size_t samples);

  // Draws samples into equiprobable buckets and returns the chi-squared
  // p-value of the observed counts.
  template <typename D>
  double SingleChiSquaredTest();

  // Fixed-seed engine so the statistical tests are deterministic.
  absl::random_internal::pcg64_2018_engine rng_{0x2B7E151628AED2A6};
};
// Draws `samples` values from distribution D(mean, stddev) and checks that the
// z-score of the sample mean stays within the tolerance implied by `p`.
// Logs diagnostics when the z-test fails or the Jarque-Bera statistic is
// large (jb > 9.21, i.e. the samples look non-normal).
template <typename D>
bool GaussianDistributionTests::SingleZTest(const double p,
                                            const size_t samples) {
  D dis(mean(), stddev());
  std::vector<double> data;
  data.reserve(samples);
  for (size_t i = 0; i < samples; i++) {
    const double x = dis(rng_);
    data.push_back(x);
  }
  const double max_err = absl::random_internal::MaxErrorTolerance(p);
  const auto m = absl::random_internal::ComputeDistributionMoments(data);
  const double z = absl::random_internal::ZScore(mean(), m);
  const bool pass = absl::random_internal::Near("z", z, 0.0, max_err);
  // Jarque-Bera normality statistic computed from sample skewness/kurtosis.
  const double jb =
      static_cast<double>(m.n) / 6.0 *
      (std::pow(m.skewness, 2.0) + std::pow(m.kurtosis - 3.0, 2.0) / 4.0);
  if (!pass || jb > 9.21) {
    LOG(INFO)
        << "p=" << p << " max_err=" << max_err << "\n"
           "  mean=" << m.mean << " vs. " << mean() << "\n"
           "  stddev=" << std::sqrt(m.variance) << " vs. " << stddev() << "\n"
           "  skewness=" << m.skewness << " vs. " << skew() << "\n"
           "  kurtosis=" << m.kurtosis << " vs. " << kurtosis() << "\n"
           "  z=" << z << " vs. 0\n"
           "  jb=" << jb << " vs. 9.21";
  }
  return pass;
}
template <typename D>
double GaussianDistributionTests::SingleChiSquaredTest() {
const size_t kSamples = 10000;
const int kBuckets = 50;
std::vector<double> cutoffs;
const double kInc = 1.0 / static_cast<double>(kBuckets);
for (double p = kInc; p < 1.0; p += kInc) {
cutoffs.push_back(InverseCDF(p));
}
if (cutoffs.back() != std::numeric_limits<double>::infinity()) {
cutoffs.push_back(std::numeric_limits<double>::infinity());
}
D dis(mean(), stddev());
std::vector<int32_t> counts(cutoffs.size(), 0);
for (int j = 0; j < kSamples; j++) {
const double x = dis(rng_);
auto it = std::upper_bound(cutoffs.begin(), cutoffs.end(), x);
counts[std::distance(cutoffs.begin(), it)]++;
}
const int dof = static_cast<int>(counts.size()) - 1;
const double threshold = absl::random_internal::ChiSquareValue(dof, 0.98);
const double expected =
static_cast<double>(kSamples) / static_cast<double>(counts.size());
double chi_square = absl::random_internal::ChiSquareWithExpected(
std::begin(counts), std::end(counts), expected);
double p = absl::random_internal::ChiSquarePValue(chi_square, dof);
if (chi_square > threshold) {
for (size_t i = 0; i < cutoffs.size(); i++) {
LOG(INFO) << i << " : (" << cutoffs[i] << ") = " << counts[i];
}
LOG(INFO) << "mean=" << mean() << " stddev=" << stddev() << "\n"
" expected " << expected << "\n"
<< kChiSquared << " " << chi_square << " (" << p << ")\n"
<< kChiSquared << " @ 0.98 = " << threshold;
}
return p;
}
// Runs `trials` independent z-tests and allows up to the expected number of
// statistical failures for the configured per-trial failure probability.
TEST_P(GaussianDistributionTests, ZTest) {
  const size_t kSamples = 10000;
  const auto& param = GetParam();
  const int expected_failures =
      std::max(1, static_cast<int>(std::ceil(param.trials * param.p_fail)));
  const double p = absl::random_internal::RequiredSuccessProbability(
      param.p_fail, param.trials);
  int failures = 0;
  for (int i = 0; i < param.trials; i++) {
    failures +=
        SingleZTest<absl::gaussian_distribution<double>>(p, kSamples) ? 0 : 1;
  }
  EXPECT_LE(failures, expected_failures);
}
// Runs 20 chi-squared goodness-of-fit trials; at most 4 may fall below the
// 0.0025 p-value threshold (a statistical failure budget, not a hard bound).
TEST_P(GaussianDistributionTests, ChiSquaredTest) {
  const int kTrials = 20;
  int failures = 0;
  for (int i = 0; i < kTrials; i++) {
    double p_value =
        SingleChiSquaredTest<absl::gaussian_distribution<double>>();
    if (p_value < 0.0025) {
      failures++;
    }
  }
  EXPECT_LE(failures, 4);
}
// Enumerates the test configurations: means and standard deviations spanning
// many orders of magnitude, each with a 1% per-trial failure budget over 100
// trials.
std::vector<Param> GenParams() {
  return {
      // Zero mean, stddev from very large to subnormal-adjacent scales.
      Param{0.0, 1.0, 0.01, 100},
      Param{0.0, 1e2, 0.01, 100},
      Param{0.0, 1e4, 0.01, 100},
      Param{0.0, 1e8, 0.01, 100},
      Param{0.0, 1e16, 0.01, 100},
      Param{0.0, 1e-3, 0.01, 100},
      Param{0.0, 1e-5, 0.01, 100},
      Param{0.0, 1e-9, 0.01, 100},
      Param{0.0, 1e-17, 0.01, 100},
      // Unit mean with varying stddev.
      Param{1.0, 1.0, 0.01, 100},
      Param{1.0, 1e2, 0.01, 100},
      Param{1.0, 1e-2, 0.01, 100},
      // Large-magnitude (positive and negative) means.
      Param{1e2, 1.0, 0.01, 100},
      Param{-1e2, 1.0, 0.01, 100},
      Param{1e2, 1e6, 0.01, 100},
      Param{-1e2, 1e6, 0.01, 100},
      // Mean much larger than stddev.
      Param{1e4, 1e4, 0.01, 100},
      Param{1e8, 1e4, 0.01, 100},
      Param{1e12, 1e4, 0.01, 100},
  };
}
// Builds a gtest-safe parameterized-test name from the Param, replacing
// characters ('+', '-', '.') that are invalid in test names.
std::string ParamName(const ::testing::TestParamInfo<Param>& info) {
  const auto& p = info.param;
  std::string name = absl::StrCat("mean_", absl::SixDigits(p.mean), "__stddev_",
                                  absl::SixDigits(p.stddev));
  return absl::StrReplaceAll(name, {{"+", "_"}, {"-", "_"}, {".", "_"}});
}
INSTANTIATE_TEST_SUITE_P(All, GaussianDistributionTests,
::testing::ValuesIn(GenParams()), ParamName);
TEST(GaussianDistributionTest, StabilityTest) {
absl::random_internal::sequence_urbg urbg(
{0x0003eb76f6f7f755ull, 0xFFCEA50FDB2F953Bull, 0xC332DDEFBE6C5AA5ull,
0x6558218568AB9702ull, 0x2AEF7DAD5B6E2F84ull, 0x1521B62829076170ull,
0xECDD4775619F1510ull, 0x13CCA830EB61BD96ull, 0x0334FE1EAA0363CFull,
0xB5735C904C70A239ull, 0xD59E9E0BCBAADE14ull, 0xEECC86BC60622CA7ull});
std::vector<int> output(11);
{
absl::gaussian_distribution<double> dist;
std::generate(std::begin(output), std::end(output),
[&] { return static_cast<int>(10000000.0 * dist(urbg)); });
EXPECT_EQ(13, urbg.invocations());
EXPECT_THAT(output,
testing::ElementsAre(1494, 25518841, 9991550, 1351856,
-20373238, 3456682, 333530, -6804981,
-15279580, -16459654, 1494));
}
urbg.reset();
{
absl::gaussian_distribution<float> dist;
std::generate(std::begin(output), std::end(output),
[&] { return static_cast<int>(1000000.0f * dist(urbg)); });
EXPECT_EQ(13, urbg.invocations());
EXPECT_THAT(
output,
testing::ElementsAre(149, 2551884, 999155, 135185, -2037323, 345668,
33353, -680498, -1527958, -1645965, 149));
}
}
TEST(GaussianDistributionTest, AlgorithmBounds) {
absl::gaussian_distribution<double> dist;
const uint64_t kValues[] = {
0x1000000000000100ull, 0x2000000000000100ull, 0x3000000000000100ull,
0x4000000000000100ull, 0x5000000000000100ull, 0x6000000000000100ull,
0x9000000000000100ull, 0xa000000000000100ull, 0xb000000000000100ull,
0xc000000000000100ull, 0xd000000000000100ull, 0xe000000000000100ull};
const uint64_t kExtraValues[] = {
0x7000000000000100ull, 0x7800000000000100ull,
0x7c00000000000100ull, 0x7e00000000000100ull,
0xf000000000000100ull, 0xf800000000000100ull,
0xfc00000000000100ull, 0xfe00000000000100ull};
auto make_box = [](uint64_t v, uint64_t box) {
return (v & 0xffffffffffffff80ull) | box;
};
for (uint64_t box = 0; box < 0x7f; box++) {
for (const uint64_t v : kValues) {
absl::random_internal::sequence_urbg urbg(
{make_box(v, box), 0x0003eb76f6f7f755ull, 0x5FCEA50FDB2F953Bull});
auto a = dist(urbg);
EXPECT_EQ(1, urbg.invocations()) << box << " " << std::hex << v;
if (v & 0x8000000000000000ull) {
EXPECT_LT(a, 0.0) << box << " " << std::hex << v;
} else {
EXPECT_GT(a, 0.0) << box << " " << std::hex << v;
}
}
if (box > 10 && box < 100) {
for (const uint64_t v : kExtraValues) {
absl::random_internal::sequence_urbg urbg(
{make_box(v, box), 0x0003eb76f6f7f755ull, 0x5FCEA50FDB2F953Bull});
auto a = dist(urbg);
EXPECT_EQ(1, urbg.invocations()) << box << " " << std::hex << v;
if (v & 0x8000000000000000ull) {
EXPECT_LT(a, 0.0) << box << " " << std::hex << v;
} else {
EXPECT_GT(a, 0.0) << box << " " << std::hex << v;
}
}
}
}
auto make_fallback = [](uint64_t v) { return (v & 0xffffffffffffff80ull); };
double tail[2];
{
absl::random_internal::sequence_urbg urbg(
{make_fallback(0x7800000000000000ull), 0x13CCA830EB61BD96ull,
0x00000076f6f7f755ull});
tail[0] = dist(urbg);
EXPECT_EQ(3, urbg.invocations());
EXPECT_GT(tail[0], 0);
}
{
absl::random_internal::sequence_urbg urbg(
{make_fallback(0xf800000000000000ull), 0x13CCA830EB61BD96ull,
0x00000076f6f7f755ull});
tail[1] = dist(urbg);
EXPECT_EQ(3, urbg.invocations());
EXPECT_LT(tail[1], 0);
}
EXPECT_EQ(tail[0], -tail[1]);
EXPECT_EQ(418610, static_cast<int64_t>(tail[0] * 100000.0));
{
absl::random_internal::sequence_urbg urbg(
{make_box(0x7f00000000000000ull, 120), 0xe000000000000001ull,
0x13CCA830EB61BD96ull});
tail[0] = dist(urbg);
EXPECT_EQ(2, urbg.invocations());
EXPECT_GT(tail[0], 0);
}
{
absl::random_internal::sequence_urbg urbg(
{make_box(0xff00000000000000ull, 120), 0xe000000000000001ull,
0x13CCA830EB61BD96ull});
tail[1] = dist(urbg);
EXPECT_EQ(2, urbg.invocations());
EXPECT_LT(tail[1], 0);
}
EXPECT_EQ(tail[0], -tail[1]);
EXPECT_EQ(61948, static_cast<int64_t>(tail[0] * 100000.0));
{
absl::random_internal::sequence_urbg urbg(
{make_box(0xff00000000000000ull, 120), 0x1000000000000001,
make_box(0x1000000000000100ull, 50), 0x13CCA830EB61BD96ull});
dist(urbg);
EXPECT_EQ(3, urbg.invocations());
}
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/gaussian_distribution.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/gaussian_distribution_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
697358e5-9e87-487b-a7f6-e0a7ac0f0008 | cpp | google/tensorstore | future | tensorstore/util/future.cc | tensorstore/util/future_test.cc | #include "tensorstore/util/future.h"
#include <stddef.h>
#include <stdint.h>
#include <atomic>
#include <cassert>
#include <thread>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/base/no_destructor.h"
#include "absl/base/optimization.h"
#include "absl/hash/hash.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "tensorstore/internal/container/intrusive_linked_list.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/metrics/gauge.h"
#include "tensorstore/internal/metrics/metadata.h"
#include "tensorstore/util/future_impl.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
using ::tensorstore::internal_metrics::MetricMetadata;
namespace tensorstore {
namespace internal_future {
namespace {
auto& live_futures = internal_metrics::Gauge<int64_t>::New(
"/tensorstore/futures/live", MetricMetadata("Live futures"));
auto& future_ready_callbacks = internal_metrics::Counter<int64_t>::New(
"/tensorstore/futures/ready_callbacks", MetricMetadata("Ready callbacks"));
auto& future_not_needed_callbacks = internal_metrics::Counter<int64_t>::New(
"/tensorstore/futures/not_needed_callbacks",
MetricMetadata("Not needed callbacks"));
auto& future_force_callbacks = internal_metrics::Counter<int64_t>::New(
"/tensorstore/futures/force_callbacks", MetricMetadata("Force callbacks"));
}
static CallbackListNode unregister_requested;
// A mutex aligned to a cache line so adjacent entries of the mutex pool in
// GetMutex() below do not suffer false sharing.
struct ABSL_CACHELINE_ALIGNED CacheLineAlignedMutex {
  absl::Mutex mutex{absl::kConstInit};
};
constexpr size_t kNumMutexes = 64;
// Returns the mutex guarding `ptr`'s callback lists.  A fixed pool of
// kNumMutexes mutexes is shared by all FutureStateBase objects; the owning
// mutex is selected by hashing the state pointer.  This avoids a per-future
// mutex while keeping contention between unrelated futures low.
absl::Mutex* GetMutex(FutureStateBase* ptr) {
  ABSL_CONST_INIT static CacheLineAlignedMutex mutexes[kNumMutexes];
  return &mutexes[absl::HashOf(ptr) % kNumMutexes].mutex;
}
using CallbackListAccessor =
internal::intrusive_linked_list::MemberAccessor<CallbackListNode>;
namespace {
// Marks `callback` as not registered in any list and converts one of the
// caller's two references into the returned CallbackPointer.
CallbackPointer MakeUnregisteredCallbackPointer(CallbackBase* callback) {
  // Caller holds one reference for the (skipped) registration and one for
  // the returned pointer.
  assert(callback->reference_count_.load(std::memory_order_relaxed) >= 2);
  // `next == prev == this` is the sentinel for "not in a callback list".
  callback->next = callback->prev = callback;
  // Drop the registration reference; the remaining one is adopted below.
  callback->reference_count_.fetch_sub(1, std::memory_order_relaxed);
  return CallbackPointer(callback, internal::adopt_object_ref);
}
}
// Registers `callback` to run when the future becomes ready.  If the state is
// already ready, the callback runs immediately (outside the mutex) and an
// unregistered pointer is returned.
CallbackPointer FutureStateBase::RegisterReadyCallback(
    ReadyCallbackBase* callback) {
  assert(callback->reference_count_.load(std::memory_order_relaxed) >= 2);
  {
    absl::MutexLock lock(GetMutex(this));
    future_ready_callbacks.Increment();
    if (!this->ready()) {
      InsertBefore(CallbackListAccessor{}, &ready_callbacks_, callback);
      return CallbackPointer(callback, internal::adopt_object_ref);
    }
  }
  // Already ready: invoke without holding the mutex so the callback may
  // safely re-enter future APIs.
  callback->OnReady();
  return MakeUnregisteredCallbackPointer(callback);
}
// Registers `callback` to run when the result is no longer needed.  If the
// result is already not needed, the callback runs immediately (outside the
// mutex) and an unregistered pointer is returned.
CallbackPointer FutureStateBase::RegisterNotNeededCallback(
    ResultNotNeededCallbackBase* callback) {
  assert(callback->reference_count_.load(std::memory_order_relaxed) >= 2);
  {
    absl::MutexLock lock(GetMutex(this));
    future_not_needed_callbacks.Increment();
    if (result_needed()) {
      InsertBefore(CallbackListAccessor{}, &promise_callbacks_, callback);
      return CallbackPointer(callback, internal::adopt_object_ref);
    }
  }
  // Result already not needed: invoke without holding the mutex.
  callback->OnResultNotNeeded();
  return MakeUnregisteredCallbackPointer(callback);
}
// Registers `callback` to run when the future is forced.  Three outcomes:
//  - not yet forced: the callback is queued on `promise_callbacks_`;
//  - already forced: OnForced() runs immediately (outside the mutex); a
//    kLinkCallback is then re-registered for result-not-needed notification
//    if the result is still needed;
//  - result already locked or no future handles remain: the callback is
//    discarded via OnUnregistered().
CallbackPointer FutureStateBase::RegisterForceCallback(
    ForceCallbackBase* callback) {
  assert(callback->reference_count_.load(std::memory_order_relaxed) >= 2);
  auto* mutex = GetMutex(this);
  {
    absl::MutexLock lock(mutex);
    future_force_callbacks.Increment();
    const auto state = state_.load(std::memory_order_acquire);
    if ((state & kResultLocked) != 0 || !has_future()) {
      // Forcing can no longer matter; discard the callback.
      goto destroy_callback;
    }
    if (state & kForcing) {
      // Force already in progress/complete; run the callback below, after
      // releasing the mutex.
      goto already_forced;
    }
    InsertBefore(CallbackListAccessor{}, &promise_callbacks_, callback);
    return CallbackPointer(callback, internal::adopt_object_ref);
  }
already_forced:
  callback->OnForced();
  if (callback->callback_type() == CallbackBase::kLinkCallback) {
    // Link callbacks also want the result-not-needed notification; re-check
    // under the lock and register if the result is still needed.
    absl::MutexLock lock(mutex);
    if (result_needed()) {
      InsertBefore(CallbackListAccessor{}, &promise_callbacks_, callback);
      return CallbackPointer(callback, internal::adopt_object_ref);
    }
  } else {
    return MakeUnregisteredCallbackPointer(callback);
  }
destroy_callback:
  callback->OnUnregistered();
  return MakeUnregisteredCallbackPointer(callback);
}
CallbackBase::~CallbackBase() {}
// Unregisters this callback.  If the callback is currently executing on
// another thread and `block` is true, waits for it to finish; a request from
// the callback's own thread never blocks (that would self-deadlock).
void CallbackBase::Unregister(bool block) noexcept {
  auto* shared_state = this->shared_state();
  auto* mutex = GetMutex(shared_state);
  {
    absl::MutexLock lock(mutex);
    if (next == this) {
      // Already unregistered (sentinel: next points at self).
      return;
    }
    if (next == nullptr || next == &unregister_requested) {
      // Callback is mid-execution (next == nullptr) or an unregistration has
      // already been requested; record/keep the request.
      next = &unregister_requested;
      if (!block || running_callback_thread == std::this_thread::get_id()) {
        return;
      }
      // Block until the executing thread clears the request marker.
      const auto is_done = [&] { return this->next != &unregister_requested; };
      mutex->Await(absl::Condition(&is_done));
      return;
    }
    // Still queued and not running: unlink now.
    Remove(CallbackListAccessor{}, this);
    next = this;
  }
  // Notify and drop the registration reference outside the mutex.
  this->OnUnregistered();
  CallbackPointerTraits::decrement(this);
}
// Constructs a fresh shared state: one promise reference, one future
// reference, and a combined count of 2 (one per side).  Both callback lists
// start empty (circular, pointing at their heads).
FutureStateBase::FutureStateBase()
    : state_(kInitial),
      combined_reference_count_(2),
      promise_reference_count_(1),
      future_reference_count_(1) {
  Initialize(CallbackListAccessor{}, &ready_callbacks_);
  Initialize(CallbackListAccessor{}, &promise_callbacks_);
  live_futures.Increment();
}
namespace {
// Called when the last promise reference is released: the result can never
// change again, so make the state ready and drop the promise side's share of
// the combined reference count.
void NoMorePromiseReferences(FutureStateBase* shared_state) {
  if (shared_state->LockResult()) {
    // We won the race to lock the result; mark it written and ready in one
    // step.
    shared_state->MarkResultWrittenAndCommitResult();
  } else {
    // Someone else already locked (and will write) the result; just commit.
    shared_state->CommitResult();
  }
  shared_state->ReleaseCombinedReference();
}
template <typename BeforeUnregisterFunc, typename AfterUnregisterFunc>
inline void RunAndReleaseCallbacks(FutureStateBase* shared_state,
CallbackListNode* head,
BeforeUnregisterFunc before_func,
AfterUnregisterFunc after_func) {
const auto thread_id = std::this_thread::get_id();
auto* mutex = GetMutex(shared_state);
CallbackPointer prev_node;
while (true) {
CallbackListNode* next_node;
{
absl::MutexLock lock(mutex);
if (prev_node != nullptr) {
using Id = std::thread::id;
prev_node->running_callback_thread.~Id();
prev_node->next = prev_node.get();
}
next_node = head->next;
if (next_node == head) {
break;
}
Remove(CallbackListAccessor{}, next_node);
next_node->next = nullptr;
new (&next_node->running_callback_thread) std::thread::id(thread_id);
}
if (prev_node) after_func(prev_node.get());
prev_node.reset(static_cast<CallbackBase*>(next_node),
internal::adopt_object_ref);
before_func(prev_node.get());
}
if (prev_node) after_func(prev_node.get());
}
// Invokes OnReady() on every queued ready-callback and releases them.
void RunReadyCallbacks(FutureStateBase* shared_state) {
  RunAndReleaseCallbacks(
      shared_state, &shared_state->ready_callbacks_,
      [](CallbackBase* callback) {
        static_cast<ReadyCallbackBase*>(callback)->OnReady();
      },
      [](CallbackBase* callback) {});
}
// Drains the promise-callback list: result-not-needed callbacks get their
// OnResultNotNeeded() notification (while detached); all other callback
// kinds are simply unregistered afterwards.
void DestroyPromiseCallbacks(FutureStateBase* shared_state) {
  RunAndReleaseCallbacks(
      shared_state, &shared_state->promise_callbacks_,
      [](CallbackBase* callback) {
        if (callback->callback_type() ==
            CallbackBase::kResultNotNeededCallback) {
          static_cast<ResultNotNeededCallbackBase*>(callback)
              ->OnResultNotNeeded();
        }
      },
      [](CallbackBase* callback) {
        if (callback->callback_type() !=
            CallbackBase::kResultNotNeededCallback) {
          callback->OnUnregistered();
        }
      });
}
// Runs OnForced() on each queued force/link callback.  The promise-callback
// list is first spliced onto a local `temp_head` (so callbacks registered
// during forcing are not run here), and kForcing is set.  Result-not-needed
// callbacks encountered in the splice are moved back onto the real list
// untouched.  After a link callback runs it is either re-queued on the real
// list, or — if Unregister() was requested while it ran — given its
// OnUnregistered() notification; plain force callbacks are just marked
// unregistered.
void RunForceCallbacks(FutureStateBase* shared_state) {
  const auto thread_id = std::this_thread::get_id();
  auto* mutex = GetMutex(shared_state);
  CallbackPointer prev_node;
  CallbackListNode temp_head;
  CallbackListNode* const head = &shared_state->promise_callbacks_;
  while (true) {
    CallbackListNode* next_node;
    {
      absl::MutexLock lock(mutex);
      if (prev_node) {
        // Finish bookkeeping for the callback that just ran.
        using Id = std::thread::id;
        if (prev_node->callback_type() == CallbackBase::kLinkCallback) {
          if (prev_node->next == &unregister_requested) {
            // Unregister() raced with us; honor the request now, dropping
            // the mutex around the user notification.
            prev_node->next = prev_node.get();
            mutex->Unlock();
            static_cast<CallbackBase*>(prev_node.get())->OnUnregistered();
            mutex->Lock();
          } else {
            // Re-queue the link callback for later result-not-needed
            // handling; ownership transfers back to the list.
            prev_node->running_callback_thread.~Id();
            InsertBefore(CallbackListAccessor{}, head, prev_node.release());
          }
        } else {
          assert(prev_node->callback_type() == CallbackBase::kForceCallback);
          // Plain force callback: mark it unregistered.
          prev_node->next = prev_node.get();
        }
      } else {
        // First iteration: splice the whole list onto temp_head and publish
        // that forcing has begun.
        temp_head.next = head->next;
        temp_head.next->prev = &temp_head;
        temp_head.prev = head->prev;
        temp_head.prev->next = &temp_head;
        head->next = head->prev = head;
        shared_state->state_.fetch_or(FutureStateBase::kForcing);
      }
      while (true) {
        next_node = temp_head.next;
        if (next_node == &temp_head) return;
        Remove(CallbackListAccessor{}, next_node);
        if (static_cast<CallbackBase*>(next_node)->callback_type() ==
            CallbackBase::kResultNotNeededCallback) {
          // Not a force callback; put it back on the real list.
          InsertBefore(CallbackListAccessor{}, head, next_node);
          continue;
        }
        // Mark "currently executing" before dropping the mutex.
        next_node->next = nullptr;
        new (&next_node->running_callback_thread) std::thread::id(thread_id);
        break;
      }
    }
    prev_node.reset(static_cast<CallbackBase*>(next_node),
                    internal::adopt_object_ref);
    static_cast<ForceCallbackBase*>(prev_node.get())->OnForced();
  }
}
// Called when the last future reference is released: the result is no longer
// needed, so notify/discard the promise callbacks and drop the future side's
// share of the combined reference count.
void NoMoreFutureReferences(FutureStateBase* shared_state) {
  DestroyPromiseCallbacks(shared_state);
  shared_state->ReleaseCombinedReference();
}
}
// Forces the future.  Only the first caller (state still kInitial) does any
// work; everyone else returns immediately.
void FutureStateBase::Force() noexcept {
  StateValue prior_state = kInitial;
  if (!state_.compare_exchange_strong(prior_state, kPreparingToForce)) {
    return;
  }
  RunForceCallbacks(this);
  prior_state = state_.fetch_or(kForced);
  if (prior_state & kResultLocked) {
    // The result was locked while force callbacks were running; LockResult()
    // deferred promise-callback destruction to us (see its else branch).
    DestroyPromiseCallbacks(this);
  }
}
// Drops one future reference; the last one triggers result-not-needed
// handling.
void FutureStateBase::ReleaseFutureReference() {
  if (--future_reference_count_ == 0) {
    NoMoreFutureReferences(this);
  }
}
// Drops one promise reference; the last one commits the (possibly unset)
// result.
void FutureStateBase::ReleasePromiseReference() {
  if (--promise_reference_count_ == 0) {
    NoMorePromiseReferences(this);
  }
}
// Drops one combined (promise-side or future-side) reference; the last one
// destroys the shared state.
void FutureStateBase::ReleaseCombinedReference() {
  if (--combined_reference_count_ == 0) {
    delete this;
  }
}
// Attempts to (re)acquire a future reference.  Returns false only when the
// future count has dropped to zero and the result was never locked — i.e.
// the result can no longer be produced.
bool FutureStateBase::AcquireFutureReference() noexcept {
  auto existing = future_reference_count_.load(std::memory_order_relaxed);
  while (true) {
    if (existing == 0) {
      if ((state_.load(std::memory_order_acquire) & kResultLocked) == 0) {
        return false;
      }
      // Result is locked, so resurrecting the future side is safe.  If we
      // are the one taking the count 0 -> 1, also re-add the future side's
      // combined reference.
      if (future_reference_count_.fetch_add(1, std::memory_order_acq_rel) ==
          0) {
        combined_reference_count_.fetch_add(1, std::memory_order_relaxed);
      }
      return true;
    }
    // Normal path: increment if the count has not changed underneath us.
    if (future_reference_count_.compare_exchange_weak(
            existing, existing + 1, std::memory_order_acq_rel)) {
      return true;
    }
  }
}
// Attempts to lock the result for writing.  Returns false if it was already
// locked by someone else.
bool FutureStateBase::LockResult() noexcept {
  const StateValue prior_state = state_.fetch_or(kResultLocked);
  if (prior_state & kResultLocked) return false;
  if ((prior_state & kForced) != 0 || (prior_state & kPreparingToForce) == 0) {
    DestroyPromiseCallbacks(this);
  } else {
    // A Force() is in flight (kPreparingToForce set, kForced not yet set);
    // Force() will see kResultLocked afterwards and destroy the promise
    // callbacks itself.
  }
  return true;
}
// Records that the (previously locked) result value has been written.  If the
// state was already committed ready, ready callbacks run now.
void FutureStateBase::MarkResultWritten() noexcept {
  const StateValue prior_state = state_.fetch_or(kResultWritten);
  assert(prior_state & kResultLocked);
  assert((prior_state & kResultWritten) == 0);
  if (prior_state & kReady) {
    RunReadyCallbacks(this);
  }
}
// Marks the state ready.  Returns false if it was already ready.  Ready
// callbacks run only once both kReady and kResultWritten are set.
bool FutureStateBase::CommitResult() noexcept {
  const StateValue prior_state = state_.fetch_or(kReady);
  if (prior_state & kReady) return false;
  if (prior_state & kResultWritten) {
    RunReadyCallbacks(this);
  }
  return true;
}
// Atomically marks the result written AND ready in a single fetch_or, then
// runs the ready callbacks.  Requires the result to be locked and not yet
// written.
void FutureStateBase::MarkResultWrittenAndCommitResult() noexcept {
  [[maybe_unused]] const StateValue prior_state =
      state_.fetch_or(kResultWrittenAndReady);
  assert(prior_state & kResultLocked);
  assert((prior_state & kResultWritten) == 0);
  RunReadyCallbacks(this);
}
// Forces the future (if needed) and blocks until it is ready or `duration`
// elapses.  Returns whether the state became ready.
bool FutureStateBase::WaitFor(absl::Duration duration) noexcept {
  if (ready()) return true;
  Force();
  absl::Mutex* mutex = GetMutex(this);
  bool is_ready = mutex->LockWhenWithTimeout(
      absl::Condition(this, &FutureStateBase::ready), duration);
  mutex->Unlock();
  return is_ready;
}
// Blocks until the state is ready or `deadline` passes.  Forces the
// promise first so that any deferred computation actually starts.
// Returns whether the state became ready before the deadline.
bool FutureStateBase::WaitUntil(absl::Time deadline) noexcept {
  if (ready()) return true;
  Force();
  absl::Mutex* mutex = GetMutex(this);
  bool is_ready = mutex->LockWhenWithDeadline(
      absl::Condition(this, &FutureStateBase::ready), deadline);
  mutex->Unlock();
  return is_ready;
}
// Blocks indefinitely until the state is ready.  Forces the promise first
// so that any deferred computation actually starts.
void FutureStateBase::Wait() noexcept {
  if (ready()) return;
  Force();
  absl::Mutex* mutex = GetMutex(this);
  mutex->LockWhen(absl::Condition(this, &FutureStateBase::ready));
  mutex->Unlock();
}
// The intrusive callback lists are circular (`next == &head` means empty);
// both must already have been drained before the state is destroyed.
FutureStateBase::~FutureStateBase() {
  assert(promise_callbacks_.next == &promise_callbacks_);
  assert(ready_callbacks_.next == &ready_callbacks_);
  // Bookkeeping for the live-futures metric.
  live_futures.Decrement();
}
}
// Returns a process-wide, already-ready Future<const void>.  The shared
// instance is created on first use and intentionally never destroyed
// (absl::NoDestructor) to avoid static shutdown-order issues.
ReadyFuture<const void> MakeReadyFuture() {
  static absl::NoDestructor<ReadyFuture<const void>> future{
      MakeReadyFuture<void>(MakeResult())};
  return *future;
}
// Returns a future that becomes ready once every future in `futures` is
// ready, and resolves to the first error if any of them fails (LinkError
// semantics).
//
// LinkError is variadic, so the inputs are consumed in unrolled groups:
// 0-7 futures are handled directly by the first switch; larger inputs are
// linked to a single promise in chunks of 8, with a trailing switch for
// the final 1-8 remaining futures.
Future<void> WaitAllFuture(tensorstore::span<const AnyFuture> futures) {
  auto& f = futures;
  switch (f.size()) {
    case 0:
      // Nothing to wait for.
      return MakeReadyFuture<void>(absl::OkStatus());
    case 1:
      return PromiseFuturePair<void>::LinkError(absl::OkStatus(), f[0]).future;
    case 2:
      return PromiseFuturePair<void>::LinkError(absl::OkStatus(), f[0], f[1])
          .future;
    case 3:
      return PromiseFuturePair<void>::LinkError(absl::OkStatus(), f[0], f[1],
                                                f[2])
          .future;
    case 4:
      return PromiseFuturePair<void>::LinkError(absl::OkStatus(), f[0], f[1],
                                                f[2], f[3])
          .future;
    case 5:
      return PromiseFuturePair<void>::LinkError(absl::OkStatus(), f[0], f[1],
                                                f[2], f[3], f[4])
          .future;
    case 6:
      return PromiseFuturePair<void>::LinkError(absl::OkStatus(), f[0], f[1],
                                                f[2], f[3], f[4], f[5])
          .future;
    case 7:
      return PromiseFuturePair<void>::LinkError(absl::OkStatus(), f[0], f[1],
                                                f[2], f[3], f[4], f[5], f[6])
          .future;
    default:
      break;
  }
  // 8 or more futures: create the promise/future pair linked to the first
  // 8, then attach subsequent full chunks of 8 to the same promise.
  auto [promise, result] = PromiseFuturePair<void>::LinkError(
      absl::OkStatus(), f[0], f[1], f[2], f[3], f[4], f[5], f[6], f[7]);
  f = f.subspan(8);
  while (f.size() > 8) {
    LinkError(promise, f[0], f[1], f[2], f[3], f[4], f[5], f[6], f[7]);
    f = f.subspan(8);
  }
  // Attach the final 0-8 remaining futures, moving the promise into the
  // last link so no extra promise reference outlives this function.
  switch (f.size()) {
    case 0:
      return std::move(result);
    case 1:
      LinkError(std::move(promise), f[0]);
      return std::move(result);
    case 2:
      LinkError(std::move(promise), f[0], f[1]);
      return std::move(result);
    case 3:
      LinkError(std::move(promise), f[0], f[1], f[2]);
      return std::move(result);
    case 4:
      LinkError(std::move(promise), f[0], f[1], f[2], f[3]);
      return std::move(result);
    case 5:
      LinkError(std::move(promise), f[0], f[1], f[2], f[3], f[4]);
      return std::move(result);
    case 6:
      LinkError(std::move(promise), f[0], f[1], f[2], f[3], f[4], f[5]);
      return std::move(result);
    case 7:
      LinkError(std::move(promise), f[0], f[1], f[2], f[3], f[4], f[5], f[6]);
      return std::move(result);
    case 8:
      LinkError(std::move(promise), f[0], f[1], f[2], f[3], f[4], f[5], f[6],
                f[7]);
      return std::move(result);
  }
  ABSL_UNREACHABLE();
}
} | #include "tensorstore/util/future.h"
#include <stddef.h>
#include <atomic>
#include <chrono>
#include <functional>
#include <memory>
#include <thread>
#include <type_traits>
#include <utility>
#include <benchmark/benchmark.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/metrics/registry.h"
#include "tensorstore/internal/testing/concurrent.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future_impl.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::AnyFuture;
using ::tensorstore::Future;
using ::tensorstore::FutureCallbackRegistration;
using ::tensorstore::InlineExecutor;
using ::tensorstore::IsFutureConvertible;
using ::tensorstore::MakeReadyFuture;
using ::tensorstore::MakeResult;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Promise;
using ::tensorstore::PromiseFuturePair;
using ::tensorstore::ReadyFuture;
using ::tensorstore::Result;
using ::tensorstore::internal_future::FutureAccess;
using ::tensorstore::internal_testing::TestConcurrent;
// Compile-time checks: Future/Promise const-conversion rules, the exact
// reference categories returned by FutureAccess::rep_pointer, and the
// Result<Future<T>> flattening behavior.
static_assert(IsFutureConvertible<int, const int>);
static_assert(!IsFutureConvertible<const int, int>);
static_assert(
    std::is_same_v<
        decltype(FutureAccess::rep_pointer(std::declval<Future<void>&>())),
        tensorstore::internal_future::FutureStatePointer&>);
static_assert(
    std::is_same_v<decltype(FutureAccess::rep_pointer(
                       std::declval<const Future<void>&>())),
                   const tensorstore::internal_future::FutureStatePointer&>);
static_assert(
    std::is_same_v<
        decltype(FutureAccess::rep_pointer(std::declval<Future<void>&&>())),
        tensorstore::internal_future::FutureStatePointer&&>);
static_assert(
    std::is_same_v<
        decltype(FutureAccess::rep_pointer(std::declval<Promise<void>&>())),
        tensorstore::internal_future::PromiseStatePointer&>);
static_assert(
    std::is_same_v<decltype(FutureAccess::rep_pointer(
                       std::declval<const Promise<void>&>())),
                   const tensorstore::internal_future::PromiseStatePointer&>);
static_assert(
    std::is_same_v<
        decltype(FutureAccess::rep_pointer(std::declval<Promise<void>&&>())),
        tensorstore::internal_future::PromiseStatePointer&&>);
static_assert(!std::is_constructible_v<Result<int>, Result<Future<int>>>);
static_assert(!std::is_convertible_v<Result<int>, Result<Future<int>>>);
static_assert(!std::is_assignable_v<Result<int>, Result<Future<int>>>);
static_assert(std::is_same_v<
              Result<Future<void>>,
              tensorstore::FlatResult<std::invoke_result_t<Future<void>()>>>);
// --- Basic construction, readiness, and Result-flattening behavior. ---
TEST(FutureTest, Valid) {
  EXPECT_TRUE(Future<int>().null());
  EXPECT_TRUE(Promise<int>().null());
  auto pair = PromiseFuturePair<int>::Make();
  EXPECT_FALSE(pair.future.null());
  EXPECT_FALSE(pair.promise.null());
  auto future2 = pair.promise.future();
  EXPECT_FALSE(future2.null());
}
TEST(FutureTest, MakeReadyFuture) {
  Future<int> future = MakeReadyFuture<int>(3);
  EXPECT_EQ(true, future.ready());
  EXPECT_EQ(3, future.result().value());
  Result<int> result{tensorstore::in_place};
  bool got_result = false;
  // Callbacks registered on an already-ready future run immediately.
  future.ExecuteWhenReady([&](ReadyFuture<int> r) {
    got_result = true;
    result = r.result();
  });
  EXPECT_TRUE(got_result);
  EXPECT_EQ(result, future.result());
}
TEST(FutureTest, MakeInPlace) {
  auto pair = PromiseFuturePair<int>::Make(tensorstore::in_place, 4);
  pair.promise.reset();
  EXPECT_EQ(4, pair.future.value());
}
TEST(FutureTest, ConstructFromValue) {
  Future<int> x = 3;
  EXPECT_EQ(3, x.value());
}
TEST(FutureTest, ConstructFromValueConst) {
  Future<const int> x = 3;
  EXPECT_EQ(3, x.value());
}
// Constructing Future<T> from Result<Future<T>> "flattens" one level.
TEST(FutureTest, FlattenResultError) {
  Future<int> x = MakeResult<Future<int>>(absl::UnknownError("Error"));
  EXPECT_THAT(x.result(), MatchesStatus(absl::StatusCode::kUnknown, "Error"));
}
TEST(FutureTest, FlattenResultErrorConst) {
  Future<const int> x = MakeResult<Future<int>>(absl::UnknownError("Error"));
  EXPECT_THAT(x.result(), MatchesStatus(absl::StatusCode::kUnknown, "Error"));
}
TEST(FutureTest, FlattenResultSuccess) {
  auto pair = PromiseFuturePair<int>::Make();
  Future<int> x = MakeResult(pair.future);
  EXPECT_TRUE(HaveSameSharedState(pair.future, x));
}
TEST(FutureTest, FlattenResultSuccessConstConvert) {
  auto pair = PromiseFuturePair<int>::Make();
  Future<const int> x = MakeResult(pair.future);
  EXPECT_TRUE(HaveSameSharedState(pair.future, x));
}
TEST(FutureTest, FlattenResultLvalue) {
  Result<Future<int>> f1 = absl::UnknownError("");
  Future<int> f2 = f1;
  EXPECT_EQ(absl::UnknownError(""), GetStatus(f2.result()));
}
// --- SetResult overloads: value, in_place, Result, and error statuses,
// for both int- and void-valued futures. ---
TEST(FutureTest, SetResult) {
  {
    auto pair = PromiseFuturePair<int>::Make();
    EXPECT_FALSE(pair.promise.ready());
    EXPECT_TRUE(pair.promise.result_needed());
    EXPECT_FALSE(pair.future.ready());
    Result<int> result{tensorstore::in_place};
    bool got_result = false;
    pair.future.ExecuteWhenReady([&](ReadyFuture<int> r) {
      got_result = true;
      result = r.result();
    });
    EXPECT_FALSE(got_result);
    EXPECT_TRUE(pair.promise.SetResult(5));
    EXPECT_FALSE(pair.promise.result_needed());
    EXPECT_TRUE(pair.future.ready());
    EXPECT_TRUE(pair.promise.ready());
    EXPECT_EQ(result, 5);
  }
  {
    auto pair = PromiseFuturePair<int>::Make();
    pair.promise.SetResult(std::in_place, 6);
    EXPECT_TRUE(pair.future.ready());
    EXPECT_TRUE(pair.promise.ready());
    EXPECT_TRUE(pair.future.result().ok());
  }
  {
    auto pair = PromiseFuturePair<int>::Make();
    pair.promise.SetResult(MakeResult(7));
    EXPECT_TRUE(pair.future.ready());
    EXPECT_TRUE(pair.promise.ready());
    EXPECT_TRUE(pair.future.result().ok());
  }
  {
    auto pair = PromiseFuturePair<int>::Make();
    pair.promise.SetResult(absl::InternalError("error"));
    EXPECT_TRUE(pair.future.ready());
    EXPECT_TRUE(pair.promise.ready());
    EXPECT_FALSE(pair.future.result().ok());
  }
  {
    auto pair = PromiseFuturePair<int>::Make();
    pair.promise.SetResult(MakeResult<int>(absl::InternalError("error")));
    EXPECT_TRUE(pair.future.ready());
    EXPECT_TRUE(pair.promise.ready());
    EXPECT_FALSE(pair.future.result().ok());
  }
}
TEST(FutureTest, SetResultVoid) {
  {
    auto pair = PromiseFuturePair<void>::Make();
    EXPECT_FALSE(pair.promise.ready());
    EXPECT_TRUE(pair.promise.result_needed());
    EXPECT_FALSE(pair.future.ready());
    EXPECT_TRUE(pair.promise.SetResult(absl::OkStatus()));
    EXPECT_FALSE(pair.promise.result_needed());
    EXPECT_TRUE(pair.future.ready());
    EXPECT_TRUE(pair.promise.ready());
    EXPECT_TRUE(pair.future.result().ok());
  }
  {
    auto pair = PromiseFuturePair<void>::Make();
    pair.promise.SetResult(std::in_place);
    EXPECT_TRUE(pair.future.ready());
    EXPECT_TRUE(pair.promise.ready());
    EXPECT_TRUE(pair.future.result().ok());
  }
  {
    auto pair = PromiseFuturePair<void>::Make();
    pair.promise.SetResult(MakeResult<void>(absl::OkStatus()));
    EXPECT_TRUE(pair.future.ready());
    EXPECT_TRUE(pair.promise.ready());
    EXPECT_TRUE(pair.future.result().ok());
  }
  {
    auto pair = PromiseFuturePair<void>::Make();
    pair.promise.SetResult(absl::InternalError("error"));
    EXPECT_TRUE(pair.future.ready());
    EXPECT_TRUE(pair.promise.ready());
    EXPECT_FALSE(pair.future.result().ok());
  }
  {
    auto pair = PromiseFuturePair<void>::Make();
    pair.promise.SetResult(MakeResult<void>(absl::InternalError("error")));
    EXPECT_TRUE(pair.future.ready());
    EXPECT_TRUE(pair.promise.ready());
    EXPECT_FALSE(pair.future.result().ok());
  }
}
// --- Blocking waits.  The timeout tests are inherently timing-dependent,
// so they retry up to 100 times and pass as soon as one iteration observes
// the expected ordering. ---
TEST(FutureTest, Wait) {
  auto pair = PromiseFuturePair<int>::Make();
  std::thread thread(
      [](Promise<int> promise) {
        absl::SleepFor(absl::Milliseconds(20));
        EXPECT_TRUE(promise.SetResult(5));
      },
      std::move(pair.promise));
  pair.future.Wait();
  EXPECT_EQ(5, pair.future.result());
  thread.join();
}
TEST(FutureTest, WaitForFailure) {
  for (size_t i = 0; i < 100; ++i) {
    auto pair = PromiseFuturePair<int>::Make();
    EXPECT_FALSE(pair.future.WaitFor(absl::Milliseconds(10)));
    std::thread thread(
        [](Promise<int> promise) {
          absl::SleepFor(absl::Milliseconds(20));
          EXPECT_TRUE(promise.SetResult(5));
        },
        pair.promise);
    const bool ready = pair.future.WaitFor(absl::Milliseconds(5));
    thread.join();
    if (!ready) {
      return;
    }
  }
  FAIL();
}
TEST(FutureTest, WaitForSuccess) {
  for (size_t i = 0; i < 100; ++i) {
    auto pair = PromiseFuturePair<int>::Make();
    std::thread thread(
        [](Promise<int> promise) {
          absl::SleepFor(absl::Milliseconds(5));
          EXPECT_TRUE(promise.SetResult(5));
        },
        pair.promise);
    const bool ready1 = pair.future.WaitFor(absl::Milliseconds(20));
    const bool ready2 = pair.future.WaitFor(absl::Milliseconds(10));
    thread.join();
    if (ready1 && ready2) {
      return;
    }
  }
  FAIL();
}
TEST(FutureTest, WaitUntilFailure) {
  for (size_t i = 0; i < 100; ++i) {
    auto pair = PromiseFuturePair<int>::Make();
    // A deadline in the past must return immediately without readiness.
    EXPECT_FALSE(pair.future.WaitUntil(absl::Now() - absl::Milliseconds(10)));
    EXPECT_FALSE(pair.future.WaitUntil(absl::Now() + absl::Milliseconds(10)));
    std::thread thread(
        [](Promise<int> promise) {
          absl::SleepFor(absl::Milliseconds(20));
          EXPECT_TRUE(promise.SetResult(5));
        },
        pair.promise);
    const bool ready =
        pair.future.WaitUntil(absl::Now() + absl::Milliseconds(5));
    thread.join();
    if (!ready) {
      return;
    }
  }
  FAIL();
}
TEST(FutureTest, WaitUntilSuccess) {
  for (size_t i = 0; i < 100; ++i) {
    auto pair = PromiseFuturePair<int>::Make();
    std::thread thread(
        [](Promise<int> promise) {
          absl::SleepFor(absl::Milliseconds(5));
          EXPECT_TRUE(promise.SetResult(5));
        },
        pair.promise);
    const bool ready1 =
        pair.future.WaitUntil(absl::Now() + absl::Milliseconds(20));
    const bool ready2 =
        pair.future.WaitUntil(absl::Now() + absl::Milliseconds(10));
    thread.join();
    if (ready1 && ready2) {
      return;
    }
  }
  FAIL();
}
// --- Callback registration/unregistration semantics: ExecuteWhenReady,
// ExecuteWhenNotNeeded, self/other unregistration, and concurrent
// unregistration races. ---
TEST(FutureTest, SetResultTwice) {
  auto pair = PromiseFuturePair<int>::Make();
  EXPECT_TRUE(pair.promise.SetResult(3));
  EXPECT_EQ(3, pair.future.result());
  // The second SetResult is a no-op and reports failure.
  EXPECT_EQ(false, pair.promise.SetResult(5));
  EXPECT_EQ(3, pair.future.result());
}
TEST(FutureTest, ExecuteWhenNotNeeded) {
  auto pair = PromiseFuturePair<int>::Make();
  bool no_future = false;
  pair.promise.ExecuteWhenNotNeeded([&] { no_future = true; });
  EXPECT_FALSE(no_future);
  pair.future.reset();
  EXPECT_FALSE(pair.promise.result_needed());
  EXPECT_TRUE(no_future);
}
TEST(FutureTest, ExecuteWhenNotNeededBeforeForced) {
  auto pair = PromiseFuturePair<int>::Make();
  bool no_future = false;
  pair.promise.ExecuteWhenNotNeeded([&] { no_future = true; });
  EXPECT_FALSE(no_future);
  pair.future.reset();
  EXPECT_FALSE(pair.promise.result_needed());
  EXPECT_TRUE(no_future);
}
TEST(FutureTest, ExecuteWhenNotNeededUnregister) {
  auto pair = PromiseFuturePair<int>::Make();
  bool no_future = false;
  auto registration =
      pair.promise.ExecuteWhenNotNeeded([&] { no_future = true; });
  EXPECT_FALSE(no_future);
  registration.Unregister();
  pair.future.reset();
  EXPECT_FALSE(no_future);
}
TEST(FutureTest, ExecuteWhenNotNeededImmediate) {
  auto pair = PromiseFuturePair<int>::Make();
  bool no_future = false;
  pair.future.reset();
  auto registration =
      pair.promise.ExecuteWhenNotNeeded([&] { no_future = true; });
  EXPECT_TRUE(no_future);
  registration.Unregister();
}
TEST(FutureTest, ExecuteWhenReadyUnregisterTwice) {
  auto pair = PromiseFuturePair<int>::Make();
  bool invoked = false;
  auto registration =
      pair.future.ExecuteWhenReady([&](ReadyFuture<int>) { invoked = true; });
  EXPECT_FALSE(invoked);
  auto registration2 = registration;
  registration.Unregister();
  registration2.Unregister();
  pair.promise.SetResult(3);
  EXPECT_FALSE(invoked);
}
TEST(FutureTest, ExecuteWhenNotNeededThenForce) {
  auto pair = PromiseFuturePair<int>::Make();
  bool no_future = false;
  auto registration =
      pair.promise.ExecuteWhenNotNeeded([&] { no_future = true; });
  pair.future.Force();
  pair.future.reset();
  EXPECT_TRUE(no_future);
  registration.Unregister();
}
TEST(FutureTest, ExecuteWhenReadyUnregisterSelf) {
  auto pair = PromiseFuturePair<int>::Make();
  bool invoked = false;
  FutureCallbackRegistration registration;
  registration = pair.future.ExecuteWhenReady([&](ReadyFuture<int>) {
    invoked = true;
    registration();
  });
  pair.promise.SetResult(3);
  EXPECT_TRUE(invoked);
}
TEST(FutureTest, ExecuteWhenReadyUnregisterSelfTwice) {
  auto pair = PromiseFuturePair<int>::Make();
  bool invoked = false;
  FutureCallbackRegistration registration;
  registration = pair.future.ExecuteWhenReady([&](ReadyFuture<int>) {
    invoked = true;
    auto registration_copy = registration;
    registration();
    registration_copy();
  });
  pair.promise.SetResult(3);
  EXPECT_TRUE(invoked);
}
TEST(FutureTest, Destructor) {
  auto pair = PromiseFuturePair<int>::Make();
  static_cast<void>(pair);
}
TEST(FutureTest, DestructorExecuteWhenReady) {
  auto pair = PromiseFuturePair<int>::Make();
  pair.future.ExecuteWhenReady([&](ReadyFuture<int>) {});
}
TEST(FutureTest, ExecuteWhenReadyUnregisterOther) {
  auto pair = PromiseFuturePair<int>::Make();
  bool invoked = false;
  FutureCallbackRegistration registration;
  pair.future.ExecuteWhenReady([&](ReadyFuture<int>) { registration(); });
  registration =
      pair.future.ExecuteWhenReady([&](ReadyFuture<int>) { invoked = true; });
  pair.promise.SetResult(3);
  EXPECT_FALSE(invoked);
}
// Unregister() must block until a concurrently-running callback finishes.
TEST(FutureTest, ExecuteWhenReadyUnregisterConcurrent) {
  PromiseFuturePair<int> pair;
  std::atomic<bool> unregistered;
  FutureCallbackRegistration registration;
  TestConcurrent(
      1000,
      [&] {
        unregistered = false;
        pair = PromiseFuturePair<int>::Make();
        registration = pair.future.ExecuteWhenReady([&](ReadyFuture<int>) {
          for (int i = 0; i < 100; ++i) {
            EXPECT_FALSE(unregistered.load());
          }
        });
      },
      [&] { EXPECT_TRUE(unregistered.load()); },
      [&] { pair.promise.SetResult(3); },
      [&] {
        registration.Unregister();
        unregistered = true;
      });
}
// UnregisterNonBlocking() may return while the callback is still running.
TEST(FutureTest, ExecuteWhenReadyUnregisterNonBlockingConcurrent) {
  PromiseFuturePair<int> pair;
  std::atomic<bool> callback_started, unregister_returned, callback_finished;
  FutureCallbackRegistration registration;
  TestConcurrent(
      1,
      [&] {
        callback_started = false;
        callback_finished = false;
        unregister_returned = false;
        pair = PromiseFuturePair<int>::Make();
        registration = pair.future.ExecuteWhenReady([&](ReadyFuture<int>) {
          callback_started = true;
          while (unregister_returned == false) {
          }
          callback_finished = true;
        });
      },
      [&] {
        EXPECT_TRUE(callback_started);
        EXPECT_TRUE(unregister_returned);
        EXPECT_TRUE(callback_finished);
      },
      [&] { pair.promise.SetResult(3); },
      [&] {
        while (!callback_started) {
        }
        EXPECT_FALSE(callback_finished);
        registration.UnregisterNonBlocking();
        unregister_returned = true;
      });
}
// --- Force-callback semantics: when force callbacks run, when they are
// destroyed, and concurrent unregistration.  The shared_ptr `sentinel`
// use_count is used throughout to observe callback destruction. ---
TEST(FutureTest, ExecuteWhenNotNeededUnregisterConcurrent) {
  PromiseFuturePair<int> pair;
  std::atomic<bool> unregistered;
  FutureCallbackRegistration registration;
  TestConcurrent(
      1000,
      [&] {
        unregistered = false;
        pair = PromiseFuturePair<int>::Make();
        registration = pair.promise.ExecuteWhenNotNeeded([&] {
          for (int i = 0; i < 100; ++i) {
            EXPECT_FALSE(unregistered.load());
          }
        });
      },
      [&] { EXPECT_TRUE(unregistered.load()); },
      [&] { pair.promise.SetResult(3); },
      [&] {
        registration.Unregister();
        unregistered = true;
      });
}
TEST(FutureTest, ExecuteWhenForcedUnregisterConcurrent) {
  PromiseFuturePair<int> pair;
  std::atomic<bool> unregistered;
  FutureCallbackRegistration registration;
  TestConcurrent(
      1000,
      [&] {
        unregistered = false;
        pair = PromiseFuturePair<int>::Make();
        registration = pair.promise.ExecuteWhenForced([&](Promise<int>) {
          for (int i = 0; i < 100; ++i) {
            EXPECT_FALSE(unregistered.load());
          }
        });
      },
      [&] { EXPECT_TRUE(unregistered.load()); },
      [&] { pair.future.Force(); },
      [&] {
        registration.Unregister();
        unregistered = true;
      });
}
TEST(FutureTest, SetResultInForceCallback) {
  auto pair = PromiseFuturePair<int>::Make();
  pair.promise.ExecuteWhenForced([](Promise<int> p) { p.SetResult(5); });
  EXPECT_FALSE(pair.future.ready());
  pair.future.Force();
  EXPECT_EQ(true, pair.future.ready());
  EXPECT_EQ(5, pair.future.result());
}
TEST(FutureTest, ForceCallbackAddedAfterForced) {
  auto pair = PromiseFuturePair<int>::Make();
  auto sentinel = std::make_shared<int>();
  pair.future.Force();
  bool callback_ran = false;
  // Registering after Force() runs the callback immediately and then
  // destroys it (sentinel use_count back to 1).
  pair.promise.ExecuteWhenForced(
      [sentinel, &callback_ran](Promise<int> p) { callback_ran = true; });
  EXPECT_TRUE(callback_ran);
  EXPECT_EQ(1, sentinel.use_count());
  EXPECT_FALSE(pair.future.ready());
}
TEST(FutureTest, ForceCallbackAddedAfterForcedWithNoFuturesRemaining) {
  auto pair = PromiseFuturePair<int>::Make();
  auto sentinel = std::make_shared<int>();
  pair.future.Force();
  pair.future.reset();
  bool callback_ran = false;
  // No futures remain, so the callback is dropped without running.
  pair.promise.ExecuteWhenForced(
      [sentinel, &callback_ran](Promise<int> p) { callback_ran = true; });
  EXPECT_FALSE(callback_ran);
  EXPECT_EQ(1, sentinel.use_count());
  EXPECT_FALSE(pair.promise.result_needed());
}
TEST(FutureTest, ForceCallbackDestroyedAfterForce) {
  auto pair = PromiseFuturePair<int>::Make();
  auto sentinel = std::make_shared<int>();
  pair.promise.ExecuteWhenForced(
      [sentinel](Promise<int> p) { p.SetResult(5); });
  EXPECT_EQ(2, sentinel.use_count());
  EXPECT_FALSE(pair.future.ready());
  pair.future.Force();
  EXPECT_EQ(1, sentinel.use_count());
  EXPECT_EQ(true, pair.future.ready());
  EXPECT_EQ(5, pair.future.result());
}
TEST(FutureTest, ForceAfterReady) {
  auto pair = PromiseFuturePair<int>::Make();
  bool forced = false;
  auto sentinel = std::make_shared<int>();
  pair.promise.ExecuteWhenForced(
      [&forced, sentinel](Promise<int> p) { forced = true; });
  EXPECT_EQ(2, sentinel.use_count());
  pair.promise.SetResult(3);
  EXPECT_FALSE(forced);
  EXPECT_EQ(1, sentinel.use_count());
  pair.future.Force();
  EXPECT_FALSE(forced);
}
TEST(FutureTest, ForceCallbacksDestroyedWhenNoFuturesRemain) {
  auto pair = PromiseFuturePair<int>::Make();
  bool forced = false;
  auto sentinel = std::make_shared<int>();
  pair.promise.ExecuteWhenForced(
      [&forced, sentinel](Promise<int> p) { forced = true; });
  EXPECT_EQ(2, sentinel.use_count());
  pair.future.reset();
  EXPECT_EQ(1, sentinel.use_count());
  EXPECT_FALSE(forced);
}
// Test helper: a callable that invokes `call_when_copied` from its copy
// constructor.  Registering it as a callback lets a test trigger a
// re-entrant operation (SetResult/Force/reset) at the exact moment the
// future machinery copies the callback, simulating a concurrent race.
struct CallOnCopy {
  CallOnCopy(const CallOnCopy& x)
      : call_when_copied(x.call_when_copied),
        call_when_invoked(x.call_when_invoked) {
    call_when_copied();
  }
  CallOnCopy(std::function<void()> call_when_copied,
             std::function<void()> call_when_invoked)
      : call_when_copied(call_when_copied),
        call_when_invoked(call_when_invoked) {}
  // Accepts any callback signature; forwards nothing.
  template <typename... Arg>
  void operator()(Arg&&...) {
    call_when_invoked();
  }
  std::function<void()> call_when_copied, call_when_invoked;
};
// --- Races triggered via CallOnCopy: a state transition happens while the
// callback is being registered, exercising the registration/transition
// interleavings deterministically. ---
TEST(FutureTest, SetReadyCalledConcurrentlyWithExecuteWhenReady) {
  bool was_called = false;
  auto pair = PromiseFuturePair<int>::Make();
  pair.future.ExecuteWhenReady(CallOnCopy{[&] { pair.promise.SetResult(5); },
                                          [&] { was_called = true; }});
  EXPECT_TRUE(was_called);
  EXPECT_EQ(5, pair.future.result().value());
}
TEST(FutureTest, ForceCalledConcurrentlyWithExecuteWhenForced) {
  bool was_called = false;
  auto sentinel = std::make_shared<int>();
  auto pair = PromiseFuturePair<int>::Make();
  pair.promise.ExecuteWhenForced(CallOnCopy{
      [&] { pair.future.Force(); }, [&, sentinel] { was_called = true; }});
  EXPECT_TRUE(was_called);
  EXPECT_EQ(1, sentinel.use_count());
}
TEST(FutureTest, ForceAndThenSetResultCalledConcurrentlyWithExecuteWhenForced) {
  bool was_called = false;
  auto sentinel = std::make_shared<int>();
  auto pair = PromiseFuturePair<int>::Make();
  pair.promise.ExecuteWhenForced(CallOnCopy{[&] { pair.future.Force(); },
                                            [&, sentinel] {
                                              was_called = true;
                                              pair.promise.SetResult(5);
                                            }});
  EXPECT_TRUE(was_called);
  EXPECT_EQ(1, sentinel.use_count());
  EXPECT_EQ(5, pair.future.result().value());
}
TEST(FutureTest, LastFutureReleasedConcurrentlyWithExecuteWhenNotNeeded) {
  bool was_called = false;
  auto sentinel = std::make_shared<int>();
  auto pair = PromiseFuturePair<int>::Make();
  pair.promise.ExecuteWhenNotNeeded(CallOnCopy{
      [&] { pair.future.reset(); }, [&, sentinel] { was_called = true; }});
  EXPECT_TRUE(was_called);
  EXPECT_EQ(1, sentinel.use_count());
}
TEST(FutureTest, LastFutureReleasedConcurrentlyWithExecuteWhenForced) {
  bool was_called = false;
  auto sentinel = std::make_shared<int>();
  auto pair = PromiseFuturePair<int>::Make();
  pair.promise.ExecuteWhenForced(CallOnCopy{
      [&] { pair.future.reset(); }, [&, sentinel] { was_called = true; }});
  EXPECT_FALSE(was_called);
  EXPECT_EQ(1, sentinel.use_count());
}
TEST(FutureTest, SetResultCalledConcurrentlyWithExecuteWhenForced) {
  bool was_called = false;
  auto sentinel = std::make_shared<int>();
  auto pair = PromiseFuturePair<int>::Make();
  pair.promise.ExecuteWhenForced(
      CallOnCopy{[&] { pair.promise.SetResult(5); },
                 [&, sentinel] { was_called = true; }});
  EXPECT_FALSE(was_called);
  EXPECT_EQ(1, sentinel.use_count());
  EXPECT_EQ(5, pair.future.result().value());
}
TEST(FutureTest, PromiseBroken) {
  auto pair = PromiseFuturePair<int>::Make();
  pair.promise = {};
  EXPECT_TRUE(pair.future.ready());
  EXPECT_FALSE(pair.future.result().has_value());
  EXPECT_EQ(absl::UnknownError(""), pair.future.result().status());
}
// --- Implicit T -> const T conversions for Future/Promise. ---
TEST(FutureTest, ConvertInt) {
  auto pair = PromiseFuturePair<int>::Make();
  Future<const int> f = pair.future;
  Promise<const int> p = pair.promise;
}
TEST(FutureTest, ConvertVoid) {
  auto pair = PromiseFuturePair<void>::Make();
  Future<const void> f = pair.future;
  Promise<const void> p = pair.promise;
  pair.promise.SetResult(tensorstore::MakeResult());
  f.value();
}
TEST(FutureTest, ConvertVoid2) {
  Future<const void> f;
  Promise<const void> p;
  auto pair = PromiseFuturePair<void>::Make();
  f = pair.future;
  p = pair.promise;
  pair.promise.SetResult(std::in_place);
  f.value();
}
// Test helper: a type that can be neither copied nor moved, to verify the
// result is constructed in place inside the shared state.
struct NonMovable {
  NonMovable(int value) : value(value) {}
  NonMovable(NonMovable const&) = delete;
  NonMovable(NonMovable&&) = delete;
  int value;
};
// --- In-place construction of non-movable result types. ---
TEST(FutureTest, NonMovableTypeInitialize) {
  auto pair = PromiseFuturePair<NonMovable>::Make(3);
  pair.promise.SetReady();
  EXPECT_EQ(3, pair.future.value().value);
}
TEST(FutureTest, NonMovableTypeSetReady) {
  auto pair = PromiseFuturePair<NonMovable>::Make();
  pair.promise.raw_result().emplace(5);
  pair.promise.SetReady();
  EXPECT_EQ(5, pair.future.value().value);
}
// --- Shared-state identity and reviving a future from a promise. ---
TEST(HaveSameSharedStateTest, Invalid) {
  // All-null handles compare as sharing the same (absent) state.
  Future<int> fa, fb;
  Future<const int> cf;
  Promise<int> pa, pb;
  Promise<int> cp;
  EXPECT_TRUE(HaveSameSharedState(fa, fb));
  EXPECT_TRUE(HaveSameSharedState(fa, cf));
  EXPECT_TRUE(HaveSameSharedState(pa, pb));
  EXPECT_TRUE(HaveSameSharedState(pa, fa));
  EXPECT_TRUE(HaveSameSharedState(fa, pb));
  EXPECT_TRUE(HaveSameSharedState(pa, cf));
}
TEST(HaveSameSharedStateTest, Valid) {
  auto pair1 = PromiseFuturePair<void>::Make();
  auto pair2 = PromiseFuturePair<void>::Make();
  EXPECT_TRUE(HaveSameSharedState(pair1.future, pair1.future));
  EXPECT_TRUE(HaveSameSharedState(pair1.future, pair1.promise));
  EXPECT_TRUE(HaveSameSharedState(pair1.promise, pair1.future));
  EXPECT_TRUE(HaveSameSharedState(pair1.promise, pair1.promise));
  EXPECT_FALSE(HaveSameSharedState(pair1.promise, pair2.promise));
  EXPECT_FALSE(HaveSameSharedState(pair1.promise, pair2.future));
  EXPECT_FALSE(HaveSameSharedState(pair1.future, pair2.future));
  EXPECT_FALSE(HaveSameSharedState(pair1.future, pair2.promise));
}
TEST(AcquireFutureReferenceTest, ExistingFutureNotReady) {
  auto pair = PromiseFuturePair<void>::Make();
  auto future2 = pair.promise.future();
  EXPECT_TRUE(HaveSameSharedState(future2, pair.future));
}
TEST(AcquireFutureReferenceTest, ExistingFutureReady) {
  auto pair = PromiseFuturePair<void>::Make();
  pair.promise.SetReady();
  auto future2 = pair.promise.future();
  EXPECT_TRUE(HaveSameSharedState(future2, pair.future));
}
TEST(AcquireFutureReferenceTest, NoExistingFutureNotReady) {
  // With no live futures and no result, promise.future() yields null.
  auto pair = PromiseFuturePair<void>::Make();
  pair.future.reset();
  auto future2 = pair.promise.future();
  EXPECT_FALSE(!future2.null());
}
TEST(AcquireFutureReferenceTest, NoExistingFutureReady) {
  // Once the result is set, a future can be resurrected from the promise.
  auto pair = PromiseFuturePair<void>::Make();
  pair.future.reset();
  pair.promise.SetReady();
  auto future2 = pair.promise.future();
  EXPECT_TRUE(HaveSameSharedState(future2, pair.promise));
}
// --- Link / LinkValue / LinkError: propagating readiness and errors from
// input futures to a dependent promise. ---
TEST(LinkTest, MultipleSimple) {
  auto a_pair = PromiseFuturePair<int>::Make();
  auto b_pair = PromiseFuturePair<int>::Make();
  auto c_pair = PromiseFuturePair<int>::Make();
  EXPECT_FALSE(a_pair.future.ready());
  EXPECT_FALSE(b_pair.future.ready());
  EXPECT_FALSE(c_pair.future.ready());
  // The callback runs only once all linked futures are ready.
  Link(
      [](Promise<int> c, ReadyFuture<int> a, ReadyFuture<int> b) {
        c.SetResult(a.result().value() + b.result().value());
      },
      c_pair.promise, a_pair.future, b_pair.future);
  a_pair.promise.SetResult(5);
  EXPECT_FALSE(b_pair.future.ready());
  EXPECT_FALSE(c_pair.future.ready());
  b_pair.promise.SetResult(3);
  ASSERT_TRUE(c_pair.future.ready());
  EXPECT_EQ(8, c_pair.future.result().value());
}
TEST(LinkTest, EmptyCallback) {
  auto a_pair = PromiseFuturePair<int>::Make();
  auto b_pair = PromiseFuturePair<int>::Make();
  struct Callback {
    void operator()(Promise<int> b, ReadyFuture<int> a) const {
      b.SetResult(a.result().value());
    }
  };
  Link(Callback{}, b_pair.promise, a_pair.future);
  EXPECT_FALSE(a_pair.future.ready());
  EXPECT_FALSE(b_pair.future.ready());
  a_pair.promise.SetResult(5);
  ASSERT_TRUE(b_pair.future.ready());
  EXPECT_EQ(5, b_pair.future.result().value());
}
// LinkValue short-circuits: an error in any input propagates immediately
// without invoking the callback.
TEST(LinkValueTest, MultipleSuccessError) {
  auto a_pair = PromiseFuturePair<int>::Make();
  auto b_pair = PromiseFuturePair<int>::Make();
  auto c_pair = PromiseFuturePair<int>::Make();
  LinkValue(
      [](Promise<int> c, ReadyFuture<int> a, ReadyFuture<int> b) {
        c.SetResult(a.result().value() + b.result().value());
      },
      c_pair.promise, a_pair.future, b_pair.future);
  a_pair.promise.SetResult(5);
  EXPECT_FALSE(c_pair.future.ready());
  b_pair.promise.SetResult(absl::InvalidArgumentError("Test error"));
  ASSERT_TRUE(c_pair.future.ready());
  EXPECT_THAT(c_pair.future.result().status(),
              MatchesStatus(absl::StatusCode::kInvalidArgument, "Test error"));
}
TEST(LinkValueTest, MultipleErrorSuccess) {
  auto a_pair = PromiseFuturePair<int>::Make();
  auto b_pair = PromiseFuturePair<int>::Make();
  auto c_pair = PromiseFuturePair<int>::Make();
  LinkValue(
      [](Promise<int> c, ReadyFuture<int> a, ReadyFuture<int> b) {
        c.SetResult(a.result().value() + b.result().value());
      },
      c_pair.promise, a_pair.future, b_pair.future);
  b_pair.promise.SetResult(absl::InvalidArgumentError("Test error"));
  ASSERT_TRUE(c_pair.future.ready());
  EXPECT_THAT(c_pair.future.result().status(),
              MatchesStatus(absl::StatusCode::kInvalidArgument, "Test error"));
}
TEST(LinkErrorTest, ImmediateSuccess) {
  auto pair = PromiseFuturePair<int>::Make(3);
  LinkError(pair.promise, MakeReadyFuture<int>(1));
  EXPECT_FALSE(pair.future.ready());
  pair.promise.reset();
  ASSERT_TRUE(pair.future.ready());
  EXPECT_EQ(3, pair.future.value());
}
TEST(LinkErrorTest, ImmediateFailure) {
  auto pair = PromiseFuturePair<int>::Make(3);
  LinkError(pair.promise, MakeReadyFuture<int>(absl::UnknownError("Msg")));
  pair.promise.reset();
  EXPECT_EQ(absl::UnknownError("Msg"), pair.future.result().status());
}
TEST(LinkErrorTest, DelayedSuccess) {
  auto pair1 = PromiseFuturePair<int>::Make(3);
  auto pair2 = PromiseFuturePair<void>::Make();
  LinkError(pair1.promise, pair2.future);
  pair1.promise.reset();
  EXPECT_FALSE(pair1.future.ready());
  pair2.promise.SetResult(tensorstore::MakeResult());
  ASSERT_TRUE(pair1.future.ready());
  EXPECT_EQ(3, pair1.future.value());
}
TEST(LinkErrorTest, DelayedFailure) {
  auto pair1 = PromiseFuturePair<int>::Make(3);
  auto pair2 = PromiseFuturePair<void>::Make();
  LinkError(pair1.promise, pair2.future);
  EXPECT_FALSE(pair1.future.ready());
  pair2.promise.SetResult(absl::UnknownError("Msg"));
  ASSERT_TRUE(pair1.future.ready());
  EXPECT_EQ(absl::UnknownError("Msg"), pair1.future.result().status());
}
// Forcing the linked future forces its inputs transitively.
TEST(LinkTest, SetReadyInForce) {
  auto pair1 = PromiseFuturePair<int>::Make();
  pair1.promise.ExecuteWhenForced([](Promise<int> self) { self.SetResult(5); });
  auto pair2 = PromiseFuturePair<int>::Make();
  Link([](Promise<int> p,
          ReadyFuture<int> f) { p.SetResult(f.result().value() + 2); },
       pair2.promise, pair1.future);
  EXPECT_FALSE(pair1.future.ready());
  EXPECT_FALSE(pair2.future.ready());
  EXPECT_EQ(7, pair2.future.result().value());
}
TEST(LinkTest, LinkAfterForceCalledWhereFutureBecomesReadyWhenForced) {
  auto pair1 = PromiseFuturePair<int>::Make();
  auto pair2 = PromiseFuturePair<int>::Make();
  pair2.promise.ExecuteWhenForced([](Promise<int> self) { self.SetResult(5); });
  pair1.future.Force();
  EXPECT_FALSE(pair1.future.ready());
  EXPECT_FALSE(pair2.future.ready());
  Link([](Promise<int> p,
          ReadyFuture<int> f1) { p.SetResult(f1.result().value() + 2); },
       pair1.promise, pair2.future);
  EXPECT_TRUE(pair1.future.ready());
  EXPECT_TRUE(pair2.future.ready());
  EXPECT_EQ(7, pair1.future.result().value());
}
TEST(LinkTest, LinkAfterForceCalledWhereFutureDoesNotBecomeReadyWhenForced) {
  auto pair1 = PromiseFuturePair<int>::Make();
  auto pair2 = PromiseFuturePair<int>::Make();
  pair1.future.Force();
  EXPECT_FALSE(pair1.future.ready());
  EXPECT_FALSE(pair2.future.ready());
  Link([](Promise<int> p,
          ReadyFuture<int> f1) { p.SetResult(f1.result().value() + 2); },
       pair1.promise, pair2.future);
  EXPECT_FALSE(pair1.future.ready());
  EXPECT_FALSE(pair2.future.ready());
  pair2.promise.SetResult(5);
  EXPECT_TRUE(pair1.future.ready());
  EXPECT_TRUE(pair2.future.ready());
  EXPECT_EQ(7, pair1.future.result().value());
}
// --- Link unregistration, already-ready/not-needed inputs, and concurrent
// link-vs-set races. ---
TEST(LinkTest, Unregister) {
  auto pair1 = PromiseFuturePair<int>::Make();
  pair1.promise.ExecuteWhenForced([](Promise<int> p) { p.SetResult(5); });
  auto pair2 = PromiseFuturePair<int>::Make();
  auto sentinel = std::make_shared<int>();
  auto registration = Link(
      [sentinel](Promise<int> p, ReadyFuture<int> f) {
        p.SetResult(f.result().value() + 2);
      },
      pair2.promise, pair1.future);
  EXPECT_FALSE(pair1.future.ready());
  EXPECT_FALSE(pair2.future.ready());
  EXPECT_EQ(2, sentinel.use_count());
  // Unregistering destroys the link callback (sentinel drops to 1).
  registration();
  EXPECT_EQ(1, sentinel.use_count());
  pair1.future.Force();
  EXPECT_FALSE(pair2.future.ready());
}
TEST(LinkTest, AlreadyReady) {
  auto future1 = MakeReadyFuture<int>(5);
  auto pair2 = PromiseFuturePair<int>::Make();
  Link([](Promise<int> p,
          ReadyFuture<int> f) { p.SetResult(f.result().value() + 2); },
       pair2.promise, future1);
  EXPECT_TRUE(pair2.future.ready());
  EXPECT_EQ(7, pair2.future.result().value());
}
TEST(LinkTest, NotNeeded) {
  // Linking to a promise whose result is no longer needed drops the
  // callback immediately.
  auto pair1 = PromiseFuturePair<int>::Make();
  auto pair2 = PromiseFuturePair<int>::Make();
  pair2.future.reset();
  EXPECT_FALSE(pair2.promise.result_needed());
  auto sentinel = std::make_shared<int>();
  auto registration = Link(
      [sentinel](Promise<int> p, ReadyFuture<int> f) {
        p.SetResult(f.result().value() + 2);
      },
      pair2.promise, pair1.future);
  EXPECT_EQ(1, sentinel.use_count());
  EXPECT_FALSE(pair1.future.ready());
  EXPECT_FALSE(pair2.promise.ready());
}
TEST(LinkTest, ConcurrentSetReady) {
  PromiseFuturePair<int> pair1, pair2, pair3;
  TestConcurrent(
      1000,
      [&] {
        pair1 = PromiseFuturePair<int>::Make();
        pair2 = PromiseFuturePair<int>::Make();
        pair3 = PromiseFuturePair<int>::Make();
        Link([](Promise<int> p1, ReadyFuture<int> f2,
                ReadyFuture<int> f3) { p1.SetResult(f2.value() + f3.value()); },
             pair1.promise, pair2.future, pair3.future);
      },
      [&] {
        ASSERT_TRUE(pair1.future.ready());
        EXPECT_EQ(pair1.future.value(), 7);
      },
      [&] { pair2.promise.SetResult(5); },
      [&] { pair3.promise.SetResult(2); });
}
TEST(LinkTest, ConcurrentLinkAndSetReady) {
  PromiseFuturePair<int> pair1, pair2, pair3;
  TestConcurrent(
      1000,
      [&] {
        pair1 = PromiseFuturePair<int>::Make();
        pair2 = PromiseFuturePair<int>::Make();
        pair3 = PromiseFuturePair<int>::Make();
      },
      [&] {
        ASSERT_TRUE(pair1.future.ready());
        EXPECT_EQ(pair1.future.value(), 7);
      },
      [&] {
        Link([](Promise<int> p1, ReadyFuture<int> f2,
                ReadyFuture<int> f3) { p1.SetResult(f2.value() + f3.value()); },
             pair1.promise, pair2.future, pair3.future);
      },
      [&] { pair2.promise.SetResult(5); },
      [&] { pair3.promise.SetResult(2); });
}
TEST(LinkTest, ConcurrentUnregister) {
PromiseFuturePair<int> pair1, pair2;
FutureCallbackRegistration registration;
std::atomic<bool> unregistered;
TestConcurrent(
1000,
[&] {
unregistered = false;
pair1 = PromiseFuturePair<int>::Make(1);
pair2 = PromiseFuturePair<int>::Make();
registration = Link(
[&](Promise<int> p1, ReadyFuture<int> f2) {
for (int i = 0; i < 100; ++i) {
EXPECT_FALSE(unregistered.load());
}
},
pair1.promise, pair2.future);
},
[&] { EXPECT_TRUE(unregistered.load()); },
[&] { pair2.promise.SetResult(2); },
[&] {
registration.Unregister();
unregistered = true;
});
}
TEST(LinkTest, ConcurrentForceAndSetReady) {
PromiseFuturePair<int> pair1, pair2, pair3;
TestConcurrent(
1000,
[&] {
pair1 = PromiseFuturePair<int>::Make(1);
pair2 = PromiseFuturePair<int>::Make();
pair3 = PromiseFuturePair<int>::Make();
LinkResult(pair2.promise, pair1.future);
LinkResult(pair3.promise, pair2.future);
},
[&] {},
[&] { pair1.promise.SetResult(2); },
[&] { pair3.future.Force(); });
}
TEST(LinkTest, NoFutures) {
auto pair = PromiseFuturePair<int>::Make();
Link([](Promise<int> promise) { promise.SetResult(5); }, pair.promise);
ASSERT_TRUE(pair.future.ready());
ASSERT_TRUE(pair.future.result());
EXPECT_EQ(5, pair.future.value());
}
TEST(LinkTest, NoCallback) {
auto [promise, future] = PromiseFuturePair<int>::Make();
promise.ExecuteWhenForced([](Promise<int> promise) { promise.SetResult(5); });
{
auto [linked_promise, linked_future] = PromiseFuturePair<int>::Make();
auto link = LinkResult(linked_promise, future);
EXPECT_FALSE(linked_future.ready());
link.Unregister();
linked_future.Force();
EXPECT_FALSE(linked_future.ready());
EXPECT_FALSE(future.ready());
}
{
auto [linked_promise, linked_future] = PromiseFuturePair<int>::Make();
auto link = LinkResult(linked_promise, future);
EXPECT_FALSE(linked_future.ready());
linked_future.Force();
ASSERT_TRUE(linked_future.ready());
ASSERT_TRUE(future.ready());
EXPECT_THAT(linked_future.result(), ::testing::Optional(5));
}
{
auto [linked_promise, linked_future] = PromiseFuturePair<int>::Make();
auto link = LinkResult(linked_promise, future);
ASSERT_TRUE(linked_future.ready());
EXPECT_THAT(linked_future.result(), ::testing::Optional(5));
}
}
TEST(LinkErrorTest, ConcurrentForceAndSetReady) {
PromiseFuturePair<void> pairA, pairB;
TestConcurrent(
1000,
[&] {
pairA = PromiseFuturePair<void>::Make(tensorstore::MakeResult());
pairB = PromiseFuturePair<void>::Make(tensorstore::MakeResult());
LinkError(pairA.promise, pairB.future);
},
[&] {
EXPECT_TRUE(pairB.future.ready());
EXPECT_TRUE(pairB.future.result());
EXPECT_FALSE(pairA.future.ready());
},
[&] { pairA.future.Force(); },
[&] { pairB.promise.SetReady(); });
}
TEST(LinkErrorTest, ConcurrentSetError) {
PromiseFuturePair<void> pairA, pairB, pairC;
TestConcurrent(
1000,
[&] {
pairA = PromiseFuturePair<void>::Make(tensorstore::MakeResult());
pairB = PromiseFuturePair<void>::Make(tensorstore::MakeResult());
pairC = PromiseFuturePair<void>::Make(tensorstore::MakeResult());
LinkError(pairA.promise, pairB.future, pairC.future);
},
[&] {
EXPECT_TRUE(pairA.future.ready());
EXPECT_TRUE(pairB.future.ready());
EXPECT_TRUE(pairC.future.ready());
EXPECT_FALSE(pairA.future.result());
EXPECT_FALSE(pairB.future.result());
EXPECT_FALSE(pairC.future.result());
},
[&] { pairB.promise.SetResult(absl::UnknownError("")); },
[&] { pairC.promise.SetResult(absl::UnknownError("")); });
}
TEST(LinkErrorTest, ConcurrentForceAndSetError) {
PromiseFuturePair<void> pairA, pairB;
TestConcurrent(
1000,
[&] {
pairA = PromiseFuturePair<void>::Make(tensorstore::MakeResult());
pairB = PromiseFuturePair<void>::Make(tensorstore::MakeResult());
LinkError(pairA.promise, pairB.future);
},
[&] {
EXPECT_TRUE(pairB.future.ready());
EXPECT_TRUE(pairA.future.ready());
EXPECT_FALSE(pairB.future.result());
EXPECT_FALSE(pairA.future.result());
},
[&] { pairA.future.Force(); },
[&] { pairB.promise.SetResult(absl::UnknownError("")); });
}
TEST(LinkErrorTest, LinkErrorVoidImmediateSuccessFailure) {
auto [promise, future] = PromiseFuturePair<void>::Make();
LinkError(std::move(promise), MakeReadyFuture<void>(absl::OkStatus()),
MakeReadyFuture<void>(absl::OkStatus()));
ASSERT_TRUE(future.ready());
EXPECT_FALSE(future.result().ok());
}
TEST(LinkErrorTest, LinkErrorVoidImmediateSuccessOk) {
auto [promise, future] = PromiseFuturePair<void>::Make(absl::OkStatus());
LinkError(std::move(promise), MakeReadyFuture<void>(absl::OkStatus()),
MakeReadyFuture<void>(absl::OkStatus()));
ASSERT_TRUE(future.ready());
EXPECT_TRUE(future.result().ok());
}
TEST(PromiseFuturePairTest, LinkImmediateSuccess) {
auto future = PromiseFuturePair<int>::Link(
[](Promise<int> p, ReadyFuture<int> f) {
p.SetResult(f.value() + 1);
},
MakeReadyFuture<int>(1))
.future;
EXPECT_EQ(2, future.value());
}
TEST(PromiseFuturePairTest, LinkImmediateFailure) {
auto future =
PromiseFuturePair<int>::Link(
[](Promise<int> p, ReadyFuture<int> f) { p.SetResult(f.result()); },
MakeReadyFuture<int>(absl::UnknownError("Fail")))
.future;
EXPECT_THAT(future.result(),
MatchesStatus(absl::StatusCode::kUnknown, "Fail"));
}
TEST(PromiseFuturePairTest, LinkDeferredSuccess) {
auto pair = PromiseFuturePair<int>::Make();
auto future = PromiseFuturePair<int>::Link(
[](Promise<int> p, ReadyFuture<int> f) {
p.SetResult(f.value() + 1);
},
pair.future)
.future;
EXPECT_FALSE(future.ready());
pair.promise.SetResult(1);
EXPECT_EQ(2, future.value());
}
TEST(PromiseFuturePairTest, LinkDeferredFailure) {
auto pair = PromiseFuturePair<int>::Make();
auto future =
PromiseFuturePair<int>::Link(
[](Promise<int> p, ReadyFuture<int> f) { p.SetResult(f.result()); },
pair.future)
.future;
EXPECT_FALSE(future.ready());
pair.promise.SetResult(absl::UnknownError("Fail"));
EXPECT_THAT(future.result(),
MatchesStatus(absl::StatusCode::kUnknown, "Fail"));
}
TEST(PromiseFuturePairTest, LinkResultInit) {
auto pair = PromiseFuturePair<int>::Make();
auto future = PromiseFuturePair<int>::Link(
5, [](Promise<int> p, ReadyFuture<int> f) {}, pair.future)
.future;
EXPECT_FALSE(future.ready());
pair.promise.SetResult(3);
EXPECT_EQ(5, future.value());
}
TEST(PromiseFuturePairTest, LinkValueImmediateSuccess) {
auto future = PromiseFuturePair<int>::LinkValue(
[](Promise<int> p, ReadyFuture<int> f) {
p.SetResult(f.value() + 1);
},
MakeReadyFuture<int>(1))
.future;
EXPECT_EQ(2, future.value());
}
TEST(PromiseFuturePairTest, LinkValueImmediateFailure) {
auto future = PromiseFuturePair<int>::LinkValue(
[](Promise<int> p, ReadyFuture<int> f) {},
MakeReadyFuture<int>(absl::UnknownError("Fail")))
.future;
EXPECT_THAT(future.result(),
MatchesStatus(absl::StatusCode::kUnknown, "Fail"));
}
TEST(PromiseFuturePairTest, LinkValueDeferredSuccess) {
auto pair = PromiseFuturePair<int>::Make();
auto future = PromiseFuturePair<int>::LinkValue(
[](Promise<int> p, ReadyFuture<int> f) {
p.SetResult(f.value() + 1);
},
pair.future)
.future;
EXPECT_FALSE(future.ready());
pair.promise.SetResult(1);
EXPECT_EQ(2, future.value());
}
TEST(PromiseFuturePairTest, LinkValueDeferredFailure) {
auto pair = PromiseFuturePair<int>::Make();
auto future = PromiseFuturePair<int>::LinkValue(
[](Promise<int> p, ReadyFuture<int> f) {}, pair.future)
.future;
EXPECT_FALSE(future.ready());
pair.promise.SetResult(absl::UnknownError("Fail"));
EXPECT_THAT(future.result(),
MatchesStatus(absl::StatusCode::kUnknown, "Fail"));
}
TEST(PromiseFuturePairTest, LinkValueResultInit) {
auto pair = PromiseFuturePair<int>::Make();
auto future = PromiseFuturePair<int>::LinkValue(
5, [](Promise<int> p, ReadyFuture<int> f) {}, pair.future)
.future;
EXPECT_FALSE(future.ready());
pair.promise.SetResult(3);
EXPECT_EQ(5, future.value());
}
TEST(PromiseFuturePairTest, LinkErrorImmediateSuccess) {
auto future =
PromiseFuturePair<int>::LinkError(3, MakeReadyFuture<int>(1)).future;
EXPECT_EQ(3, future.value());
}
TEST(PromiseFuturePairTest, LinkErrorImmediateFailure) {
auto future = PromiseFuturePair<int>::LinkError(
3, MakeReadyFuture<int>(1),
MakeReadyFuture<int>(absl::UnknownError("Fail")))
.future;
EXPECT_THAT(future.result(),
MatchesStatus(absl::StatusCode::kUnknown, "Fail"));
}
TEST(PromiseFuturePairTest, LinkErrorDeferredSuccess) {
auto pair = PromiseFuturePair<int>::Make();
auto future = PromiseFuturePair<int>::LinkError(3, pair.future).future;
EXPECT_FALSE(future.ready());
pair.promise.SetResult(5);
EXPECT_EQ(3, future.value());
}
TEST(PromiseFuturePairTest, LinkErrorDeferredFailure) {
auto pair = PromiseFuturePair<int>::Make();
auto future =
PromiseFuturePair<int>::LinkError(3, MakeReadyFuture<int>(1), pair.future)
.future;
EXPECT_FALSE(pair.future.ready());
pair.promise.SetResult(absl::UnknownError("Fail"));
EXPECT_THAT(future.result(),
MatchesStatus(absl::StatusCode::kUnknown, "Fail"));
}
TEST(LinkTest, DestroyCallback) {
auto pair1 = PromiseFuturePair<int>::Make();
auto sentinel = std::make_shared<bool>(false);
auto pair2 = PromiseFuturePair<void>::Make();
auto registration =
Link([sentinel](Promise<void>, ReadyFuture<int>) { *sentinel = true; },
pair2.promise, pair1.future);
EXPECT_EQ(2, sentinel.use_count());
pair1.promise.SetResult(1);
EXPECT_EQ(true, *sentinel);
EXPECT_EQ(1, sentinel.use_count());
}
TEST(PromiseFuturePairTest, LinkDestroyCallback) {
auto pair1 = PromiseFuturePair<int>::Make();
auto sentinel = std::make_shared<bool>(false);
auto pair2 = PromiseFuturePair<void>::Link(
[sentinel](Promise<void>, ReadyFuture<int>) { *sentinel = true; },
pair1.future);
EXPECT_EQ(2, sentinel.use_count());
pair1.promise.SetResult(1);
EXPECT_EQ(true, *sentinel);
EXPECT_EQ(1, sentinel.use_count());
}
TEST(WaitAllFuture, NoFuturesSpan) {
std::vector<AnyFuture> futures;
auto future = WaitAllFuture(futures);
ASSERT_TRUE(future.ready());
ASSERT_TRUE(future.result().ok());
}
TEST(WaitAllFuture, ReadyFuture) {
auto future = WaitAllFuture(MakeReadyFuture<void>(absl::OkStatus()),
MakeReadyFuture<void>(absl::OkStatus()));
ASSERT_TRUE(future.ready());
EXPECT_TRUE(future.result().ok());
future = WaitAllFuture(MakeReadyFuture<void>(absl::OkStatus()),
MakeReadyFuture<void>(absl::OkStatus()),
MakeReadyFuture<void>(absl::InternalError("")));
ASSERT_TRUE(future.ready());
ASSERT_FALSE(future.result().ok());
}
TEST(WaitAllFuture, ReadyFutureSpanError) {
std::vector<AnyFuture> futures{MakeReadyFuture<void>(absl::OkStatus()),
MakeReadyFuture<void>(absl::OkStatus())};
auto future = WaitAllFuture(futures);
ASSERT_TRUE(future.ready());
EXPECT_TRUE(future.result().ok());
futures.push_back(MakeReadyFuture<void>(absl::InternalError("")));
future = WaitAllFuture(futures);
ASSERT_TRUE(future.ready());
ASSERT_FALSE(future.result().ok());
}
TEST(WaitAllFuture, ReadyFutureSpan) {
std::vector<AnyFuture> futures;
for (int i = 0; i < 17; i++) {
auto future = WaitAllFuture(futures);
ASSERT_TRUE(future.ready());
EXPECT_TRUE(future.result().ok());
futures.emplace_back(MakeReadyFuture<void>(absl::OkStatus()));
}
}
// WaitAllFuture over non-void futures becomes ready only after every input
// is ready, and reflects failure if any input holds an error.
TEST(WaitAllFuture, NonVoidFuture) {
  auto a = PromiseFuturePair<int>::Make();
  auto b = PromiseFuturePair<int>::Make();
  auto future = WaitAllFuture(a.future, b.future);
  ASSERT_FALSE(future.ready());
  a.promise.SetResult(2);
  // One ready input is not enough.
  ASSERT_FALSE(future.ready());
  b.promise.SetResult(absl::InternalError(""));
  ASSERT_TRUE(future.ready());
  // The error from `b` makes the aggregate result an error.
  EXPECT_FALSE(future.result().ok());
}
TEST(MapFutureTest, NoFutures) {
auto future = MapFuture(InlineExecutor{}, [] { return 3; });
ASSERT_TRUE(future.ready());
ASSERT_TRUE(future.result());
EXPECT_EQ(3, future.value());
}
TEST(MapFutureTest, BothReady) {
auto a = MakeReadyFuture<int>(3);
auto b = MakeReadyFuture<int>(5);
auto c = MapFuture(
InlineExecutor{},
[](Result<int> a, Result<int> b) -> Result<int> {
return MapResult(std::plus<int>{}, a, b);
},
a, b);
EXPECT_EQ(8, c.result().value());
}
TEST(MapFutureTest, NonConstOperator) {
struct MyStruct {
Result<int> operator()() { return 2; }
};
Future<int> x = MapFuture(InlineExecutor{}, MyStruct{});
EXPECT_EQ(2, x.result().value());
}
TEST(MapFutureTest, LValueReference) {
auto a = MakeReadyFuture<int>(3);
EXPECT_EQ(3, a.value());
auto b = MapFuture(
InlineExecutor{},
[&](Result<int>& value) {
value = 10;
return 7;
},
a);
EXPECT_EQ(7, b.value());
EXPECT_EQ(10, a.value());
}
TEST(MapFutureTest, ReturnFuture) {
Future<int> a = 5;
auto f = MapFuture(
InlineExecutor{},
[](Result<int> a) -> Future<int> { return a.value() + 3; }, a);
EXPECT_THAT(f.result(), ::testing::Optional(8));
}
TEST(MapFutureValueTest, BothReady) {
auto a = MakeReadyFuture<int>(3);
auto b = MakeReadyFuture<int>(5);
auto c = MapFutureValue(InlineExecutor{}, std::plus<int>{}, a, b);
EXPECT_EQ(8, c.result().value());
}
TEST(MapFutureValueTest, LValueReference) {
auto a = MakeReadyFuture<int>(3);
EXPECT_EQ(3, a.value());
auto b = MapFutureValue(
InlineExecutor{},
[&](int& value) {
value = 10;
return 7;
},
a);
EXPECT_EQ(7, b.value());
EXPECT_EQ(10, a.value());
}
// A MapFutureValue callback may convert a successful input value into an
// error by returning a failed Result<int>.
TEST(MapFutureValueTest, ValueToError) {
  auto a = MakeReadyFuture<int>(3);
  auto b = MapFutureValue(
      InlineExecutor{},
      [](int x) -> Result<int> {
        // Turn the received value into an error carrying that value.
        return absl::UnknownError(tensorstore::StrCat("Got value: ", x));
      },
      a);
  EXPECT_THAT(b.result(),
              MatchesStatus(absl::StatusCode::kUnknown, "Got value: 3"));
}
TEST(MapFutureValueTest, ReturnFuture) {
Future<int> a = 5;
auto f = MapFutureValue(
InlineExecutor{}, [](int a) -> Future<int> { return a + 3; }, a);
EXPECT_THAT(f.result(), ::testing::Optional(8));
}
TEST(MapFutureErrorTest, Success) {
auto pair = PromiseFuturePair<int>::Make();
auto mapped = MapFutureError(
InlineExecutor{}, [](absl::Status status) { return 5; }, pair.future);
EXPECT_FALSE(mapped.ready());
pair.promise.SetResult(7);
EXPECT_EQ(7, mapped.result());
}
TEST(MapFutureErrorTest, ErrorMappedToSuccess) {
auto pair = PromiseFuturePair<int>::Make();
auto mapped = MapFutureError(
InlineExecutor{},
[](absl::Status status) {
EXPECT_EQ(absl::UnknownError("message"), status);
return 5;
},
pair.future);
EXPECT_FALSE(mapped.ready());
pair.promise.SetResult(absl::UnknownError("message"));
EXPECT_EQ(5, mapped.result());
}
TEST(MapFutureErrorTest, ErrorMappedToError) {
auto pair = PromiseFuturePair<int>::Make();
auto mapped = MapFutureError(
InlineExecutor{},
[](absl::Status status) {
return tensorstore::MaybeAnnotateStatus(status, "Mapped");
},
pair.future);
EXPECT_FALSE(mapped.ready());
pair.promise.SetResult(absl::UnknownError("message"));
EXPECT_THAT(mapped.result(),
MatchesStatus(absl::StatusCode::kUnknown, "Mapped: message"));
}
TEST(MakeReadyFutureTest, Basic) {
auto future = MakeReadyFuture();
static_assert(std::is_same_v<ReadyFuture<const void>, decltype(future)>);
EXPECT_TRUE(future.ready());
EXPECT_EQ(MakeResult(), future.result());
}
// SetDeferredResult records a result that is only published once the last
// promise reference is released; the first deferred result wins.
TEST(FutureTest, SetDeferredResult) {
  auto [promise, future] = PromiseFuturePair<int>::Make();
  SetDeferredResult(promise, 2);
  EXPECT_FALSE(future.ready());
  // A second deferred result does not replace the first.
  SetDeferredResult(promise, 3);
  EXPECT_FALSE(future.ready());
  // Dropping the promise publishes the first deferred result (2).
  promise = Promise<int>();
  ASSERT_TRUE(future.ready());
  EXPECT_THAT(future.result(), ::testing::Optional(2));
}
TEST(FutureTest, SetDeferredResultAfterReady) {
auto [promise, future] = PromiseFuturePair<int>::Make();
promise.SetResult(1);
ASSERT_TRUE(future.ready());
SetDeferredResult(promise, 2);
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(), ::testing::Optional(1));
}
// When deferred error results have been recorded, a later SetReady()
// publishes an error rather than success: the callback multiplies `value`
// by 3 (the non-ok branch), so the final value is 3.
TEST(FutureTest, SetDeferredResultSetReady) {
  auto [promise, future] = PromiseFuturePair<int>::Make();
  int value = 1;
  // value *= 2 for an ok result, *= 3 for an error result.
  future.ExecuteWhenReady(
      [&](ReadyFuture<int> r) { value *= (r.result().ok()) ? 2 : 3; });
  SetDeferredResult(promise, absl::InternalError("1"));
  SetDeferredResult(promise, absl::InternalError("2"));
  promise.SetReady();
  future.Wait();
  // The deferred error took effect: the error branch ran exactly once.
  EXPECT_EQ(3, value);
}
TEST(FutureTest, ReturnIfError) {
auto do_test = [] {
TENSORSTORE_RETURN_IF_ERROR(MakeReadyFuture<int>(42).result(), false);
return true;
};
EXPECT_EQ(true, do_test());
}
TEST(FutureTest, UntypedExecuteWhenReadyAlreadyDone) {
Future<int> f(3);
bool ran = false;
f.UntypedExecuteWhenReady([&](AnyFuture f) { ran = true; });
EXPECT_TRUE(ran);
}
TEST(FutureTest, UntypedExecuteWhenReadyNotAlreadyDone) {
auto [promise, future] = PromiseFuturePair<int>::Make();
bool ran = false;
future.UntypedExecuteWhenReady([&](AnyFuture f) { ran = true; });
EXPECT_FALSE(ran);
promise.SetResult(5);
EXPECT_TRUE(ran);
}
TEST(FutureTest, FutureResultFuture) {
auto [promise, future] = PromiseFuturePair<int>::Make();
Result<Future<int>> rf(future);
Future<int> f(rf);
promise.SetResult(5);
EXPECT_EQ(5, f.value());
}
TEST(FutureTest, Live) {
#ifdef TENSORSTORE_METRICS_DISABLED
GTEST_SKIP() << "metrics disabled";
#endif
auto& registry = tensorstore::internal_metrics::GetMetricRegistry();
EXPECT_EQ(
0, std::get<int64_t>(
registry.Collect("/tensorstore/futures/live")->values[0].value));
{
auto [promise, future] = PromiseFuturePair<int>::Make();
EXPECT_NE(
0, std::get<int64_t>(
registry.Collect("/tensorstore/futures/live")->values[0].value));
}
EXPECT_EQ(
0, std::get<int64_t>(
registry.Collect("/tensorstore/futures/live")->values[0].value));
}
// Benchmarks registering `state.range(0)` ExecuteWhenReady callbacks on a
// single future and then firing them all by setting the promise's result.
static void BM_Future_ExecuteWhenReady(benchmark::State& state) {
  int num_callbacks = state.range(0);
  for (auto _ : state) {
    auto pair = PromiseFuturePair<int>::Make();
    for (int i = 0; i < num_callbacks; i++) {
      pair.future.ExecuteWhenReady(
          // DoNotOptimize keeps the value read from being eliminated.
          [](ReadyFuture<int> a) { benchmark::DoNotOptimize(a.value()); });
    }
    pair.promise.SetResult(1);
    pair.future.Wait();
  }
}
// Sweep the callback count from 0 to 256.
BENCHMARK(BM_Future_ExecuteWhenReady)->Range(0, 256);
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/future.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/future_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
411065d3-9fec-4494-900d-a7c9d8538492 | cpp | tensorflow/tensorflow | bitcast | tensorflow/c/kernels/ops/bitcast.cc | tensorflow/lite/kernels/bitcast_test.cc | #include <sstream>
#include <string>
#include "tensorflow/c/ops.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/core/framework/registration/registration.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
// Computes the Bitcast output shape from the inferred input `shape`:
// - If the output element is wider than the input element, the innermost
//   input dimension must equal output_size/input_size and is removed.
// - If the output element is narrower, a new innermost dimension of size
//   input_size/output_size is appended.
// - If the element sizes are equal, the shape is left unchanged.
// `shape` is rewritten in place; failures are reported through `status`.
static void ComputeNewShape(TF_ShapeInferenceContext* ctx,
                            TF_ShapeHandle* shape, TF_DataType input_type,
                            TF_DataType output_type, TF_Status* status) {
  size_t input_type_size = TF_DataTypeSize(input_type);
  size_t output_type_size = TF_DataTypeSize(output_type);
  if (input_type_size == 0 || output_type_size == 0) {
    // TF_DataTypeSize returns 0 for types without a fixed byte size;
    // bitcasting to or from such a type is rejected.
    std::ostringstream err;
    err << "Cannot bitcast type " << input_type << " to " << output_type
        << " because one of the type sizes is zero";
    TF_SetStatus(status, TF_INVALID_ARGUMENT, err.str().c_str());
    return;
  }
  TF_SetStatus(status, TF_OK, "");
  if (input_type_size < output_type_size) {
    // Widening cast: require rank >= 1 so there is an innermost dimension
    // to fold into the wider elements.
    TF_ShapeInferenceContextWithRankAtLeast(ctx, shape, 1, shape, status);
    if (TF_GetCode(status) == TF_OK) {
      TF_DimensionHandle* last_dim = TF_NewDimensionHandle();
      size_t divisor_val = output_type_size / input_type_size;
      TF_ShapeInferenceContextDim(ctx, shape, -1, last_dim);
      if (!TF_DimensionHandleValueKnown(last_dim) ||
          TF_DimensionHandleValue(last_dim) == divisor_val) {
        // Drop the innermost dimension. An unknown innermost dimension is
        // accepted optimistically here.
        TF_ShapeInferenceContextSubshape(ctx, shape, 0, -1, shape, status);
      } else {
        std::ostringstream err;
        err << "Cannot bitcast from " << input_type << " to " << output_type
            << " due to shape. " << TF_DimensionHandleValue(last_dim)
            << " does not match " << divisor_val;
        TF_SetStatus(status, TF_INVALID_ARGUMENT, err.str().c_str());
      }
      // Handle must be released on both the success and error paths.
      TF_DeleteDimensionHandle(last_dim);
    }
  } else if (input_type_size > output_type_size) {
    // Narrowing cast: append a new innermost dimension holding the ratio.
    size_t divisor_val = input_type_size / output_type_size;
    TF_ShapeHandle* extension =
        TF_ShapeInferenceContextVectorFromSize(ctx, divisor_val);
    TF_ShapeInferenceContextConcatenateShapes(ctx, shape, extension, shape,
                                              status);
    TF_DeleteShapeHandle(extension);
  }
}
// Shape-inference function for the Bitcast op. Starts from the input shape
// and, if every step succeeds, rewrites it via ComputeNewShape using the
// "T" (input) and "type" (output) dtype attributes. An input of unknown
// rank yields an unknown output shape.
static void bitcast_shape_inference_fn(TF_ShapeInferenceContext* ctx,
                                       TF_Status* status) {
  TF_ShapeHandle* result = TF_NewShapeHandle();
  TF_ShapeInferenceContextGetInput(ctx, 0, result, status);
  if (TF_GetCode(status) == TF_OK &&
      !TF_ShapeInferenceContextRankKnown(ctx, result)) {
    // Without a known rank we cannot fold/extend dimensions; propagate
    // an unknown shape and stop.
    TF_ShapeInferenceContextSetUnknownShape(ctx, status);
    TF_DeleteShapeHandle(result);
    return;
  }
  TF_DataType input_type;
  TF_DataType output_type;
  // Each step runs only if the previous one left `status` OK.
  if (TF_GetCode(status) == TF_OK) {
    TF_ShapeInferenceContext_GetAttrType(ctx, "T", &input_type, status);
  }
  if (TF_GetCode(status) == TF_OK) {
    TF_ShapeInferenceContext_GetAttrType(ctx, "type", &output_type, status);
  }
  if (TF_GetCode(status) == TF_OK) {
    ComputeNewShape(ctx, result, input_type, output_type, status);
  }
  if (TF_GetCode(status) == TF_OK) {
    TF_ShapeInferenceContextSetOutput(ctx, 0, result, status);
  }
  TF_DeleteShapeHandle(result);
}
// Registers the "Bitcast" op via the TF C API: one input of dtype attr "T",
// one output of dtype attr "type", both drawn from the same list of
// fixed-size numeric/quantized types, with the shape function above.
// Aborts (CHECK) if registration fails.
void RegisterBitcastOp() {
  TF_Status* status = TF_NewStatus();
  TF_OpDefinitionBuilder* op_builder = TF_NewOpDefinitionBuilder("Bitcast");
  TF_OpDefinitionBuilderAddInput(op_builder, "input: T");
  TF_OpDefinitionBuilderAddOutput(op_builder, "output: type");
  TF_OpDefinitionBuilderAddAttr(
      op_builder,
      "T: {bfloat16, half, float, double, int64, int32, uint8, uint16, "
      "uint32, uint64, int8, int16, complex64, complex128, qint8, quint8, "
      "qint16, quint16, qint32}");
  TF_OpDefinitionBuilderAddAttr(
      op_builder,
      "type: {bfloat16, half, float, double, int64, int32, uint8, uint16, "
      "uint32, uint64, int8, int16, complex64, complex128, qint8, quint8, "
      "qint16, quint16, qint32}");
  TF_OpDefinitionBuilderSetShapeInferenceFunction(op_builder,
                                                  &bitcast_shape_inference_fn);
  // TF_RegisterOpDefinition consumes `op_builder`; only `status` needs
  // explicit cleanup here.
  TF_RegisterOpDefinition(op_builder, status);
  CHECK_EQ(TF_GetCode(status), TF_OK)
      << "Bitcast op registration failed: " << TF_Message(status);
  TF_DeleteStatus(status);
}
TF_ATTRIBUTE_UNUSED static bool IsBitcastOpRegistered = []() {
if ((&TF_NewStatus != nullptr) && SHOULD_REGISTER_OP("Bitcast")) {
RegisterBitcastOp();
}
return true;
}(); | #include <algorithm>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <memory>
#include <type_traits>
#include <vector>

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
// Reinterprets the bits of `source` as a value of type `Dest` — a
// pre-C++20 stand-in for std::bit_cast. SFINAE restricts it to pairs of
// equally sized, trivially copyable types with a default-constructible
// `Dest`, so the memcpy below is well defined.
//
// Uses std::memcpy explicitly (declared in <cstring>) rather than relying
// on a transitive include providing the unqualified name.
template <
    typename Dest, typename Source,
    typename std::enable_if<sizeof(Dest) == sizeof(Source) &&
                                std::is_trivially_copyable<Source>::value &&
                                std::is_trivially_copyable<Dest>::value &&
                                std::is_default_constructible<Dest>::value,
                            int>::type = 0>
inline Dest bit_cast(const Source& source) {
  Dest dest;
  // std::addressof guards against types overloading unary operator&.
  std::memcpy(static_cast<void*>(std::addressof(dest)),
              static_cast<const void*>(std::addressof(source)), sizeof(dest));
  return dest;
}
// Thin SingleOpModel wrapper that builds a single-op TFLite graph
// containing BITCAST with the given input/output tensor specs.
class BitcastOpModel : public SingleOpModel {
 public:
  BitcastOpModel(const TensorData& input, const TensorData& output) {
    input_ = AddInput(input);
    output_ = AddOutput(output);
    SetBuiltinOp(BuiltinOperator_BITCAST, BuiltinOptions_BitcastOptions,
                 CreateBitcastOptions(builder_).Union());
    BuildInterpreter({GetShape(input_)});
  }
  // Tensor indices used to populate the input / extract the output.
  int input() const { return input_; }
  int output() const { return output_; }

 protected:
  int input_;
  int output_;
};
// Bitcasting int32 -> uint32 must preserve each element's bit pattern,
// matching a host-side bit_cast of the same values.
TEST(BitcastOpModel, BitcastInt32ToUint32) {
  BitcastOpModel model({TensorType_INT32, {2, 3}}, {TensorType_UINT32, {2, 3}});
  const std::vector<int32_t> values = {INT32_MIN, -100, -1, 0, 100, INT32_MAX};
  model.PopulateTensor<int32_t>(model.input(), values);
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  std::vector<uint32_t> expected;
  expected.reserve(values.size());
  for (const int32_t value : values) {
    expected.push_back(bit_cast<std::uint32_t>(value));
  }
  EXPECT_THAT(model.ExtractVector<uint32_t>(model.output()),
              ElementsAreArray(expected));
}
TEST(BitcastOpModel, BitcastUInt32ToInt32Inplace) {
BitcastOpModel m({TensorType_UINT32, {2, 3}}, {TensorType_INT32, {2, 3}});
std::vector<uint32_t> input = {0,
1,
100,
bit_cast<uint32_t>(INT32_MAX),
bit_cast<uint32_t>(INT32_MIN),
UINT32_MAX};
m.PopulateTensor<uint32_t>(m.input(), input);
const int kInplaceTensorIdx = 0;
const TfLiteTensor* input_tensor = m.GetInputTensor(kInplaceTensorIdx);
TfLiteTensor* output_tensor = m.GetOutputTensor(kInplaceTensorIdx);
output_tensor->data.data = input_tensor->data.data;
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int32_t> output;
std::transform(input.cbegin(), input.cend(), std::back_inserter(output),
[](uint32_t a) { return bit_cast<std::uint32_t>(a); });
EXPECT_THAT(m.ExtractVector<int32_t>(m.output()), ElementsAreArray(output));
EXPECT_EQ(output_tensor->data.data, input_tensor->data.data);
}
TEST(BitcastOpModel, BitcastUInt32ToInt32) {
BitcastOpModel m({TensorType_UINT32, {2, 3}}, {TensorType_INT32, {2, 3}});
std::vector<uint32_t> input = {0,
1,
100,
bit_cast<uint32_t>(INT32_MAX),
bit_cast<uint32_t>(INT32_MIN),
UINT32_MAX};
m.PopulateTensor<uint32_t>(m.input(), input);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int32_t> output;
std::transform(input.cbegin(), input.cend(), std::back_inserter(output),
[](uint32_t a) { return bit_cast<std::uint32_t>(a); });
EXPECT_THAT(m.ExtractVector<int32_t>(m.output()), ElementsAreArray(output));
}
// Bitcasting uint32 -> int16 splits each element into two int16 halves
// (0x00010000 and 0x0000FFFF here); the order of the halves depends on
// the host byte order, hence the #if below.
TEST(BitcastOpModel, BitcastUInt32Toint16) {
  BitcastOpModel m({TensorType_UINT32, {2, 1}}, {TensorType_INT16, {2, 1, 2}});
  std::vector<uint32_t> input = {(uint32_t)UINT16_MAX + 1,
                                 (uint32_t)UINT16_MAX};
  m.PopulateTensor<uint32_t>(m.input(), input);
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
    __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  // Big-endian host: high half comes first.
  std::vector<int16_t> output = {1, 0, 0, -1};
#else
  // Little-endian host: low half comes first (-1 is 0xFFFF as int16).
  std::vector<int16_t> output = {0, 1, -1, 0};
#endif
  EXPECT_THAT(m.ExtractVector<int16_t>(m.output()), ElementsAreArray(output));
}
TEST(BitcastOpModel, BitcastInt16ToUint32) {
BitcastOpModel m({TensorType_INT16, {2, 1, 2}}, {TensorType_UINT32, {2, 1}});
#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
std::vector<int16_t> input = {1, 0, 0, -1};
#else
std::vector<int16_t> input = {0, 1, -1, 0};
#endif
m.PopulateTensor<int16_t>(m.input(), input);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<uint32_t> output = {(uint32_t)UINT16_MAX + 1,
(uint32_t)UINT16_MAX};
EXPECT_THAT(m.ExtractVector<uint32_t>(m.output()), ElementsAreArray(output));
}
// Widening int16 -> uint32 requires the innermost input dimension to equal
// the size ratio (2); here it is 7, so model construction must die with a
// message containing "7 != 2".
TEST(BitcastOpModel, BitcastInt16ToUint32WrongShape) {
#if GTEST_HAS_DEATH_TEST
  EXPECT_DEATH(BitcastOpModel m({TensorType_INT16, {2, 2, 7}},
                                {TensorType_UINT32, {2, 7}}),
               "7 != 2");
#endif
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/kernels/ops/bitcast.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/bitcast_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
079bd92d-f5c1-4c05-b597-b2414a6c6e8c | cpp | google/googletest | gmock-nice-strict | googlemock/include/gmock/gmock-nice-strict.h | googlemock/test/gmock-nice-strict_test.cc | #ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_NICE_STRICT_H_
#define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_NICE_STRICT_H_
#include <cstdint>
#include <type_traits>
#include "gmock/gmock-spec-builders.h"
#include "gmock/internal/gmock-port.h"
namespace testing {
template <class MockClass>
class NiceMock;
template <class MockClass>
class NaggyMock;
template <class MockClass>
class StrictMock;
namespace internal {
// Overload set used only in unevaluated context (decltype below) to detect
// whether a type is, or derives from, one of the strictness wrappers.
// The overloads are declared but never defined or called.
template <typename T>
std::true_type StrictnessModifierProbe(const NiceMock<T>&);
template <typename T>
std::true_type StrictnessModifierProbe(const NaggyMock<T>&);
template <typename T>
std::true_type StrictnessModifierProbe(const StrictMock<T>&);
std::false_type StrictnessModifierProbe(...);
// True iff T is (or derives from) NiceMock/NaggyMock/StrictMock; used to
// reject nesting one strictness modifier inside another.
template <typename T>
constexpr bool HasStrictnessModifier() {
  return decltype(StrictnessModifierProbe(std::declval<const T&>()))::value;
}
#if defined(GTEST_OS_WINDOWS) && !defined(GTEST_OS_WINDOWS_MINGW) && \
(defined(_MSC_VER) || defined(__clang__))
#define GTEST_INTERNAL_EMPTY_BASE_CLASS __declspec(empty_bases)
#else
#define GTEST_INTERNAL_EMPTY_BASE_CLASS
#endif
// Base class whose constructor registers an "allow uninteresting calls"
// reaction for this object's address; because base constructors run before
// the mock class's own constructor, calls made during construction are
// already treated as nice. The destructor removes the registration.
template <typename Base>
class NiceMockImpl {
 public:
  NiceMockImpl() {
    ::testing::Mock::AllowUninterestingCalls(reinterpret_cast<uintptr_t>(this));
  }
  ~NiceMockImpl() {
    ::testing::Mock::UnregisterCallReaction(reinterpret_cast<uintptr_t>(this));
  }
};
// Base class whose constructor registers a "warn on uninteresting calls"
// reaction for this object's address before the mock class's constructor
// runs; the destructor removes the registration.
template <typename Base>
class NaggyMockImpl {
 public:
  NaggyMockImpl() {
    ::testing::Mock::WarnUninterestingCalls(reinterpret_cast<uintptr_t>(this));
  }
  ~NaggyMockImpl() {
    ::testing::Mock::UnregisterCallReaction(reinterpret_cast<uintptr_t>(this));
  }
};
// Base class whose constructor registers a "fail on uninteresting calls"
// reaction for this object's address before the mock class's constructor
// runs; the destructor removes the registration.
template <typename Base>
class StrictMockImpl {
 public:
  StrictMockImpl() {
    ::testing::Mock::FailUninterestingCalls(reinterpret_cast<uintptr_t>(this));
  }
  ~StrictMockImpl() {
    ::testing::Mock::UnregisterCallReaction(reinterpret_cast<uintptr_t>(this));
  }
};
}
// NiceMock<MockClass>: behaves like MockClass, but uninteresting calls are
// silently allowed instead of producing warnings. NiceMockImpl is a
// *private, first* base so its constructor registers the strictness before
// MockClass's constructor runs (calls made during construction are nice).
//
// Fix: the static_assert diagnostic's URL had been reduced to the broken
// fragment "https:" + "gmock_cook_book.html#..."; restored the full link.
template <class MockClass>
class GTEST_INTERNAL_EMPTY_BASE_CLASS NiceMock
    : private internal::NiceMockImpl<MockClass>,
      public MockClass {
 public:
  static_assert(!internal::HasStrictnessModifier<MockClass>(),
                "Can't apply NiceMock to a class hierarchy that already has a "
                "strictness modifier. See "
                "https://google.github.io/googletest/"
                "gmock_cook_book.html#NiceStrictNaggy");

  NiceMock() : MockClass() {
    // The empty-base optimization must hold so NiceMock<T> is layout
    // compatible with T.
    static_assert(sizeof(*this) == sizeof(MockClass),
                  "The impl subclass shouldn't introduce any padding");
  }

  // Single-argument forwarding constructor; `explicit` mirrors any
  // explicit single-argument constructor of MockClass.
  template <typename A>
  explicit NiceMock(A&& arg) : MockClass(std::forward<A>(arg)) {
    static_assert(sizeof(*this) == sizeof(MockClass),
                  "The impl subclass shouldn't introduce any padding");
  }

  // Multi-argument forwarding constructor.
  template <typename TArg1, typename TArg2, typename... An>
  NiceMock(TArg1&& arg1, TArg2&& arg2, An&&... args)
      : MockClass(std::forward<TArg1>(arg1), std::forward<TArg2>(arg2),
                  std::forward<An>(args)...) {
    static_assert(sizeof(*this) == sizeof(MockClass),
                  "The impl subclass shouldn't introduce any padding");
  }

 private:
  NiceMock(const NiceMock&) = delete;
  NiceMock& operator=(const NiceMock&) = delete;
};
// NaggyMock<MockClass>: behaves like MockClass, with the default reaction
// of warning on uninteresting calls made explicit. NaggyMockImpl is a
// private, first base so the reaction is registered before MockClass's
// constructor runs.
//
// Fix: the static_assert diagnostic's URL had been reduced to the broken
// fragment "https:" + "gmock_cook_book.html#..."; restored the full link.
template <class MockClass>
class GTEST_INTERNAL_EMPTY_BASE_CLASS NaggyMock
    : private internal::NaggyMockImpl<MockClass>,
      public MockClass {
  static_assert(!internal::HasStrictnessModifier<MockClass>(),
                "Can't apply NaggyMock to a class hierarchy that already has a "
                "strictness modifier. See "
                "https://google.github.io/googletest/"
                "gmock_cook_book.html#NiceStrictNaggy");

 public:
  NaggyMock() : MockClass() {
    // The empty-base optimization must hold so NaggyMock<T> is layout
    // compatible with T.
    static_assert(sizeof(*this) == sizeof(MockClass),
                  "The impl subclass shouldn't introduce any padding");
  }

  // Single-argument forwarding constructor; `explicit` mirrors any
  // explicit single-argument constructor of MockClass.
  template <typename A>
  explicit NaggyMock(A&& arg) : MockClass(std::forward<A>(arg)) {
    static_assert(sizeof(*this) == sizeof(MockClass),
                  "The impl subclass shouldn't introduce any padding");
  }

  // Multi-argument forwarding constructor.
  template <typename TArg1, typename TArg2, typename... An>
  NaggyMock(TArg1&& arg1, TArg2&& arg2, An&&... args)
      : MockClass(std::forward<TArg1>(arg1), std::forward<TArg2>(arg2),
                  std::forward<An>(args)...) {
    static_assert(sizeof(*this) == sizeof(MockClass),
                  "The impl subclass shouldn't introduce any padding");
  }

 private:
  NaggyMock(const NaggyMock&) = delete;
  NaggyMock& operator=(const NaggyMock&) = delete;
};
template <class MockClass>
class GTEST_INTERNAL_EMPTY_BASE_CLASS StrictMock
: private internal::StrictMockImpl<MockClass>,
public MockClass {
public:
static_assert(
!internal::HasStrictnessModifier<MockClass>(),
"Can't apply StrictMock to a class hierarchy that already has a "
"strictness modifier. See "
"https:
"gmock_cook_book.html#NiceStrictNaggy");
StrictMock() : MockClass() {
static_assert(sizeof(*this) == sizeof(MockClass),
"The impl subclass shouldn't introduce any padding");
}
template <typename A>
explicit StrictMock(A&& arg) : MockClass(std::forward<A>(arg)) {
static_assert(sizeof(*this) == sizeof(MockClass),
"The impl subclass shouldn't introduce any padding");
}
template <typename TArg1, typename TArg2, typename... An>
StrictMock(TArg1&& arg1, TArg2&& arg2, An&&... args)
: MockClass(std::forward<TArg1>(arg1), std::forward<TArg2>(arg2),
std::forward<An>(args)...) {
static_assert(sizeof(*this) == sizeof(MockClass),
"The impl subclass shouldn't introduce any padding");
}
private:
StrictMock(const StrictMock&) = delete;
StrictMock& operator=(const StrictMock&) = delete;
};
#undef GTEST_INTERNAL_EMPTY_BASE_CLASS
}
#endif | #include "gmock/gmock-nice-strict.h"
#include <string>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest-spi.h"
#include "gtest/gtest.h"
class Mock {
public:
Mock() = default;
MOCK_METHOD0(DoThis, void());
private:
Mock(const Mock&) = delete;
Mock& operator=(const Mock&) = delete;
};
namespace testing {
namespace gmock_nice_strict_test {
using testing::HasSubstr;
using testing::NaggyMock;
using testing::NiceMock;
using testing::StrictMock;
#if GTEST_HAS_STREAM_REDIRECTION
using testing::internal::CaptureStdout;
using testing::internal::GetCapturedStdout;
#endif
class NotDefaultConstructible {
public:
explicit NotDefaultConstructible(int) {}
};
class CallsMockMethodInDestructor {
public:
~CallsMockMethodInDestructor() { OnDestroy(); }
MOCK_METHOD(void, OnDestroy, ());
};
class Foo {
public:
virtual ~Foo() = default;
virtual void DoThis() = 0;
virtual int DoThat(bool flag) = 0;
};
class MockFoo : public Foo {
public:
MockFoo() = default;
void Delete() { delete this; }
MOCK_METHOD0(DoThis, void());
MOCK_METHOD1(DoThat, int(bool flag));
MOCK_METHOD0(ReturnNonDefaultConstructible, NotDefaultConstructible());
private:
MockFoo(const MockFoo&) = delete;
MockFoo& operator=(const MockFoo&) = delete;
};
class MockBar {
public:
explicit MockBar(const std::string& s) : str_(s) {}
MockBar(char a1, char a2, std::string a3, std::string a4, int a5, int a6,
const std::string& a7, const std::string& a8, bool a9, bool a10) {
str_ = std::string() + a1 + a2 + a3 + a4 + static_cast<char>(a5) +
static_cast<char>(a6) + a7 + a8 + (a9 ? 'T' : 'F') +
(a10 ? 'T' : 'F');
}
virtual ~MockBar() = default;
const std::string& str() const { return str_; }
MOCK_METHOD0(This, int());
MOCK_METHOD2(That, std::string(int, bool));
private:
std::string str_;
MockBar(const MockBar&) = delete;
MockBar& operator=(const MockBar&) = delete;
};
class MockBaz {
public:
class MoveOnly {
public:
MoveOnly() = default;
MoveOnly(const MoveOnly&) = delete;
MoveOnly& operator=(const MoveOnly&) = delete;
MoveOnly(MoveOnly&&) = default;
MoveOnly& operator=(MoveOnly&&) = default;
};
MockBaz(MoveOnly) {}
};
#if GTEST_HAS_STREAM_REDIRECTION
TEST(RawMockTest, WarningForUninterestingCall) {
const std::string saved_flag = GMOCK_FLAG_GET(verbose);
GMOCK_FLAG_SET(verbose, "warning");
MockFoo raw_foo;
CaptureStdout();
raw_foo.DoThis();
raw_foo.DoThat(true);
EXPECT_THAT(GetCapturedStdout(),
HasSubstr("Uninteresting mock function call"));
GMOCK_FLAG_SET(verbose, saved_flag);
}
TEST(RawMockTest, WarningForUninterestingCallAfterDeath) {
const std::string saved_flag = GMOCK_FLAG_GET(verbose);
GMOCK_FLAG_SET(verbose, "warning");
MockFoo* const raw_foo = new MockFoo;
ON_CALL(*raw_foo, DoThis()).WillByDefault(Invoke(raw_foo, &MockFoo::Delete));
CaptureStdout();
raw_foo->DoThis();
EXPECT_THAT(GetCapturedStdout(),
HasSubstr("Uninteresting mock function call"));
GMOCK_FLAG_SET(verbose, saved_flag);
}
TEST(RawMockTest, InfoForUninterestingCall) {
MockFoo raw_foo;
const std::string saved_flag = GMOCK_FLAG_GET(verbose);
GMOCK_FLAG_SET(verbose, "info");
CaptureStdout();
raw_foo.DoThis();
EXPECT_THAT(GetCapturedStdout(),
HasSubstr("Uninteresting mock function call"));
GMOCK_FLAG_SET(verbose, saved_flag);
}
TEST(RawMockTest, IsNaggy_IsNice_IsStrict) {
MockFoo raw_foo;
EXPECT_TRUE(Mock::IsNaggy(&raw_foo));
EXPECT_FALSE(Mock::IsNice(&raw_foo));
EXPECT_FALSE(Mock::IsStrict(&raw_foo));
}
TEST(NiceMockTest, NoWarningForUninterestingCall) {
NiceMock<MockFoo> nice_foo;
CaptureStdout();
nice_foo.DoThis();
nice_foo.DoThat(true);
EXPECT_EQ("", GetCapturedStdout());
}
TEST(NiceMockTest, NoWarningForUninterestingCallAfterDeath) {
NiceMock<MockFoo>* const nice_foo = new NiceMock<MockFoo>;
ON_CALL(*nice_foo, DoThis())
.WillByDefault(Invoke(nice_foo, &MockFoo::Delete));
CaptureStdout();
nice_foo->DoThis();
EXPECT_EQ("", GetCapturedStdout());
}
TEST(NiceMockTest, InfoForUninterestingCall) {
NiceMock<MockFoo> nice_foo;
const std::string saved_flag = GMOCK_FLAG_GET(verbose);
GMOCK_FLAG_SET(verbose, "info");
CaptureStdout();
nice_foo.DoThis();
EXPECT_THAT(GetCapturedStdout(),
HasSubstr("Uninteresting mock function call"));
GMOCK_FLAG_SET(verbose, saved_flag);
}
#endif
TEST(NiceMockTest, AllowsExpectedCall) {
NiceMock<MockFoo> nice_foo;
EXPECT_CALL(nice_foo, DoThis());
nice_foo.DoThis();
}
TEST(NiceMockTest, ThrowsExceptionForUnknownReturnTypes) {
NiceMock<MockFoo> nice_foo;
#if GTEST_HAS_EXCEPTIONS
try {
nice_foo.ReturnNonDefaultConstructible();
FAIL();
} catch (const std::runtime_error& ex) {
EXPECT_THAT(ex.what(), HasSubstr("ReturnNonDefaultConstructible"));
}
#else
EXPECT_DEATH_IF_SUPPORTED({ nice_foo.ReturnNonDefaultConstructible(); }, "");
#endif
}
TEST(NiceMockTest, UnexpectedCallFails) {
NiceMock<MockFoo> nice_foo;
EXPECT_CALL(nice_foo, DoThis()).Times(0);
EXPECT_NONFATAL_FAILURE(nice_foo.DoThis(), "called more times than expected");
}
TEST(NiceMockTest, NonDefaultConstructor) {
NiceMock<MockBar> nice_bar("hi");
EXPECT_EQ("hi", nice_bar.str());
nice_bar.This();
nice_bar.That(5, true);
}
TEST(NiceMockTest, NonDefaultConstructor10) {
NiceMock<MockBar> nice_bar('a', 'b', "c", "d", 'e', 'f', "g", "h", true,
false);
EXPECT_EQ("abcdefghTF", nice_bar.str());
nice_bar.This();
nice_bar.That(5, true);
}
TEST(NiceMockTest, AllowLeak) {
NiceMock<MockFoo>* leaked = new NiceMock<MockFoo>;
Mock::AllowLeak(leaked);
EXPECT_CALL(*leaked, DoThis());
leaked->DoThis();
}
TEST(NiceMockTest, MoveOnlyConstructor) {
NiceMock<MockBaz> nice_baz(MockBaz::MoveOnly{});
}
TEST(NiceMockTest, AcceptsClassNamedMock) {
NiceMock< ::Mock> nice;
EXPECT_CALL(nice, DoThis());
nice.DoThis();
}
TEST(NiceMockTest, IsNiceInDestructor) {
{
NiceMock<CallsMockMethodInDestructor> nice_on_destroy;
}
}
TEST(NiceMockTest, IsNaggy_IsNice_IsStrict) {
NiceMock<MockFoo> nice_foo;
EXPECT_FALSE(Mock::IsNaggy(&nice_foo));
EXPECT_TRUE(Mock::IsNice(&nice_foo));
EXPECT_FALSE(Mock::IsStrict(&nice_foo));
}
#if GTEST_HAS_STREAM_REDIRECTION
TEST(NaggyMockTest, WarningForUninterestingCall) {
const std::string saved_flag = GMOCK_FLAG_GET(verbose);
GMOCK_FLAG_SET(verbose, "warning");
NaggyMock<MockFoo> naggy_foo;
CaptureStdout();
naggy_foo.DoThis();
naggy_foo.DoThat(true);
EXPECT_THAT(GetCapturedStdout(),
HasSubstr("Uninteresting mock function call"));
GMOCK_FLAG_SET(verbose, saved_flag);
}
TEST(NaggyMockTest, WarningForUninterestingCallAfterDeath) {
const std::string saved_flag = GMOCK_FLAG_GET(verbose);
GMOCK_FLAG_SET(verbose, "warning");
NaggyMock<MockFoo>* const naggy_foo = new NaggyMock<MockFoo>;
ON_CALL(*naggy_foo, DoThis())
.WillByDefault(Invoke(naggy_foo, &MockFoo::Delete));
CaptureStdout();
naggy_foo->DoThis();
EXPECT_THAT(GetCapturedStdout(),
HasSubstr("Uninteresting mock function call"));
GMOCK_FLAG_SET(verbose, saved_flag);
}
#endif
TEST(NaggyMockTest, AllowsExpectedCall) {
NaggyMock<MockFoo> naggy_foo;
EXPECT_CALL(naggy_foo, DoThis());
naggy_foo.DoThis();
}
TEST(NaggyMockTest, UnexpectedCallFails) {
NaggyMock<MockFoo> naggy_foo;
EXPECT_CALL(naggy_foo, DoThis()).Times(0);
EXPECT_NONFATAL_FAILURE(naggy_foo.DoThis(),
"called more times than expected");
}
TEST(NaggyMockTest, NonDefaultConstructor) {
NaggyMock<MockBar> naggy_bar("hi");
EXPECT_EQ("hi", naggy_bar.str());
naggy_bar.This();
naggy_bar.That(5, true);
}
TEST(NaggyMockTest, NonDefaultConstructor10) {
NaggyMock<MockBar> naggy_bar('0', '1', "2", "3", '4', '5', "6", "7", true,
false);
EXPECT_EQ("01234567TF", naggy_bar.str());
naggy_bar.This();
naggy_bar.That(5, true);
}
TEST(NaggyMockTest, AllowLeak) {
NaggyMock<MockFoo>* leaked = new NaggyMock<MockFoo>;
Mock::AllowLeak(leaked);
EXPECT_CALL(*leaked, DoThis());
leaked->DoThis();
}
TEST(NaggyMockTest, MoveOnlyConstructor) {
NaggyMock<MockBaz> naggy_baz(MockBaz::MoveOnly{});
}
TEST(NaggyMockTest, AcceptsClassNamedMock) {
NaggyMock< ::Mock> naggy;
EXPECT_CALL(naggy, DoThis());
naggy.DoThis();
}
TEST(NaggyMockTest, IsNaggyInDestructor) {
const std::string saved_flag = GMOCK_FLAG_GET(verbose);
GMOCK_FLAG_SET(verbose, "warning");
CaptureStdout();
{
NaggyMock<CallsMockMethodInDestructor> naggy_on_destroy;
}
EXPECT_THAT(GetCapturedStdout(),
HasSubstr("Uninteresting mock function call"));
GMOCK_FLAG_SET(verbose, saved_flag);
}
TEST(NaggyMockTest, IsNaggy_IsNice_IsStrict) {
NaggyMock<MockFoo> naggy_foo;
EXPECT_TRUE(Mock::IsNaggy(&naggy_foo));
EXPECT_FALSE(Mock::IsNice(&naggy_foo));
EXPECT_FALSE(Mock::IsStrict(&naggy_foo));
}
TEST(StrictMockTest, AllowsExpectedCall) {
StrictMock<MockFoo> strict_foo;
EXPECT_CALL(strict_foo, DoThis());
strict_foo.DoThis();
}
TEST(StrictMockTest, UnexpectedCallFails) {
StrictMock<MockFoo> strict_foo;
EXPECT_CALL(strict_foo, DoThis()).Times(0);
EXPECT_NONFATAL_FAILURE(strict_foo.DoThis(),
"called more times than expected");
}
TEST(StrictMockTest, UninterestingCallFails) {
StrictMock<MockFoo> strict_foo;
EXPECT_NONFATAL_FAILURE(strict_foo.DoThis(),
"Uninteresting mock function call");
}
TEST(StrictMockTest, UninterestingCallFailsAfterDeath) {
StrictMock<MockFoo>* const strict_foo = new StrictMock<MockFoo>;
ON_CALL(*strict_foo, DoThis())
.WillByDefault(Invoke(strict_foo, &MockFoo::Delete));
EXPECT_NONFATAL_FAILURE(strict_foo->DoThis(),
"Uninteresting mock function call");
}
TEST(StrictMockTest, NonDefaultConstructor) {
StrictMock<MockBar> strict_bar("hi");
EXPECT_EQ("hi", strict_bar.str());
EXPECT_NONFATAL_FAILURE(strict_bar.That(5, true),
"Uninteresting mock function call");
}
TEST(StrictMockTest, NonDefaultConstructor10) {
StrictMock<MockBar> strict_bar('a', 'b', "c", "d", 'e', 'f', "g", "h", true,
false);
EXPECT_EQ("abcdefghTF", strict_bar.str());
EXPECT_NONFATAL_FAILURE(strict_bar.That(5, true),
"Uninteresting mock function call");
}
TEST(StrictMockTest, AllowLeak) {
StrictMock<MockFoo>* leaked = new StrictMock<MockFoo>;
Mock::AllowLeak(leaked);
EXPECT_CALL(*leaked, DoThis());
leaked->DoThis();
}
TEST(StrictMockTest, MoveOnlyConstructor) {
StrictMock<MockBaz> strict_baz(MockBaz::MoveOnly{});
}
TEST(StrictMockTest, AcceptsClassNamedMock) {
StrictMock< ::Mock> strict;
EXPECT_CALL(strict, DoThis());
strict.DoThis();
}
TEST(StrictMockTest, IsStrictInDestructor) {
EXPECT_NONFATAL_FAILURE(
{
StrictMock<CallsMockMethodInDestructor> strict_on_destroy;
},
"Uninteresting mock function call");
}
TEST(StrictMockTest, IsNaggy_IsNice_IsStrict) {
StrictMock<MockFoo> strict_foo;
EXPECT_FALSE(Mock::IsNaggy(&strict_foo));
EXPECT_FALSE(Mock::IsNice(&strict_foo));
EXPECT_TRUE(Mock::IsStrict(&strict_foo));
}
}
} | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googlemock/include/gmock/gmock-nice-strict.h | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googlemock/test/gmock-nice-strict_test.cc | a1e255a582377e1006bb88a408ac3f933ba7c916 |
58056da0-f00e-4ef5-be82-e5fd1ed26ade | cpp | google/tsl | subprocess | tsl/platform/windows/subprocess.cc | tsl/platform/subprocess_test.cc | #include "tsl/platform/subprocess.h"
#include <io.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <windows.h>
#include <vector>
#include "tsl/platform/logging.h"
#include "tsl/platform/strcat.h"
#define PIPE_BUF_SIZE 4096
namespace tsl {
namespace {
static bool IsProcessFinished(HANDLE h) {
DWORD process_return_code = STILL_ACTIVE;
GetExitCodeProcess(h, &process_return_code);
return process_return_code != STILL_ACTIVE;
}
struct ThreadData {
string* iobuf;
HANDLE iohandle;
};
DWORD WINAPI InputThreadFunction(LPVOID param) {
ThreadData* args = reinterpret_cast<ThreadData*>(param);
string* input = args->iobuf;
HANDLE in_handle = args->iohandle;
size_t buffer_pointer = 0;
size_t total_bytes_written = 0;
bool ok = true;
while (ok && total_bytes_written < input->size()) {
DWORD bytes_written_this_time;
ok = WriteFile(in_handle, input->data() + total_bytes_written,
input->size() - total_bytes_written,
&bytes_written_this_time, nullptr);
total_bytes_written += bytes_written_this_time;
}
CloseHandle(in_handle);
if (!ok) {
return GetLastError();
} else {
return 0;
}
}
DWORD WINAPI OutputThreadFunction(LPVOID param) {
ThreadData* args = reinterpret_cast<ThreadData*>(param);
string* output = args->iobuf;
HANDLE out_handle = args->iohandle;
char buf[PIPE_BUF_SIZE];
DWORD bytes_read;
bool wait_result = WaitForSingleObject(out_handle, INFINITE);
if (wait_result != WAIT_OBJECT_0) {
LOG(FATAL) << "WaitForSingleObject on child process output failed. "
"Error code: "
<< wait_result;
}
while (ReadFile(out_handle, buf, sizeof(buf), &bytes_read, nullptr) &&
bytes_read > 0) {
output->append(buf, bytes_read);
}
CloseHandle(out_handle);
return 0;
}
}
SubProcess::SubProcess(int nfds)
: running_(false),
win_pi_(nullptr),
exec_path_(nullptr),
exec_argv_(nullptr) {
for (int i = 0; i < kNFds; i++) {
action_[i] = ACTION_CLOSE;
parent_pipe_[i] = nullptr;
}
}
SubProcess::~SubProcess() {
mutex_lock procLock(proc_mu_);
mutex_lock dataLock(data_mu_);
if (win_pi_) {
auto* pi = reinterpret_cast<PROCESS_INFORMATION*>(win_pi_);
CloseHandle(pi->hProcess);
CloseHandle(pi->hThread);
delete pi;
win_pi_ = nullptr;
}
running_ = false;
FreeArgs();
ClosePipes();
}
void SubProcess::FreeArgs() {
free(exec_path_);
exec_path_ = nullptr;
if (exec_argv_) {
for (int i = 0; exec_argv_[i]; i++) {
free(exec_argv_[i]);
}
delete[] exec_argv_;
exec_argv_ = nullptr;
}
}
void SubProcess::ClosePipes() {
for (int i = 0; i < kNFds; i++) {
if (parent_pipe_[i] != nullptr) {
CloseHandle(parent_pipe_[i]);
parent_pipe_[i] = nullptr;
}
}
}
void SubProcess::SetProgram(const string& file,
const std::vector<string>& argv) {
mutex_lock procLock(proc_mu_);
mutex_lock dataLock(data_mu_);
if (running_) {
LOG(FATAL) << "SetProgram called after the process was started.";
return;
}
FreeArgs();
exec_path_ = _strdup(file.c_str());
if (exec_path_ == nullptr) {
LOG(FATAL) << "SetProgram failed to allocate file string.";
return;
}
int argc = argv.size();
exec_argv_ = new char*[argc + 1];
for (int i = 0; i < argc; i++) {
exec_argv_[i] = _strdup(argv[i].c_str());
if (exec_argv_[i] == nullptr) {
LOG(FATAL) << "SetProgram failed to allocate command argument.";
return;
}
}
exec_argv_[argc] = nullptr;
}
void SubProcess::SetChannelAction(Channel chan, ChannelAction action) {
mutex_lock procLock(proc_mu_);
mutex_lock dataLock(data_mu_);
if (running_) {
LOG(FATAL) << "SetChannelAction called after the process was started.";
} else if (!chan_valid(chan)) {
LOG(FATAL) << "SetChannelAction called with invalid channel: " << chan;
} else if ((action != ACTION_CLOSE) && (action != ACTION_PIPE) &&
(action != ACTION_DUPPARENT)) {
LOG(FATAL) << "SetChannelAction called with invalid action: " << action;
} else {
action_[chan] = action;
}
}
bool SubProcess::Start() {
mutex_lock procLock(proc_mu_);
mutex_lock dataLock(data_mu_);
if (running_) {
LOG(ERROR) << "Start called after the process was started.";
return false;
}
if ((exec_path_ == nullptr) || (exec_argv_ == nullptr)) {
LOG(ERROR) << "Start called without setting a program.";
return false;
}
SECURITY_ATTRIBUTES attrs;
attrs.nLength = sizeof(SECURITY_ATTRIBUTES);
attrs.bInheritHandle = TRUE;
attrs.lpSecurityDescriptor = nullptr;
HANDLE child_pipe_[kNFds] TF_GUARDED_BY(data_mu_);
for (int i = 0; i < kNFds; i++) {
if (action_[i] == ACTION_PIPE) {
if (!CreatePipe(i == CHAN_STDIN ? child_pipe_ + i : parent_pipe_ + i,
i == CHAN_STDIN ? parent_pipe_ + i : child_pipe_ + i,
&attrs, PIPE_BUF_SIZE)) {
LOG(ERROR) << "Cannot create pipe. Error code: " << GetLastError();
ClosePipes();
return false;
}
if (!SetHandleInformation(parent_pipe_[i], HANDLE_FLAG_INHERIT, 0)) {
LOG(ERROR) << "Cannot set pipe handle attributes.";
ClosePipes();
return false;
}
} else if (action_[i] == ACTION_DUPPARENT) {
if (i == CHAN_STDIN) {
child_pipe_[i] = GetStdHandle(STD_INPUT_HANDLE);
} else if (i == CHAN_STDOUT) {
child_pipe_[i] = GetStdHandle(STD_OUTPUT_HANDLE);
} else {
child_pipe_[i] = GetStdHandle(STD_ERROR_HANDLE);
}
} else {
parent_pipe_[i] = nullptr;
child_pipe_[i] = nullptr;
}
}
string command_line = strings::StrCat("\"", exec_path_, "\"");
for (int i = 1; exec_argv_[i]; i++) {
command_line.append(strings::StrCat(" \"", exec_argv_[i], "\""));
}
STARTUPINFOA si;
ZeroMemory(&si, sizeof(STARTUPINFO));
si.cb = sizeof(STARTUPINFO);
si.dwFlags |= STARTF_USESHOWWINDOW;
si.wShowWindow = SW_HIDE;
si.dwFlags |= STARTF_USESTDHANDLES;
if (child_pipe_[CHAN_STDIN]) {
si.hStdInput = child_pipe_[CHAN_STDIN];
}
if (child_pipe_[CHAN_STDOUT]) {
si.hStdOutput = child_pipe_[CHAN_STDOUT];
}
if (child_pipe_[CHAN_STDERR]) {
si.hStdError = child_pipe_[CHAN_STDERR];
}
win_pi_ = new PROCESS_INFORMATION;
bool bSuccess =
CreateProcessA(nullptr, const_cast<char*>(command_line.c_str()), nullptr,
nullptr, TRUE, CREATE_NO_WINDOW, nullptr, nullptr, &si,
reinterpret_cast<PROCESS_INFORMATION*>(win_pi_));
if (bSuccess) {
for (int i = 0; i < kNFds; i++) {
if (child_pipe_[i] != nullptr) {
CloseHandle(child_pipe_[i]);
child_pipe_[i] = nullptr;
}
}
running_ = true;
return true;
} else {
LOG(ERROR) << "Call to CreateProcess failed. Error code: " << GetLastError()
<< ", command: '" << command_line << "'";
ClosePipes();
return false;
}
}
bool SubProcess::Wait() {
int status;
return WaitInternal(&status);
}
bool SubProcess::WaitInternal(int* status) {
proc_mu_.lock();
bool running = running_;
PROCESS_INFORMATION pi_ = *reinterpret_cast<PROCESS_INFORMATION*>(win_pi_);
proc_mu_.unlock();
bool ret = false;
if (running && pi_.hProcess) {
DWORD wait_status = WaitForSingleObject(pi_.hProcess, INFINITE);
if (wait_status == WAIT_OBJECT_0) {
DWORD process_exit_code = 0;
if (GetExitCodeProcess(pi_.hProcess, &process_exit_code)) {
*status = static_cast<int>(process_exit_code);
} else {
LOG(FATAL) << "Wait failed with code: " << GetLastError();
}
} else {
LOG(FATAL) << "WaitForSingleObject call on the process handle failed. "
"Error code: "
<< wait_status;
}
}
proc_mu_.lock();
if ((running_ == running) &&
(pi_.hProcess ==
reinterpret_cast<PROCESS_INFORMATION*>(win_pi_)->hProcess)) {
running_ = false;
CloseHandle(reinterpret_cast<PROCESS_INFORMATION*>(win_pi_)->hProcess);
CloseHandle(reinterpret_cast<PROCESS_INFORMATION*>(win_pi_)->hThread);
reinterpret_cast<PROCESS_INFORMATION*>(win_pi_)->hProcess = nullptr;
reinterpret_cast<PROCESS_INFORMATION*>(win_pi_)->hThread = nullptr;
}
proc_mu_.unlock();
return *status == 0;
}
bool SubProcess::Kill(int unused_signal) {
proc_mu_.lock();
bool running = running_;
PROCESS_INFORMATION pi_ = *reinterpret_cast<PROCESS_INFORMATION*>(win_pi_);
proc_mu_.unlock();
bool ret = false;
if (running && pi_.hProcess) {
ret = TerminateProcess(pi_.hProcess, 0);
}
return ret;
}
int SubProcess::Communicate(const string* stdin_input, string* stdout_output,
string* stderr_output) {
proc_mu_.lock();
bool running = running_;
proc_mu_.unlock();
if (!running) {
LOG(ERROR) << "Communicate called without a running process.";
return 1;
}
HANDLE thread_handles[kNFds];
int thread_count = 0;
ThreadData thread_params[kNFds];
data_mu_.lock();
proc_mu_.lock();
bool process_finished = IsProcessFinished(
reinterpret_cast<PROCESS_INFORMATION*>(win_pi_)->hProcess);
proc_mu_.unlock();
if (!process_finished || (parent_pipe_[CHAN_STDOUT] != nullptr) ||
(parent_pipe_[CHAN_STDERR] != nullptr)) {
if (parent_pipe_[CHAN_STDIN] != nullptr) {
if (stdin_input) {
thread_params[thread_count].iobuf = const_cast<string*>(stdin_input);
thread_params[thread_count].iohandle = parent_pipe_[CHAN_STDIN];
parent_pipe_[CHAN_STDIN] = nullptr;
thread_handles[thread_count] =
CreateThread(NULL, 0, InputThreadFunction,
thread_params + thread_count, 0, NULL);
thread_count++;
}
} else {
CloseHandle(parent_pipe_[CHAN_STDIN]);
parent_pipe_[CHAN_STDIN] = NULL;
}
if (parent_pipe_[CHAN_STDOUT] != nullptr) {
if (stdout_output != nullptr) {
thread_params[thread_count].iobuf = stdout_output;
thread_params[thread_count].iohandle = parent_pipe_[CHAN_STDOUT];
parent_pipe_[CHAN_STDOUT] = NULL;
thread_handles[thread_count] =
CreateThread(NULL, 0, OutputThreadFunction,
thread_params + thread_count, 0, NULL);
thread_count++;
} else {
CloseHandle(parent_pipe_[CHAN_STDOUT]);
parent_pipe_[CHAN_STDOUT] = nullptr;
}
}
if (parent_pipe_[CHAN_STDERR] != nullptr) {
if (stderr_output != nullptr) {
thread_params[thread_count].iobuf = stderr_output;
thread_params[thread_count].iohandle = parent_pipe_[CHAN_STDERR];
parent_pipe_[CHAN_STDERR] = NULL;
thread_handles[thread_count] =
CreateThread(NULL, 0, OutputThreadFunction,
thread_params + thread_count, 0, NULL);
thread_count++;
} else {
CloseHandle(parent_pipe_[CHAN_STDERR]);
parent_pipe_[CHAN_STDERR] = nullptr;
}
}
}
if (thread_count > 0) {
DWORD wait_result = WaitForMultipleObjects(thread_count, thread_handles,
true,
INFINITE);
if (wait_result != WAIT_OBJECT_0) {
LOG(ERROR) << "Waiting on the io threads failed! result: " << wait_result
<< std::endl;
data_mu_.unlock();
return -1;
}
for (int i = 0; i < thread_count; i++) {
DWORD exit_code;
if (GetExitCodeThread(thread_handles[i], &exit_code)) {
if (exit_code) {
LOG(ERROR) << "One of the IO threads failed with code: " << exit_code;
}
} else {
LOG(ERROR) << "Error checking io thread exit statuses. Error Code: "
<< GetLastError();
}
}
}
data_mu_.unlock();
int status;
return WaitInternal(&status) ? status : -1;
}
std::unique_ptr<SubProcess> CreateSubProcess(const std::vector<string>& argv) {
std::unique_ptr<SubProcess> proc(new SubProcess());
proc->SetProgram(argv[0], argv);
proc->SetChannelAction(CHAN_STDERR, ACTION_DUPPARENT);
proc->SetChannelAction(CHAN_STDOUT, ACTION_DUPPARENT);
return proc;
}
} | #include "tsl/platform/subprocess.h"
#include <stdlib.h>
#include <algorithm>
#include <string>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/path.h"
#include "tsl/platform/strcat.h"
#include "tsl/platform/test.h"
#ifdef PLATFORM_WINDOWS
#define WIFEXITED(code) ((code) != 3)
#define WEXITSTATUS(code) (code)
#define SIGKILL 9
#else
#include <sys/wait.h>
#endif
namespace tsl {
namespace {
string EchoProgram() {
std::string path =
io::JoinPath(testing::TslSrcRoot(), "platform", "testdata", "test_echo");
return tsl::io::AppendDotExeIfWindows(path);
}
string EchoArgv1Program() {
std::string path = io::JoinPath(testing::TslSrcRoot(), "platform", "testdata",
"test_echo_argv_1");
return tsl::io::AppendDotExeIfWindows(path);
}
string NoopProgram() {
std::string path =
io::JoinPath(testing::TslSrcRoot(), "platform", "testdata", "test_noop");
return tsl::io::AppendDotExeIfWindows(path);
}
string StdErrProgram() {
std::string path = io::JoinPath(testing::TslSrcRoot(), "platform", "testdata",
"test_stderr");
return tsl::io::AppendDotExeIfWindows(path);
}
class SubProcessTest : public ::testing::Test {};
TEST_F(SubProcessTest, NoOutputNoComm) {
tsl::SubProcess proc;
proc.SetProgram(NoopProgram().c_str(), {NoopProgram()});
EXPECT_TRUE(proc.Start());
EXPECT_TRUE(proc.Wait());
}
TEST_F(SubProcessTest, NoOutput) {
tsl::SubProcess proc;
proc.SetProgram(NoopProgram().c_str(), {NoopProgram()});
proc.SetChannelAction(CHAN_STDOUT, ACTION_PIPE);
proc.SetChannelAction(CHAN_STDERR, ACTION_PIPE);
EXPECT_TRUE(proc.Start());
string out, err;
int status = proc.Communicate(nullptr, &out, &err);
EXPECT_TRUE(WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
EXPECT_EQ("", out);
EXPECT_EQ("", err);
}
TEST_F(SubProcessTest, Stdout) {
tsl::SubProcess proc;
const char test_string[] = "hello_world";
proc.SetProgram(EchoArgv1Program().c_str(),
{EchoArgv1Program(), test_string});
proc.SetChannelAction(CHAN_STDOUT, ACTION_PIPE);
proc.SetChannelAction(CHAN_STDERR, ACTION_PIPE);
EXPECT_TRUE(proc.Start());
string out, err;
int status = proc.Communicate(nullptr, &out, &err);
EXPECT_TRUE(WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
EXPECT_EQ(test_string, out);
EXPECT_EQ("", err);
}
TEST_F(SubProcessTest, StdoutIgnored) {
tsl::SubProcess proc;
const char test_string[] = "hello_world";
proc.SetProgram(EchoArgv1Program().c_str(),
{EchoArgv1Program(), test_string});
proc.SetChannelAction(CHAN_STDOUT, ACTION_PIPE);
proc.SetChannelAction(CHAN_STDERR, ACTION_PIPE);
EXPECT_TRUE(proc.Start());
int status = proc.Communicate(nullptr, nullptr, nullptr);
EXPECT_TRUE(WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
}
TEST_F(SubProcessTest, Stderr) {
tsl::SubProcess proc;
const char test_string[] = "muh_failure!";
proc.SetProgram(StdErrProgram().c_str(), {StdErrProgram(), test_string});
proc.SetChannelAction(CHAN_STDOUT, ACTION_PIPE);
proc.SetChannelAction(CHAN_STDERR, ACTION_PIPE);
EXPECT_TRUE(proc.Start());
string out, err;
int status = proc.Communicate(nullptr, &out, &err);
EXPECT_TRUE(WIFEXITED(status));
EXPECT_NE(0, WEXITSTATUS(status));
EXPECT_EQ("", out);
EXPECT_EQ(test_string, err);
}
TEST_F(SubProcessTest, StderrIgnored) {
tsl::SubProcess proc;
const char test_string[] = "muh_failure!";
proc.SetProgram(StdErrProgram().c_str(), {StdErrProgram(), test_string});
proc.SetChannelAction(CHAN_STDOUT, ACTION_PIPE);
proc.SetChannelAction(CHAN_STDERR, ACTION_PIPE);
EXPECT_TRUE(proc.Start());
int status = proc.Communicate(nullptr, nullptr, nullptr);
EXPECT_TRUE(WIFEXITED(status));
EXPECT_NE(0, WEXITSTATUS(status));
}
TEST_F(SubProcessTest, Stdin) {
tsl::SubProcess proc;
proc.SetProgram(EchoProgram().c_str(), {EchoProgram()});
proc.SetChannelAction(CHAN_STDIN, ACTION_PIPE);
EXPECT_TRUE(proc.Start());
string in = "foobar\nbarfoo\nhaha\n";
int status = proc.Communicate(&in, nullptr, nullptr);
EXPECT_TRUE(WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
}
TEST_F(SubProcessTest, StdinStdout) {
tsl::SubProcess proc;
proc.SetProgram(EchoProgram().c_str(), {EchoProgram()});
proc.SetChannelAction(CHAN_STDIN, ACTION_PIPE);
proc.SetChannelAction(CHAN_STDOUT, ACTION_PIPE);
EXPECT_TRUE(proc.Start());
string in = "foobar\nbarfoo\nhaha\n";
string out;
int status = proc.Communicate(&in, &out, nullptr);
EXPECT_TRUE(WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
out.erase(std::remove(out.begin(), out.end(), '\r'), out.end());
EXPECT_EQ(in, out);
}
TEST_F(SubProcessTest, StdinChildExit) {
tsl::SubProcess proc;
proc.SetProgram(NoopProgram().c_str(), {NoopProgram()});
proc.SetChannelAction(CHAN_STDIN, ACTION_PIPE);
EXPECT_TRUE(proc.Start());
string in;
in.reserve(1000000);
for (int i = 0; i < 100000; i++) {
in += "hello xyz\n";
}
int status = proc.Communicate(&in, nullptr, nullptr);
EXPECT_TRUE(WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
}
TEST_F(SubProcessTest, StdinStdoutOverlap) {
tsl::SubProcess proc;
proc.SetProgram(EchoProgram().c_str(), {EchoProgram()});
proc.SetChannelAction(CHAN_STDIN, ACTION_PIPE);
proc.SetChannelAction(CHAN_STDOUT, ACTION_PIPE);
EXPECT_TRUE(proc.Start());
string in;
in.reserve(1000000);
for (int i = 0; i < 100000; i++) {
in += "hello xyz\n";
}
string out;
int status = proc.Communicate(&in, &out, nullptr);
EXPECT_TRUE(WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
out.erase(std::remove(out.begin(), out.end(), '\r'), out.end());
EXPECT_EQ(in, out);
}
TEST_F(SubProcessTest, KillProc) {
tsl::SubProcess proc;
proc.SetProgram(EchoProgram().c_str(), {EchoProgram()});
proc.SetChannelAction(CHAN_STDIN, ACTION_PIPE);
proc.SetChannelAction(CHAN_STDOUT, ACTION_PIPE);
EXPECT_TRUE(proc.Start());
EXPECT_TRUE(proc.Kill(SIGKILL));
EXPECT_TRUE(proc.Wait());
EXPECT_FALSE(proc.Kill(SIGKILL));
}
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/windows/subprocess.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/subprocess_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
7ff3d883-8d11-4d35-95d6-3c1c628ac7ce | cpp | tensorflow/tensorflow | while_util | third_party/xla/xla/service/while_util.cc | third_party/xla/xla/service/while_util_test.cc | #include "xla/service/while_util.h"
#include <cstdint>
#include <iterator>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/service/call_inliner.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/tuple_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
using absl::StrCat;
static absl::StatusOr<
std::pair<HloComputation*, CallInliner::InlinedInstructionMap>>
WidenWhileCondition(HloComputation* narrow_condition, const Shape& wide_shape) {
const Shape& narrow_shape =
narrow_condition->parameter_instruction(0)->shape();
HloComputation* wide_while_cond = [&]() {
HloComputation::Builder builder(StrCat("wide.", narrow_condition->name()));
builder.AddInstruction(HloInstruction::CreateParameter(
0, wide_shape,
absl::StrCat("wide.",
narrow_condition->parameter_instruction(0)->name())));
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
return narrow_condition->parent()->AddEmbeddedComputation(builder.Build());
}();
HloInstruction* truncated_parameter = TupleUtil::ExtractPrefix(
wide_while_cond->parameter_instruction(0),
narrow_shape.tuple_shapes_size(),
absl::StrCat("renarrowed.",
wide_while_cond->parameter_instruction(0)->name()));
HloInstruction* call_narrow_cond = wide_while_cond->AddInstruction(
HloInstruction::CreateCall(ShapeUtil::MakeShape(PRED, {}),
{truncated_parameter}, narrow_condition));
wide_while_cond->set_root_instruction(call_narrow_cond);
TF_ASSIGN_OR_RETURN(auto inlined_instructions_map,
CallInliner::Inline(call_narrow_cond));
return {{wide_while_cond, std::move(inlined_instructions_map)}};
}
// Clones `narrow_body` into a new while-body computation whose parameter has
// the larger tuple shape `wide_shape`. The extra trailing tuple elements are
// passed through the loop iteration unchanged. Returns the widened body plus
// the map from the original body's instructions to their inlined clones.
static absl::StatusOr<
    std::pair<HloComputation*, CallInliner::InlinedInstructionMap>>
WidenWhileBody(HloComputation* narrow_body, const Shape& wide_shape) {
  const Shape& narrow_shape = narrow_body->parameter_instruction(0)->shape();
  // Stub computation with only the wide parameter; the root is set below.
  HloComputation* wide_while_body = [&]() {
    HloComputation::Builder builder(StrCat("wide.", narrow_body->name()));
    builder.AddInstruction(HloInstruction::CreateParameter(
        0, wide_shape,
        absl::StrCat("wide.", narrow_body->parameter_instruction(0)->name())));
    return narrow_body->parent()->AddEmbeddedComputation(builder.Build());
  }();
  HloInstruction* wide_parameter = wide_while_body->parameter_instruction(0);
  // Re-narrow the wide parameter to the prefix the original body expects.
  HloInstruction* truncated_parameter = TupleUtil::ExtractPrefix(
      wide_parameter, narrow_shape.tuple_shapes_size(),
      absl::StrCat("renarrowed.",
                   wide_while_body->parameter_instruction(0)->name()));
  HloInstruction* call_narrow_body =
      wide_while_body->AddInstruction(HloInstruction::CreateCall(
          narrow_shape, {truncated_parameter}, narrow_body));
  // GTEs that forward the extra (pass-through) tuple elements unchanged.
  std::vector<HloInstruction*> live_through_values;
  for (int i = narrow_shape.tuple_shapes_size();
       i < wide_shape.tuple_shapes_size(); i++) {
    live_through_values.push_back(wide_while_body->AddInstruction(
        HloInstruction::CreateGetTupleElement(wide_shape.tuple_shapes(i),
                                              wide_parameter, i),
        absl::StrCat(wide_while_body->name(), ".through.",
                     i - narrow_shape.tuple_shapes_size())));
  }
  // Root = (narrow body's result) ++ (pass-through values).
  wide_while_body->set_root_instruction(
      TupleUtil::AppendSuffix(call_narrow_body, live_through_values));
  TF_ASSIGN_OR_RETURN(auto inlined_instructions_map,
                      CallInliner::Inline(call_narrow_body));
  return {{wide_while_body, std::move(inlined_instructions_map)}};
}
// Rewrites `while_instr` into a new while loop whose state tuple is extended
// with `instructions`, making their values available inside the loop body as
// get-tuple-elements. The original while instruction is replaced and removed
// from its computation.
absl::StatusOr<WhileUtil::MakeInstructionsLiveInResult>
WhileUtil::MakeInstructionsLiveIn(
    HloInstruction* while_instr,
    absl::Span<HloInstruction* const> instructions) {
  CHECK(while_instr->shape().IsTuple());
  int elements_in_old_while_shape = while_instr->shape().tuple_shapes_size();
  // Append each live-in instruction's shape to the loop-state tuple shape.
  Shape new_while_shape = while_instr->shape();
  for (auto* instruction : instructions) {
    *new_while_shape.add_tuple_shapes() = instruction->shape();
  }
  // Widen the condition and body so they accept the extended tuple.
  HloComputation* new_while_condition;
  CallInliner::InlinedInstructionMap inlined_condition_instructions_map;
  TF_ASSIGN_OR_RETURN(
      std::tie(new_while_condition, inlined_condition_instructions_map),
      WidenWhileCondition(while_instr->while_condition(), new_while_shape));
  HloComputation* new_while_body;
  CallInliner::InlinedInstructionMap inlined_instructions_map;
  TF_ASSIGN_OR_RETURN(
      std::tie(new_while_body, inlined_instructions_map),
      WidenWhileBody(while_instr->while_body(), new_while_shape));
  // New init value: the old init tuple with the live-in values appended.
  HloInstruction* new_while_init =
      TupleUtil::AppendSuffix(while_instr->mutable_operand(0), instructions);
  HloComputation* containing_computation = while_instr->parent();
  HloInstruction* new_while = containing_computation->AddInstruction(
      HloInstruction::CreateWhile(new_while_shape, new_while_condition,
                                  new_while_body, new_while_init));
  // Users of the old while only see the original prefix of the new result.
  HloInstruction* replacement_instr = TupleUtil::ExtractPrefix(
      new_while, while_instr->shape().tuple_shapes_size());
  TF_RETURN_IF_ERROR(while_instr->ReplaceAllUsesWith(replacement_instr));
  TF_RETURN_IF_ERROR(containing_computation->RemoveInstruction(while_instr));
  // Materialize GTEs inside the new body for each live-in value.
  HloInstruction* while_body_param = new_while_body->parameter_instruction(0);
  std::vector<HloInstruction*> live_in_instructions;
  for (int64_t i = elements_in_old_while_shape;
       i < new_while_shape.tuple_shapes_size(); i++) {
    live_in_instructions.push_back(new_while_body->AddInstruction(
        HloInstruction::CreateGetTupleElement(
            instructions[i - elements_in_old_while_shape]->shape(),
            while_body_param, i),
        absl::StrCat(new_while_body->name(), ".in.",
                     i - elements_in_old_while_shape)));
  }
  WhileUtil::MakeInstructionsLiveInResult result;
  result.new_while_instr = new_while;
  result.replacement_instr = replacement_instr;
  result.while_body_live_in_values = std::move(live_in_instructions);
  result.while_body_instruction_map = std::move(inlined_instructions_map);
  result.while_condition_instruction_map =
      std::move(inlined_condition_instructions_map);
  return std::move(result);
}
// Builds the condition computation for a counted loop: returns true while the
// induction variable (tuple element 0 of the loop state) is < `trip_count`.
static absl::StatusOr<std::unique_ptr<HloComputation>>
MakeCountedLoopConditionComputation(const Shape& loop_state_shape,
                                    int32_t trip_count) {
  Shape scalar_pred = ShapeUtil::MakeShape(PRED, {});
  TF_ASSIGN_OR_RETURN(std::unique_ptr<HloComputation> cond_computation,
                      CreateComputationWithSignature(
                          {&loop_state_shape}, scalar_pred, "while_cond"));
  HloInstruction* trip_count_constant =
      cond_computation->AddInstruction(HloInstruction::CreateConstant(
          LiteralUtil::CreateR0<int32_t>(trip_count)));
  HloInstruction* param = cond_computation->parameter_instruction(0);
  // Induction variable lives at tuple index 0 of the loop state.
  TF_ASSIGN_OR_RETURN(HloInstruction * indvar,
                      MakeGetTupleElementHlo(param, 0));
  TF_ASSIGN_OR_RETURN(
      HloInstruction * compare,
      MakeCompareHlo(ComparisonDirection::kLt, indvar, trip_count_constant));
  cond_computation->set_root_instruction(compare);
  return std::move(cond_computation);
}
// Builds the body computation for a counted loop. It increments the induction
// variable (tuple element 0), hands the remaining tuple elements to
// `loop_body_generator`, and re-tuples {indvar + 1, generated state...}.
static absl::StatusOr<std::unique_ptr<HloComputation>>
MakeCountedLoopBodyComputation(
    const Shape& loop_state_shape,
    absl::FunctionRef<absl::StatusOr<WhileUtil::LoopStateTy>(
        HloInstruction*, const WhileUtil::LoopStateTy&)>
        loop_body_generator) {
  TF_ASSIGN_OR_RETURN(std::unique_ptr<HloComputation> body_computation,
                      CreateComputationWithSignature(
                          {&loop_state_shape}, loop_state_shape, "while_body"));
  HloInstruction* one = body_computation->AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
  HloInstruction* param = body_computation->parameter_instruction(0);
  TF_ASSIGN_OR_RETURN(HloInstruction * indvar,
                      MakeGetTupleElementHlo(param, 0));
  TF_ASSIGN_OR_RETURN(HloInstruction * next_indvar,
                      MakeBinaryHlo(HloOpcode::kAdd, indvar, one));
  // Elements 1..N-1 are the user-visible loop state.
  std::vector<HloInstruction*> loop_body_generator_args;
  for (int i = 1, e = loop_state_shape.tuple_shapes_size(); i < e; i++) {
    TF_ASSIGN_OR_RETURN(HloInstruction * tuple_element,
                        MakeGetTupleElementHlo(param, i));
    loop_body_generator_args.push_back(tuple_element);
  }
  TF_ASSIGN_OR_RETURN(std::vector<HloInstruction*> next_state,
                      loop_body_generator(indvar, loop_body_generator_args));
  // Prepend the incremented induction variable to the generated state.
  next_state.insert(next_state.begin(), next_indvar);
  HloInstruction* next_state_tuple =
      body_computation->AddInstruction(HloInstruction::CreateTuple(next_state));
  body_computation->set_root_instruction(next_state_tuple);
  return std::move(body_computation);
}
// Builds the initial loop-state tuple {0, init_values...}. Returns both the
// zero constant and the tuple instruction, since the caller must own (and
// later add to a computation) every free-standing instruction created here.
static std::pair<std::unique_ptr<HloInstruction>,
                 std::unique_ptr<HloInstruction>>
MakeInitTupleFromInitValues(const WhileUtil::LoopStateTy& init_values) {
  std::vector<HloInstruction*> init_values_with_indvar;
  init_values_with_indvar.reserve(init_values.size() + 1);
  // Induction variable starts at 0 and occupies tuple index 0.
  std::unique_ptr<HloInstruction> zero =
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0));
  init_values_with_indvar.push_back(zero.get());
  absl::c_copy(init_values, std::back_inserter(init_values_with_indvar));
  return std::make_pair(std::move(zero),
                        HloInstruction::CreateTuple(init_values_with_indvar));
}
// Computes the loop-state tuple shape {s32[], init_values' shapes...},
// assigning a default layout to any component shape that lacks one.
static Shape MakeLoopStateShapeWithLayout(
    const WhileUtil::LoopStateTy& init_values) {
  std::vector<Shape> loop_state_shape_components;
  loop_state_shape_components.reserve(init_values.size() + 1);
  // Slot 0 is the s32 induction variable.
  loop_state_shape_components.push_back(ShapeUtil::MakeShape(S32, {}));
  absl::c_transform(init_values,
                    std::back_inserter(loop_state_shape_components),
                    [](HloInstruction* instr) {
                      Shape shape = instr->shape();
                      if (!shape.has_layout()) {
                        LayoutUtil::SetToDefaultLayout(&shape);
                      }
                      return shape;
                    });
  return ShapeUtil::MakeTupleShape(loop_state_shape_components);
}
// Creates a counted while loop running `loop_body_generator` `trip_count`
// times over `init_values`. The created instructions are NOT added to any
// computation; ownership is returned so the caller can place them.
absl::StatusOr<WhileUtil::OwningLoopStateTy>
WhileUtil::MakeCountedLoop(HloModule* module, int32_t trip_count,
                           const WhileUtil::LoopStateTy& init_values,
                           WhileUtil::LoopBodyGeneratorTy loop_body_generator,
                           const OpMetadata& metadata) {
  CHECK_GE(trip_count, 0);
  Shape loop_state_shape = MakeLoopStateShapeWithLayout(init_values);
  TF_ASSIGN_OR_RETURN(
      std::unique_ptr<HloComputation> cond,
      MakeCountedLoopConditionComputation(loop_state_shape, trip_count));
  TF_ASSIGN_OR_RETURN(
      std::unique_ptr<HloComputation> body,
      MakeCountedLoopBodyComputation(loop_state_shape, loop_body_generator));
  std::unique_ptr<HloInstruction> owned_indvar;
  std::unique_ptr<HloInstruction> owned_init_tuple;
  std::tie(owned_indvar, owned_init_tuple) =
      MakeInitTupleFromInitValues(init_values);
  std::unique_ptr<HloInstruction> owned_while = HloInstruction::CreateWhile(
      loop_state_shape, module->AddEmbeddedComputation(std::move(cond)),
      module->AddEmbeddedComputation(std::move(body)), owned_init_tuple.get());
  owned_while->set_metadata(metadata);
  HloInstruction* while_instr = owned_while.get();
  std::vector<std::unique_ptr<HloInstruction>> owned;
  owned.push_back(std::move(owned_indvar));
  owned.push_back(std::move(owned_init_tuple));
  owned.push_back(std::move(owned_while));
  // Expose GTEs for the user-visible state (skipping the induction variable
  // at tuple index 0).
  std::vector<HloInstruction*> while_results;
  for (int64_t i = 0, e = init_values.size(); i < e; i++) {
    std::unique_ptr<HloInstruction> user_state =
        HloInstruction::CreateGetTupleElement(init_values[i]->shape(),
                                              while_instr, i + 1);
    while_results.push_back(user_state.get());
    owned.push_back(std::move(user_state));
  }
  return WhileUtil::OwningLoopStateTy{std::move(owned), while_results};
}
// Convenience overload: builds the counted loop and immediately adds all of
// its instructions to `computation`, returning the loop's result state.
absl::StatusOr<WhileUtil::LoopStateTy> WhileUtil::MakeCountedLoop(
    HloComputation* computation, int32_t trip_count,
    const WhileUtil::LoopStateTy& init_values,
    WhileUtil::LoopBodyGeneratorTy loop_body_generator,
    const OpMetadata& metadata) {
  TF_ASSIGN_OR_RETURN(
      auto owning_loop_state,
      MakeCountedLoop(computation->parent(), trip_count, init_values,
                      loop_body_generator, metadata));
  // Transfer ownership of the free-standing instructions to `computation`.
  for (auto& instruction_to_add : owning_loop_state.instructions_to_add) {
    computation->AddInstruction(std::move(instruction_to_add));
  }
  return owning_loop_state.while_results;
}
std::vector<HloInstruction*> WhileUtil::GetInvariantGTEsForWhileBody(
const HloComputation& while_body) {
std::vector<HloInstruction*> result;
const HloInstruction::InstructionVector root_operands =
while_body.root_instruction()->operands();
for (int i = 0; i < root_operands.size(); i++) {
HloInstruction* instr = root_operands[i];
if (instr->opcode() == HloOpcode::kGetTupleElement &&
instr->tuple_index() == i &&
instr->operand(0) == while_body.parameter_instruction(0)) {
result.push_back(instr);
}
}
return result;
}
// Groups the get-tuple-element users of the while condition's parameter by
// their tuple index.
absl::flat_hash_map<int64_t, absl::InlinedVector<HloInstruction*, 1>>
WhileUtil::GetGTEsMapForWhileConditional(
    const HloComputation& while_conditional) {
  absl::flat_hash_map<int64_t, absl::InlinedVector<HloInstruction*, 1>>
      gtes_by_index;
  for (HloInstruction* user :
       while_conditional.parameter_instruction(0)->users()) {
    if (user->opcode() != HloOpcode::kGetTupleElement) {
      continue;
    }
    gtes_by_index[user->tuple_index()].push_back(user);
  }
  return gtes_by_index;
}
} | #include "xla/service/while_util.h"
#include <memory>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = ::xla::testing::opcode_matchers;
// Test fixture providing a parsed module with a trivial two-element while
// loop plus three entry parameters for use as live-in candidates.
class WhileUtilTest : public HloTestBase {
 protected:
  // Parses the fixture module and returns its entry computation and the three
  // entry parameters via out-params.
  absl::StatusOr<std::unique_ptr<VerifiedHloModule>> GetParsedModule(
      HloComputation** entry_computation, HloInstruction** param0,
      HloInstruction** param1, HloInstruction** param2) {
    const char* const hlo_string = R"(
HloModule ModuleWithWhile
while_body {
  ROOT p_body = (f32[32,32]{1,0}, f32[32,32]{1,0}) parameter(0)
}
while_condition {
  p_cond = (f32[32,32]{1,0}, f32[32,32]{1,0}) parameter(0)
  ROOT result = pred[] constant(true)
}
ENTRY entry {
  p_entry_0 = f32[32,32]{1,0} parameter(0)
  p_entry_1 = s32[32,32]{1,0} parameter(1)
  p_entry_2 = s64[32,32]{1,0} parameter(2)
  while_init = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(p_entry_0, p_entry_0)
  ROOT while = (f32[32,32]{1,0}, f32[32,32]{1,0}) while(while_init), condition=while_condition, body=while_body
}
)";
    TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_string));
    *entry_computation = module->entry_computation();
    *param0 = (*entry_computation)->parameter_instruction(0);
    *param1 = (*entry_computation)->parameter_instruction(1);
    *param2 = (*entry_computation)->parameter_instruction(2);
    return std::move(module);
  }
};
// MakeInstructionsLiveIn with an empty instruction list should still rewrite
// the while; the replacement re-tuples the original two elements and the new
// body reconstructs the narrow parameter from the (unchanged-width) tuple.
TEST_F(WhileUtilTest, MakeZeroInstructionsLiveOp) {
  HloInstruction *param0, *param1, *param2;
  HloComputation* entry_computation;
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      GetParsedModule(&entry_computation, &param0, &param1, &param2));
  HloInstruction* while_instr = entry_computation->root_instruction();
  ASSERT_EQ(while_instr->opcode(), HloOpcode::kWhile);
  TF_ASSERT_OK_AND_ASSIGN(
      WhileUtil::MakeInstructionsLiveInResult make_live_in_result,
      WhileUtil::MakeInstructionsLiveIn(while_instr, {}));
  HloInstruction* new_while_instr = make_live_in_result.new_while_instr;
  EXPECT_THAT(
      entry_computation->root_instruction(),
      op::Tuple(op::GetTupleElement(::testing::Eq(new_while_instr), 0),
                op::GetTupleElement(::testing::Eq(new_while_instr), 1)));
  auto param_reconstructed =
      op::Tuple(op::GetTupleElement(op::Parameter(0), 0),
                op::GetTupleElement(op::Parameter(0), 1));
  EXPECT_THAT(new_while_instr->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(param_reconstructed, 0),
                        op::GetTupleElement(param_reconstructed, 1)));
}
// Making two entry parameters live-in should widen the loop state from two to
// four tuple elements, with the extra elements passed straight through the
// body via GTEs at indices 2 and 3.
TEST_F(WhileUtilTest, MakeTwoInstructionsLive) {
  HloInstruction *param0, *param1, *param2;
  HloComputation* entry_computation;
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      GetParsedModule(&entry_computation, &param0, &param1, &param2));
  HloInstruction* while_instr = entry_computation->root_instruction();
  ASSERT_EQ(while_instr->opcode(), HloOpcode::kWhile);
  TF_ASSERT_OK_AND_ASSIGN(
      WhileUtil::MakeInstructionsLiveInResult make_live_in_result,
      WhileUtil::MakeInstructionsLiveIn(while_instr,
                                        {param0, param1}));
  HloInstruction* new_while_instr = make_live_in_result.new_while_instr;
  XLA_VLOG_LINES(3, module->ToString());
  // Callers of the old while still see a two-element tuple.
  EXPECT_THAT(
      entry_computation->root_instruction(),
      op::Tuple(op::GetTupleElement(::testing::Eq(new_while_instr), 0),
                op::GetTupleElement(::testing::Eq(new_while_instr), 1)));
  auto first_half_param_reconstructed =
      op::Tuple(op::GetTupleElement(op::Parameter(0), 0),
                op::GetTupleElement(op::Parameter(0), 1));
  EXPECT_THAT(new_while_instr->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(first_half_param_reconstructed, 0),
                        op::GetTupleElement(first_half_param_reconstructed, 1),
                        op::GetTupleElement(op::Parameter(0), 2),
                        op::GetTupleElement(op::Parameter(0), 3)));
}
// Only gte.0 is loop-invariant here: the root forwards it at index 0, while
// index 1 carries the freshly computed `add`.
TEST_F(WhileUtilTest, GetInvariantGTEsForWhileBody) {
  const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
  param.b = (s32[], s32[]) parameter(0)
  gte.0 = s32[] get-tuple-element(param.b), index=0
  gte.1 = s32[] get-tuple-element(param.b), index=1
  add = s32[] add(gte.0, gte.1)
  ROOT tuple = (s32[], s32[]) tuple(gte.0, add)
}
cond {
  param.c = (s32[], s32[]) parameter(0)
  ROOT constant = pred[] constant(true)
}
ENTRY main {
  init = (s32[], s32[]) parameter(0)
  ROOT while = (s32[], s32[]) while(init), condition=cond, body=body
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloComputation* while_body = module->GetComputationWithName("body");
  ASSERT_NE(while_body, nullptr)
      << "Expected exactly one while_body computation";
  std::vector<HloInstruction*> gte_list =
      WhileUtil::GetInvariantGTEsForWhileBody(*while_body);
  ASSERT_EQ(gte_list.size(), 1);
  EXPECT_EQ((*gte_list.begin())->name(), "gte.0");
}
// Even when the loop condition has side effects (infeed), the original while
// must be removed after the rewrite, leaving exactly one while instruction.
TEST_F(WhileUtilTest, AlwaysRemovePreviousWhileBody) {
  const char* const hlo_string = R"(
HloModule WhileWithSideEffects
body {
  param.b = (s32[], s32[]) parameter(0)
  gte.0 = s32[] get-tuple-element(param.b), index=0
  gte.1 = s32[] get-tuple-element(param.b), index=1
  add = s32[] add(gte.0, gte.1)
  ROOT tuple = (s32[], s32[]) tuple(gte.0, add)
}
cond {
  param.c = (s32[], s32[]) parameter(0)
  token0 = token[] after-all()
  infeed = (pred[], token[]) infeed(token0)
  ROOT condition = pred[] get-tuple-element(infeed), index=0
}
ENTRY main {
  init = (s32[], s32[]) parameter(0)
  to_make_live_in = f32[100] parameter(1)
  ROOT while = (s32[], s32[]) while(init), condition=cond, body=body
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloComputation* main = module->GetComputationWithName("main");
  HloInstruction* while_instr = main->root_instruction();
  HloInstruction* to_make_live_in = main->parameter_instruction(1);
  TF_ASSERT_OK_AND_ASSIGN(
      WhileUtil::MakeInstructionsLiveInResult make_live_in_result,
      WhileUtil::MakeInstructionsLiveIn(while_instr,
                                        {to_make_live_in}));
  auto is_while = [](const HloInstruction* instr) {
    return instr->opcode() == HloOpcode::kWhile;
  };
  EXPECT_EQ(absl::c_count_if(main->instructions(), is_while), 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4f331c16-80de-4a7d-8bc0-99e3a571617b | cpp | google/arolla | dense_array_encoder | arolla/serialization_codecs/dense_array/encoders/dense_array_encoder.cc | arolla/serialization_codecs/dense_array/encoders/dense_array_encoder_test.cc | #include <cstddef>
#include <cstdint>
#include <type_traits>
#include <utility>
#include "absl/base/no_destructor.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "arolla/dense_array/bitmap.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/edge.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/serialization_base/encoder.h"
#include "arolla/serialization_codecs/dense_array/codec_name.h"
#include "arolla/serialization_codecs/dense_array/dense_array_codec.pb.h"
#include "arolla/serialization_codecs/registry.h"
#include "arolla/util/bytes.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/meta.h"
#include "arolla/util/text.h"
#include "arolla/util/unit.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::serialization_codecs {
namespace {
namespace bm = ::arolla::bitmap;
using ::arolla::serialization_base::Encoder;
using ::arolla::serialization_base::ValueProto;
using BitmapProto = std::decay_t<std::remove_const_t<
decltype(std::declval<DenseArrayV1Proto::DenseArrayUnitProto>().bitmap())>>;
// Creates the skeleton ValueProto every encoder in this file starts from: a
// proto whose codec_index references the DenseArray V1 codec.
absl::StatusOr<ValueProto> GenValueProto(Encoder& encoder) {
  ValueProto value_proto;
  ASSIGN_OR_RETURN(auto codec_index, encoder.EncodeCodec(kDenseArrayV1Codec));
  value_proto.set_codec_index(codec_index);
  return value_proto;
}
// Serializes `size` presence bits of `bitmap` (starting at `offset`) into a
// proto bitmap field. An all-present range is encoded as an empty bitmap.
BitmapProto GenBitmapProto(const bm::Bitmap& bitmap, int offset, int64_t size) {
  BitmapProto result;
  // Fully-present arrays use the empty-bitmap shorthand.
  if (bm::CountBits(bitmap, offset, size) == size) {
    return result;
  }
  const int64_t word_count = bm::BitmapSize(size);
  result.Resize(word_count, 0);
  for (int64_t word = 0; word < word_count; ++word) {
    result[word] = bm::GetWordWithOffset(bitmap, word, offset);
  }
  // Mask away the unused high bits of the final word.
  if (int tail_bits = size % bm::kWordBitCount) {
    result[word_count - 1] &= (1U << tail_bits) - 1;
  }
  return result;
}
// Encodes a DenseArray<Unit> value. Units carry no payload, so only the size
// and the presence bitmap are serialized.
absl::StatusOr<ValueProto> EncodeDenseArrayUnitValue(TypedRef value,
                                                     Encoder& encoder) {
  DCHECK(value.GetType() == GetQType<DenseArray<Unit>>());
  const auto& dense_array = value.UnsafeAs<DenseArray<Unit>>();
  ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder));
  auto* dense_array_unit_proto =
      value_proto.MutableExtension(DenseArrayV1Proto::extension)
          ->mutable_dense_array_unit_value();
  dense_array_unit_proto->set_size(dense_array.size());
  *dense_array_unit_proto->mutable_bitmap() = GenBitmapProto(
      dense_array.bitmap, dense_array.bitmap_bit_offset, dense_array.size());
  return value_proto;
}
// Generates the *qtype-marker* encoder for DenseArray<NAME>: the resulting
// ValueProto only sets the boolean `FIELD_qtype` flag.
#define GEN_ENCODE_DENSE_ARRAY_QTYPE(NAME, FIELD)                              \
  absl::StatusOr<ValueProto> EncodeDenseArray##NAME##QType(Encoder& encoder) { \
    ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder));                \
    value_proto.MutableExtension(DenseArrayV1Proto::extension)                 \
        ->set_##FIELD##_qtype(true);                                           \
    return value_proto;                                                        \
  }

// DenseArray<Unit> has no value payload, so only its qtype encoder is
// generated here; its value encoder is hand-written above.
GEN_ENCODE_DENSE_ARRAY_QTYPE(Unit, dense_array_unit)
// Generates the value encoder (size + presence bitmap + present values) and,
// via the trailing GEN_ENCODE_DENSE_ARRAY_QTYPE, the qtype-marker encoder for
// DenseArray<T> of numeric/boolean element types.
#define GEN_ENCODE_DENSE_ARRAY_VALUE(NAME, T, FIELD)                     \
  absl::StatusOr<ValueProto> EncodeDenseArray##NAME##Value(TypedRef value, \
                                                           Encoder& encoder) { \
                                                                         \
    const auto& dense_array = value.UnsafeAs<DenseArray<T>>();           \
    ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder));          \
    auto* dense_array_value_proto =                                      \
        value_proto.MutableExtension(DenseArrayV1Proto::extension)       \
            ->mutable_##FIELD##_value();                                 \
    dense_array_value_proto->set_size(dense_array.size());               \
    *dense_array_value_proto->mutable_bitmap() =                         \
        GenBitmapProto(dense_array.bitmap, dense_array.bitmap_bit_offset, \
                       dense_array.size());                              \
    dense_array.ForEach([&](int64_t, bool present, const T& value) {     \
      if (present) {                                                     \
        dense_array_value_proto->add_values(value);                      \
      }                                                                  \
    });                                                                  \
    return value_proto;                                                  \
  }                                                                      \
  GEN_ENCODE_DENSE_ARRAY_QTYPE(NAME, FIELD)

GEN_ENCODE_DENSE_ARRAY_VALUE(Boolean, bool, dense_array_boolean)
GEN_ENCODE_DENSE_ARRAY_VALUE(Int32, int32_t, dense_array_int32)
GEN_ENCODE_DENSE_ARRAY_VALUE(Int64, int64_t, dense_array_int64)
GEN_ENCODE_DENSE_ARRAY_VALUE(UInt64, uint64_t, dense_array_uint64)
GEN_ENCODE_DENSE_ARRAY_VALUE(Float32, float, dense_array_float32)
GEN_ENCODE_DENSE_ARRAY_VALUE(Float64, double, dense_array_float64)
#undef GEN_ENCODE_DENSE_ARRAY_VALUE
// Generates the value encoder for string-like element types (Bytes/Text).
// The shared character buffer is stored once; per-present-element start/end
// offsets are rebased against the buffer's base_offset.
#define GEN_ENCODE_DENSE_ARRAY_STRING_VALUE(NAME, T, FIELD)              \
  absl::StatusOr<ValueProto> EncodeDenseArray##NAME##Value(TypedRef value, \
                                                           Encoder& encoder) { \
                                                                         \
    const auto& dense_array = value.UnsafeAs<DenseArray<T>>();           \
    ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder));          \
    auto* dense_array_value_proto =                                      \
        value_proto.MutableExtension(DenseArrayV1Proto::extension)       \
            ->mutable_##FIELD##_value();                                 \
    dense_array_value_proto->set_size(dense_array.size());               \
    *dense_array_value_proto->mutable_bitmap() =                         \
        GenBitmapProto(dense_array.bitmap, dense_array.bitmap_bit_offset, \
                       dense_array.size());                              \
    dense_array_value_proto->set_characters(                             \
        dense_array.values.characters().span().data(),                   \
        dense_array.values.characters().span().size());                  \
    for (size_t i = 0; i < dense_array.size(); ++i) {                    \
      if (dense_array.present(i)) {                                      \
        const auto& offset = dense_array.values.offsets()[i];            \
        dense_array_value_proto->add_value_offset_starts(                \
            offset.start - dense_array.values.base_offset());            \
        dense_array_value_proto->add_value_offset_ends(                  \
            offset.end - dense_array.values.base_offset());              \
      }                                                                  \
    }                                                                    \
    return value_proto;                                                  \
  }                                                                      \
  GEN_ENCODE_DENSE_ARRAY_QTYPE(NAME, FIELD)

GEN_ENCODE_DENSE_ARRAY_STRING_VALUE(Bytes, Bytes, dense_array_bytes)
GEN_ENCODE_DENSE_ARRAY_STRING_VALUE(Text, Text, dense_array_text)
#undef GEN_ENCODE_DENSE_ARRAY_STRING_VALUE
#undef GEN_ENCODE_DENSE_ARRAY_QTYPE
// Encodes the DENSE_ARRAY_EDGE qtype marker.
absl::StatusOr<ValueProto> EncodeDenseArrayEdgeQType(Encoder& encoder) {
  ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder));
  auto* ext = value_proto.MutableExtension(DenseArrayV1Proto::extension);
  ext->set_dense_array_edge_qtype(true);
  return value_proto;
}
// Encodes a DenseArrayEdge value. The edge's underlying values array is
// encoded as a separate referenced value; the edge proto stores the edge type
// and, for MAPPING edges, the parent-side size.
absl::StatusOr<ValueProto> EncodeDenseArrayEdgeValue(TypedRef value,
                                                     Encoder& encoder) {
  ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder));
  auto* dense_array_edge_proto =
      value_proto.MutableExtension(DenseArrayV1Proto::extension)
          ->mutable_dense_array_edge_value();
  const auto& dense_array_edge = value.UnsafeAs<DenseArrayEdge>();
  // The edge-values dense array is encoded separately and referenced by index.
  ASSIGN_OR_RETURN(auto dense_array_value_index,
                   encoder.EncodeValue(
                       TypedValue::FromValue(dense_array_edge.edge_values())));
  value_proto.add_input_value_indices(dense_array_value_index);
  switch (dense_array_edge.edge_type()) {
    case DenseArrayEdge::EdgeType::MAPPING:
      dense_array_edge_proto->set_edge_type(
          DenseArrayV1Proto::DenseArrayEdgeProto::MAPPING);
      // Only MAPPING edges need the explicit parent size; SPLIT_POINTS edges
      // derive it from the split-points array.
      dense_array_edge_proto->set_parent_size(dense_array_edge.parent_size());
      return value_proto;
    case DenseArrayEdge::EdgeType::SPLIT_POINTS:
      dense_array_edge_proto->set_edge_type(
          DenseArrayV1Proto::DenseArrayEdgeProto::SPLIT_POINTS);
      return value_proto;
  }
  // Fixed typo in the error message: "DesnseArrayEdge" -> "DenseArrayEdge".
  return absl::InternalError(absl::StrCat("unknown DenseArrayEdge edge type: ",
                                          dense_array_edge.edge_type()));
}
// Encodes the DENSE_ARRAY_TO_SCALAR_EDGE qtype marker.
absl::StatusOr<ValueProto> EncodeDenseArrayToScalarEdgeQType(Encoder& encoder) {
  ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder));
  auto* ext = value_proto.MutableExtension(DenseArrayV1Proto::extension);
  ext->set_dense_array_to_scalar_edge_qtype(true);
  return value_proto;
}
// Encodes a DenseArrayGroupScalarEdge; the only payload is its child size.
absl::StatusOr<ValueProto> EncodeDenseArrayToScalarEdgeValue(TypedRef value,
                                                             Encoder& encoder) {
  ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder));
  const auto& edge = value.UnsafeAs<DenseArrayGroupScalarEdge>();
  value_proto.MutableExtension(DenseArrayV1Proto::extension)
      ->set_dense_array_to_scalar_edge_value(edge.child_size());
  return value_proto;
}
// Encodes the DENSE_ARRAY_SHAPE qtype marker.
absl::StatusOr<ValueProto> EncodeDenseArrayShapeQType(Encoder& encoder) {
  ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder));
  auto* ext = value_proto.MutableExtension(DenseArrayV1Proto::extension);
  ext->set_dense_array_shape_qtype(true);
  return value_proto;
}
// Encodes a DenseArrayShape; the only payload is its size.
absl::StatusOr<ValueProto> EncodeDenseArrayShapeValue(TypedRef value,
                                                      Encoder& encoder) {
  ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder));
  const auto& shape = value.UnsafeAs<DenseArrayShape>();
  value_proto.MutableExtension(DenseArrayV1Proto::extension)
      ->set_dense_array_shape_value(shape.size);
  return value_proto;
}
// Top-level dispatcher registered with the serialization framework. Routes a
// value to the matching per-type encoder: QTypePtr values go through the
// qtype-marker table, everything else through the value-encoder table keyed
// by the value's own qtype.
absl::StatusOr<ValueProto> EncodeDenseArray(TypedRef value, Encoder& encoder) {
  using QTypeEncoder = absl::StatusOr<ValueProto> (*)(Encoder&);
  using ValueEncoder = absl::StatusOr<ValueProto> (*)(TypedRef, Encoder&);
  using QTypeEncoders = absl::flat_hash_map<QTypePtr, QTypeEncoder>;
  using ValueEncoders = absl::flat_hash_map<QTypePtr, ValueEncoder>;
  // Static dispatch tables; NoDestructor avoids exit-time destructor order
  // issues.
  static const absl::NoDestructor<QTypeEncoders> kQTypeEncoders(QTypeEncoders{
      {GetDenseArrayQType<Unit>(), &EncodeDenseArrayUnitQType},
      {GetDenseArrayQType<bool>(), &EncodeDenseArrayBooleanQType},
      {GetDenseArrayQType<Bytes>(), &EncodeDenseArrayBytesQType},
      {GetDenseArrayQType<Text>(), &EncodeDenseArrayTextQType},
      {GetDenseArrayQType<int32_t>(), &EncodeDenseArrayInt32QType},
      {GetDenseArrayQType<int64_t>(), &EncodeDenseArrayInt64QType},
      {GetDenseArrayQType<uint64_t>(), &EncodeDenseArrayUInt64QType},
      {GetDenseArrayQType<float>(), &EncodeDenseArrayFloat32QType},
      {GetDenseArrayQType<double>(), &EncodeDenseArrayFloat64QType},
      {GetQType<DenseArrayEdge>(), &EncodeDenseArrayEdgeQType},
      {GetQType<DenseArrayGroupScalarEdge>(),
       &EncodeDenseArrayToScalarEdgeQType},
      {GetQType<DenseArrayShape>(), &EncodeDenseArrayShapeQType},
  });
  static const absl::NoDestructor<ValueEncoders> kValueEncoders(ValueEncoders{
      {GetDenseArrayQType<Unit>(), &EncodeDenseArrayUnitValue},
      {GetDenseArrayQType<bool>(), &EncodeDenseArrayBooleanValue},
      {GetDenseArrayQType<Bytes>(), &EncodeDenseArrayBytesValue},
      {GetDenseArrayQType<Text>(), &EncodeDenseArrayTextValue},
      {GetDenseArrayQType<int32_t>(), &EncodeDenseArrayInt32Value},
      {GetDenseArrayQType<int64_t>(), &EncodeDenseArrayInt64Value},
      {GetDenseArrayQType<uint64_t>(), &EncodeDenseArrayUInt64Value},
      {GetDenseArrayQType<float>(), &EncodeDenseArrayFloat32Value},
      {GetDenseArrayQType<double>(), &EncodeDenseArrayFloat64Value},
      {GetQType<DenseArrayEdge>(), &EncodeDenseArrayEdgeValue},
      {GetQType<DenseArrayGroupScalarEdge>(),
       &EncodeDenseArrayToScalarEdgeValue},
      {GetQType<DenseArrayShape>(), &EncodeDenseArrayShapeValue},
  });
  if (value.GetType() == GetQType<QTypePtr>()) {
    // The value IS a qtype: encode the qtype marker.
    const auto& qtype_value = value.UnsafeAs<QTypePtr>();
    auto it = kQTypeEncoders->find(qtype_value);
    if (it != kQTypeEncoders->end()) {
      return it->second(encoder);
    }
  } else {
    auto it = kValueEncoders->find(value.GetType());
    if (it != kValueEncoders->end()) {
      return it->second(value, encoder);
    }
  }
  return absl::UnimplementedError(absl::StrFormat(
      "%s does not support serialization of %s: %s; this may indicate a "
      "missing BUILD dependency on the encoder for this qtype",
      kDenseArrayV1Codec, value.GetType()->name(), value.Repr()));
}
// Registers EncodeDenseArray for every supported qtype (edge/shape types plus
// DenseArray<T> for each scalar T) when the serialization subsystem starts.
AROLLA_INITIALIZER(
        .reverse_deps = {arolla::initializer_dep::kS11n},
        .init_fn = []() -> absl::Status {
          RETURN_IF_ERROR(RegisterValueEncoderByQType(
              GetQType<DenseArrayEdge>(), EncodeDenseArray));
          RETURN_IF_ERROR(RegisterValueEncoderByQType(
              GetQType<DenseArrayGroupScalarEdge>(), EncodeDenseArray));
          RETURN_IF_ERROR(RegisterValueEncoderByQType(
              GetQType<DenseArrayShape>(), EncodeDenseArray));
          // Register for DenseArray<T> of every scalar type; stop at the
          // first failure and report it.
          absl::Status status;
          arolla::meta::foreach_type<ScalarTypes>([&](auto meta_type) {
            if (status.ok()) {
              status = RegisterValueEncoderByQType(
                  GetDenseArrayQType<typename decltype(meta_type)::type>(),
                  EncodeDenseArray);
            }
          });
          return status;
        })
} | #include <cstdint>
#include <optional>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/buffer.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/serialization/encode.h"
#include "arolla/serialization_base/base.pb.h"
#include "arolla/serialization_codecs/dense_array/dense_array_codec.pb.h"
#include "arolla/util/text.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::serialization_codecs {
namespace {
using ::arolla::serialization::Encode;
using ::arolla::serialization_base::ValueProto;
// Encodes `value` and returns the ValueProto of the second-to-last decoding
// step (presumably the step that materializes the value itself, with the last
// step being the output reference — TODO confirm against the codec format).
template <typename T>
absl::StatusOr<ValueProto> GenValueProto(const T& value) {
  ASSIGN_OR_RETURN(auto container_proto,
                   Encode({TypedValue::FromValue(value)}, {}));
  CHECK_GT(container_proto.decoding_steps_size(), 1);
  CHECK(container_proto.decoding_steps().rbegin()[1].has_value());
  return container_proto.decoding_steps().rbegin()[1].value();
}
// The encoder must normalize a bitmap with a nonzero bit offset: the encoded
// bitmap is rebased to offset 0 and only present values are serialized.
TEST(EncodeDenseArrayTest, BitmapWithBitOffset) {
  DenseArray<float> arr;
  arr.values = CreateBuffer<float>({-1.0f, 1.0f, -1.0f, 3.0f, -1.0f});
  // With bit offset 1, the effective presence mask is 0b01010 -> elements 1
  // and 3 present.
  arr.bitmap = CreateBuffer<uint32_t>({0b1111111111111111010100});
  arr.bitmap_bit_offset = 1;
  ASSERT_OK_AND_ASSIGN(auto value_proto, GenValueProto(arr));
  ASSERT_TRUE(value_proto.HasExtension(DenseArrayV1Proto::extension));
  const auto& dense_array_proto =
      value_proto.GetExtension(DenseArrayV1Proto::extension);
  ASSERT_EQ(dense_array_proto.value_case(),
            DenseArrayV1Proto::kDenseArrayFloat32Value);
  const auto& dense_array_float32_proto =
      dense_array_proto.dense_array_float32_value();
  ASSERT_EQ(dense_array_float32_proto.size(), 5);
  ASSERT_THAT(dense_array_float32_proto.bitmap(),
              testing::ElementsAre(0b1010U));
  ASSERT_THAT(dense_array_float32_proto.values(),
              testing::ElementsAre(1.0f, 3.0f));
}
// The encoder must rebase string offsets against the buffer's base_offset:
// stored start/end offsets are relative to the characters blob.
TEST(EncodeDenseArrayTest, StringBufferBaseOffset) {
  constexpr absl::string_view characters = "abracadabra";
  DenseArray<Text> arr;
  // base_offset = 1, so raw offsets {1,3} and {8,10} map to blob ranges
  // [0,2) and [7,9).
  arr.values = StringsBuffer(
      CreateBuffer<StringsBuffer::Offsets>({{1, 3}, {4, 5}, {8, 10}, {8, 11}}),
      Buffer<char>::Create(characters.begin(), characters.end()), 1);
  arr.bitmap = CreateBuffer<uint32_t>({0b0101});
  ASSERT_THAT(arr,
              testing::ElementsAre("ab", std::nullopt, "ab", std::nullopt));
  ASSERT_OK_AND_ASSIGN(auto value_proto, GenValueProto(arr));
  ASSERT_TRUE(value_proto.HasExtension(DenseArrayV1Proto::extension));
  const auto& dense_array_proto =
      value_proto.GetExtension(DenseArrayV1Proto::extension);
  ASSERT_EQ(dense_array_proto.value_case(),
            DenseArrayV1Proto::kDenseArrayTextValue);
  const auto& dense_array_string_proto =
      dense_array_proto.dense_array_text_value();
  ASSERT_EQ(dense_array_string_proto.size(), 4);
  ASSERT_THAT(dense_array_string_proto.bitmap(), testing::ElementsAre(0b101U));
  ASSERT_EQ(dense_array_string_proto.characters(), characters);
  ASSERT_THAT(dense_array_string_proto.value_offset_starts(),
              testing::ElementsAre(0, 7));
  ASSERT_THAT(dense_array_string_proto.value_offset_ends(),
              testing::ElementsAre(2, 9));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/serialization_codecs/dense_array/encoders/dense_array_encoder.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/serialization_codecs/dense_array/encoders/dense_array_encoder_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
03550b38-ce64-42b1-8eb3-be6022a8b556 | cpp | google/leveldb | no_destructor | util/no_destructor.h | util/no_destructor_test.cc | #ifndef STORAGE_LEVELDB_UTIL_NO_DESTRUCTOR_H_
#define STORAGE_LEVELDB_UTIL_NO_DESTRUCTOR_H_
#include <type_traits>
#include <utility>
namespace leveldb {
template <typename InstanceType>
class NoDestructor {
public:
template <typename... ConstructorArgTypes>
explicit NoDestructor(ConstructorArgTypes&&... constructor_args) {
static_assert(sizeof(instance_storage_) >= sizeof(InstanceType),
"instance_storage_ is not large enough to hold the instance");
static_assert(
alignof(decltype(instance_storage_)) >= alignof(InstanceType),
"instance_storage_ does not meet the instance's alignment requirement");
new (&instance_storage_)
InstanceType(std::forward<ConstructorArgTypes>(constructor_args)...);
}
~NoDestructor() = default;
NoDestructor(const NoDestructor&) = delete;
NoDestructor& operator=(const NoDestructor&) = delete;
InstanceType* get() {
return reinterpret_cast<InstanceType*>(&instance_storage_);
}
private:
typename std::aligned_storage<sizeof(InstanceType),
alignof(InstanceType)>::type instance_storage_;
};
}
#endif | #include "util/no_destructor.h"
#include <cstdint>
#include <cstdlib>
#include <utility>
#include "gtest/gtest.h"
namespace leveldb {
namespace {
// A type whose destructor must never run: running it aborts the test binary.
// Used to prove that NoDestructor really suppresses destruction.
struct DoNotDestruct {
 public:
  DoNotDestruct(uint32_t a, uint64_t b) : a(a), b(b) {}
  ~DoNotDestruct() { std::abort(); }
  // Values set by the constructor, checked by the tests below.
  uint32_t a;
  uint64_t b;
};
// Arbitrary sentinel values forwarded through NoDestructor's constructor.
constexpr const uint32_t kGoldenA = 0xdeadbeef;
constexpr const uint64_t kGoldenB = 0xaabbccddeeffaabb;
}
// Stack-allocated wrapper: constructor args are forwarded, and the wrapped
// DoNotDestruct is not destroyed when the wrapper goes out of scope.
TEST(NoDestructorTest, StackInstance) {
  NoDestructor<DoNotDestruct> instance(kGoldenA, kGoldenB);
  ASSERT_EQ(kGoldenA, instance.get()->a);
  ASSERT_EQ(kGoldenB, instance.get()->b);
}
// Function-local static wrapper: the wrapped instance must survive program
// teardown without its destructor (and thus std::abort) being invoked.
TEST(NoDestructorTest, StaticInstance) {
  static NoDestructor<DoNotDestruct> instance(kGoldenA, kGoldenB);
  ASSERT_EQ(kGoldenA, instance.get()->a);
  ASSERT_EQ(kGoldenB, instance.get()->b);
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/no_destructor.h | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/no_destructor_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
2be3d92f-9c0f-4d2a-8f13-adbaa9b977ab | cpp | google/quiche | quic_session | quiche/quic/core/quic_session.cc | quiche/quic/core/quic_session_test.cc | #include "quiche/quic/core/quic_session.h"
#include <algorithm>
#include <cstdint>
#include <limits>
#include <memory>
#include <optional>
#include <ostream>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/frames/quic_ack_frequency_frame.h"
#include "quiche/quic/core/frames/quic_reset_stream_at_frame.h"
#include "quiche/quic/core/frames/quic_window_update_frame.h"
#include "quiche/quic/core/quic_connection.h"
#include "quiche/quic/core/quic_connection_context.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_flow_controller.h"
#include "quiche/quic/core/quic_stream.h"
#include "quiche/quic/core/quic_stream_priority.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/core/quic_write_blocked_list.h"
#include "quiche/quic/core/web_transport_write_blocked_list.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_server_stats.h"
#include "quiche/quic/platform/api/quic_stack_trace.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/quiche_callbacks.h"
#include "quiche/common/quiche_text_utils.h"
namespace quic {
namespace {
// Alarm delegate that asks the session to delete its closed streams when the
// clean-up alarm fires. Holds a non-owning pointer back to the session.
class ClosedStreamsCleanUpDelegate : public QuicAlarm::Delegate {
 public:
  explicit ClosedStreamsCleanUpDelegate(QuicSession* session)
      : session_(session) {}
  ClosedStreamsCleanUpDelegate(const ClosedStreamsCleanUpDelegate&) = delete;
  ClosedStreamsCleanUpDelegate& operator=(const ClosedStreamsCleanUpDelegate&) =
      delete;

  // Connection may already be gone by the time the alarm fires.
  QuicConnectionContext* GetConnectionContext() override {
    return (session_->connection() == nullptr)
               ? nullptr
               : session_->connection()->context();
  }

  void OnAlarm() override { session_->CleanUpClosedStreams(); }

 private:
  QuicSession* session_;  // Not owned.
};

// Alarm delegate that resets the per-event-loop stream-processing counter
// when its alarm fires. Holds a non-owning pointer back to the session.
class StreamCountResetAlarmDelegate : public QuicAlarm::Delegate {
 public:
  explicit StreamCountResetAlarmDelegate(QuicSession* session)
      : session_(session) {}
  StreamCountResetAlarmDelegate(const StreamCountResetAlarmDelegate&) = delete;
  StreamCountResetAlarmDelegate& operator=(
      const StreamCountResetAlarmDelegate&) = delete;

  // Connection may already be gone by the time the alarm fires.
  QuicConnectionContext* GetConnectionContext() override {
    return (session_->connection() == nullptr)
               ? nullptr
               : session_->connection()->context();
  }

  void OnAlarm() override { session_->OnStreamCountReset(); }

 private:
  QuicSession* session_;  // Not owned.
};
// Builds the write-blocked-list implementation that matches the requested
// priority scheme: the HTTP scheme uses QuicWriteBlockedList, while the
// WebTransport scheme uses WebTransportWriteBlockedList.
std::unique_ptr<QuicWriteBlockedListInterface> CreateWriteBlockedList(
    QuicPriorityType priority_type) {
  if (priority_type == QuicPriorityType::kHttp) {
    return std::make_unique<QuicWriteBlockedList>();
  }
  if (priority_type == QuicPriorityType::kWebTransport) {
    return std::make_unique<WebTransportWriteBlockedList>();
  }
  // All enumerators are handled above; reaching here is a programming error.
  QUICHE_NOTREACHED();
  return nullptr;
}
}
#define ENDPOINT \
(perspective() == Perspective::IS_SERVER ? "Server: " : "Client: ")
// Convenience constructor: delegates to the full constructor with no
// datagram-queue observer.
QuicSession::QuicSession(
    QuicConnection* connection, Visitor* owner, const QuicConfig& config,
    const ParsedQuicVersionVector& supported_versions,
    QuicStreamCount num_expected_unidirectional_static_streams)
    : QuicSession(connection, owner, config, supported_versions,
                  num_expected_unidirectional_static_streams, nullptr) {}
// Full constructor. Sets up the two stream-ID managers (gQUIC vs IETF), the
// session-level flow controller, the control-frame manager, the datagram
// queue, and the session-owned alarms. `connection` is not owned.
QuicSession::QuicSession(
    QuicConnection* connection, Visitor* owner, const QuicConfig& config,
    const ParsedQuicVersionVector& supported_versions,
    QuicStreamCount num_expected_unidirectional_static_streams,
    std::unique_ptr<QuicDatagramQueue::Observer> datagram_observer,
    QuicPriorityType priority_type)
    : connection_(connection),
      perspective_(connection->perspective()),
      visitor_(owner),
      write_blocked_streams_(CreateWriteBlockedList(priority_type)),
      config_(config),
      stream_id_manager_(perspective(), connection->transport_version(),
                         kDefaultMaxStreamsPerConnection,
                         config_.GetMaxBidirectionalStreamsToSend()),
      // The IETF manager's unidirectional budget is widened to account for
      // the expected static streams (e.g. HTTP/3 control streams).
      ietf_streamid_manager_(perspective(), connection->version(), this, 0,
                             num_expected_unidirectional_static_streams,
                             config_.GetMaxBidirectionalStreamsToSend(),
                             config_.GetMaxUnidirectionalStreamsToSend() +
                                 num_expected_unidirectional_static_streams),
      num_draining_streams_(0),
      num_outgoing_draining_streams_(0),
      num_static_streams_(0),
      num_zombie_streams_(0),
      // Session-level flow controller; versions that allow low flow-control
      // limits start the send window at 0 until the peer's limit is learned.
      flow_controller_(
          this, QuicUtils::GetInvalidStreamId(connection->transport_version()),
          true,
          connection->version().AllowsLowFlowControlLimits()
              ? 0
              : kMinimumFlowControlSendWindow,
          config_.GetInitialSessionFlowControlWindowToSend(),
          kSessionReceiveWindowLimit, perspective() == Perspective::IS_SERVER,
          nullptr),
      currently_writing_stream_id_(0),
      transport_goaway_sent_(false),
      transport_goaway_received_(false),
      control_frame_manager_(this),
      last_message_id_(0),
      datagram_queue_(this, std::move(datagram_observer)),
      closed_streams_clean_up_alarm_(nullptr),
      supported_versions_(supported_versions),
      is_configured_(false),
      was_zero_rtt_rejected_(false),
      liveness_testing_in_progress_(false),
      stream_count_reset_alarm_(
          absl::WrapUnique<QuicAlarm>(connection->alarm_factory()->CreateAlarm(
              new StreamCountResetAlarmDelegate(this)))),
      priority_type_(priority_type) {
  closed_streams_clean_up_alarm_ =
      absl::WrapUnique<QuicAlarm>(connection_->alarm_factory()->CreateAlarm(
          new ClosedStreamsCleanUpDelegate(this)));
  if (VersionHasIetfQuicFrames(transport_version())) {
    // Mirror the widening applied to ietf_streamid_manager_ in the config we
    // will advertise to the peer.
    config_.SetMaxUnidirectionalStreamsToSend(
        config_.GetMaxUnidirectionalStreamsToSend() +
        num_expected_unidirectional_static_streams);
  }
}
// Wires this session into its connection (visitor, session notifier, data
// producer), pushes the config into the connection, and performs
// perspective-specific setup. Must be called before the session is used.
void QuicSession::Initialize() {
  connection_->set_visitor(this);
  connection_->SetSessionNotifier(this);
  connection_->SetDataProducer(this);
  connection_->SetUnackedMapInitialCapacity();
  if (perspective_ == Perspective::IS_CLIENT) {
    // CHP1/CHP2 connection options pad the Google handshake message to
    // roughly one or two max-size packets' worth of bytes.
    if (config_.HasClientSentConnectionOption(kCHP1, perspective_)) {
      config_.SetGoogleHandshakeMessageToSend(
          std::string(kDefaultMaxPacketSize, '0'));
    } else if (config_.HasClientSentConnectionOption(kCHP2, perspective_)) {
      config_.SetGoogleHandshakeMessageToSend(
          std::string(kDefaultMaxPacketSize * 2, '0'));
    }
  }
  connection_->SetFromConfig(config_);
  if (perspective_ == Perspective::IS_CLIENT) {
    // AFFE opts the client into receiving ACK_FREQUENCY frames (IETF only).
    if (config_.HasClientRequestedIndependentOption(kAFFE, perspective_) &&
        version().HasIetfQuicFrames()) {
      connection_->set_can_receive_ack_frequency_frame();
      config_.SetMinAckDelayMs(kDefaultMinAckDelayTimeMs);
    }
  }
  // TLS servers advertise a stateless reset token derived from this session.
  if (perspective() == Perspective::IS_SERVER &&
      connection_->version().handshake_protocol == PROTOCOL_TLS1_3) {
    config_.SetStatelessResetTokenToSend(GetStatelessResetToken());
  }
  connection_->CreateConnectionIdManager();
  if (perspective() == Perspective::IS_SERVER) {
    connection_->OnSuccessfulVersionNegotiation();
  }
  // Versions with CRYPTO frames have no dedicated crypto stream id to check.
  if (QuicVersionUsesCryptoFrames(transport_version())) {
    return;
  }
  QUICHE_DCHECK_EQ(QuicUtils::GetCryptoStreamId(transport_version()),
                   GetMutableCryptoStream()->id());
}
// Destructor: permanently cancels the session-owned alarms so their
// delegates can never fire against a destroyed session.
QuicSession::~QuicSession() {
  if (closed_streams_clean_up_alarm_ != nullptr) {
    closed_streams_clean_up_alarm_->PermanentCancel();
  }
  if (stream_count_reset_alarm_ != nullptr) {
    stream_count_reset_alarm_->PermanentCancel();
  }
}
// Delivers a STREAM frame to the pending stream for `frame.stream_id`,
// creating one if needed. Returns the pending stream, or nullptr when the
// stream cannot be created (in which case a FIN's final offset is still
// recorded) or the connection got closed while processing. HTTP/3 only.
PendingStream* QuicSession::PendingStreamOnStreamFrame(
    const QuicStreamFrame& frame) {
  QUICHE_DCHECK(VersionUsesHttp3(transport_version()));
  QuicStreamId stream_id = frame.stream_id;
  PendingStream* pending = GetOrCreatePendingStream(stream_id);
  if (!pending) {
    if (frame.fin) {
      // Still account for the stream's final size for flow control.
      QuicStreamOffset final_byte_offset = frame.offset + frame.data_length;
      OnFinalByteOffsetReceived(stream_id, final_byte_offset);
    }
    return nullptr;
  }
  pending->OnStreamFrame(frame);
  if (!connection()->connected()) {
    // Frame processing may have closed the connection.
    return nullptr;
  }
  return pending;
}
// Tries to promote `pending` into a real stream (subject to the per-event-
// loop stream limit). Replays any buffered STOP_SENDING onto the new stream
// and records how long the stream was pending. Returns false if the per-loop
// limit was hit or the connection closed during processing.
bool QuicSession::MaybeProcessPendingStream(PendingStream* pending) {
  QUICHE_DCHECK(pending != nullptr && connection()->connected());
  if (ExceedsPerLoopStreamLimit()) {
    QUIC_DLOG(INFO) << "Skip processing pending stream " << pending->id()
                    << " because it exceeds per loop limit.";
    QUIC_CODE_COUNT_N(quic_pending_stream, 1, 3);
    return false;
  }
  QuicStreamId stream_id = pending->id();
  // Capture the buffered STOP_SENDING error before ProcessPendingStream may
  // invalidate `pending`.
  std::optional<QuicResetStreamError> stop_sending_error_code =
      pending->GetStopSendingErrorCode();
  QUIC_DLOG(INFO) << "Process pending stream " << pending->id();
  QuicStream* stream = ProcessPendingStream(pending);
  if (stream != nullptr) {
    QUICHE_DCHECK(IsClosedStream(stream_id) || IsOpenStream(stream_id))
        << "Stream " << stream_id << " not created";
    if (!stream->pending_duration().IsZero()) {
      QUIC_SERVER_HISTOGRAM_TIMES("QuicStream.PendingDurationUs",
                                  stream->pending_duration().ToMicroseconds(),
                                  0, 1000 * 100, 20,
                                  "Time a stream has been pending at server.");
      ++connection()->mutable_stats().num_total_pending_streams;
    }
    pending_stream_map_.erase(stream_id);
    if (stop_sending_error_code) {
      // Replay the STOP_SENDING the peer sent while the stream was pending.
      stream->OnStopSending(*stop_sending_error_code);
      if (!connection()->connected()) {
        return false;
      }
    }
    stream->OnStreamCreatedFromPendingStream();
    return connection()->connected();
  }
  // Promotion failed; if the peer already finished sending, close it out.
  if (pending->sequencer()->IsClosed()) {
    ClosePendingStream(stream_id);
  }
  return connection()->connected();
}
// Forwards a WINDOW_UPDATE to the pending stream for its id, creating the
// pending stream if needed. HTTP/3 only.
void QuicSession::PendingStreamOnWindowUpdateFrame(
    const QuicWindowUpdateFrame& frame) {
  QUICHE_DCHECK(VersionUsesHttp3(transport_version()));
  PendingStream* pending = GetOrCreatePendingStream(frame.stream_id);
  if (pending) {
    pending->OnWindowUpdateFrame(frame);
  }
}

// Forwards a STOP_SENDING to the pending stream for its id, creating the
// pending stream if needed. HTTP/3 only.
void QuicSession::PendingStreamOnStopSendingFrame(
    const QuicStopSendingFrame& frame) {
  QUICHE_DCHECK(VersionUsesHttp3(transport_version()));
  PendingStream* pending = GetOrCreatePendingStream(frame.stream_id);
  if (pending) {
    pending->OnStopSending(frame.error());
  }
}
// Entry point for incoming STREAM frames. Rejects the invalid stream id,
// routes frames for not-yet-typed streams to the pending-stream path, and
// otherwise delivers to (or creates) the target stream.
void QuicSession::OnStreamFrame(const QuicStreamFrame& frame) {
  QuicStreamId stream_id = frame.stream_id;
  if (stream_id == QuicUtils::GetInvalidStreamId(transport_version())) {
    connection()->CloseConnection(
        QUIC_INVALID_STREAM_ID, "Received data for an invalid stream",
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  if (ShouldProcessFrameByPendingStream(STREAM_FRAME, stream_id)) {
    PendingStream* pending = PendingStreamOnStreamFrame(frame);
    // Only attempt promotion once encryption is up.
    if (pending != nullptr && IsEncryptionEstablished()) {
      MaybeProcessPendingStream(pending);
    }
    return;
  }
  QuicStream* stream = GetOrCreateStream(stream_id);
  if (!stream) {
    // Stream is closed or cannot be created; still record the final size
    // implied by a FIN for connection flow control.
    if (frame.fin) {
      QuicStreamOffset final_byte_offset = frame.offset + frame.data_length;
      OnFinalByteOffsetReceived(stream_id, final_byte_offset);
    }
    return;
  }
  stream->OnStreamFrame(frame);
}
// CRYPTO frames are handled entirely by the crypto stream.
void QuicSession::OnCryptoFrame(const QuicCryptoFrame& frame) {
  GetMutableCryptoStream()->OnCryptoFrame(frame);
}

// Entry point for incoming STOP_SENDING frames (IETF only). Validates the
// stream id and direction, then delivers the frame to the pending, zombie,
// or live stream as appropriate.
void QuicSession::OnStopSendingFrame(const QuicStopSendingFrame& frame) {
  QUICHE_DCHECK(VersionHasIetfQuicFrames(transport_version()));
  QUICHE_DCHECK(QuicVersionUsesCryptoFrames(transport_version()));
  QuicStreamId stream_id = frame.stream_id;
  if (stream_id == QuicUtils::GetInvalidStreamId(transport_version())) {
    QUIC_DVLOG(1) << ENDPOINT
                  << "Received STOP_SENDING with invalid stream_id: "
                  << stream_id << " Closing connection";
    connection()->CloseConnection(
        QUIC_INVALID_STREAM_ID, "Received STOP_SENDING for an invalid stream",
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  // STOP_SENDING only makes sense on streams we can write to; receiving it
  // for a peer's write-only (our read-only) stream is a protocol violation.
  if (QuicUtils::GetStreamType(stream_id, perspective(),
                               IsIncomingStream(stream_id),
                               version()) == READ_UNIDIRECTIONAL) {
    QUIC_DVLOG(1) << ENDPOINT
                  << "Received STOP_SENDING for a read-only stream_id: "
                  << stream_id << ".";
    connection()->CloseConnection(
        QUIC_INVALID_STREAM_ID, "Received STOP_SENDING for a read-only stream",
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  if (visitor_) {
    visitor_->OnStopSendingReceived(frame);
  }
  if (ShouldProcessFrameByPendingStream(STOP_SENDING_FRAME, stream_id)) {
    PendingStreamOnStopSendingFrame(frame);
    return;
  }
  QuicStream* stream = nullptr;
  // Behind a flag: deliver STOP_SENDING to zombie streams too, instead of
  // only to streams reachable via GetOrCreateStream.
  if (enable_stop_sending_for_zombie_streams_) {
    stream = GetStream(stream_id);
    if (stream != nullptr) {
      if (stream->IsZombie()) {
        QUIC_RELOADABLE_FLAG_COUNT_N(
            quic_deliver_stop_sending_to_zombie_streams, 1, 3);
      } else {
        QUIC_RELOADABLE_FLAG_COUNT_N(
            quic_deliver_stop_sending_to_zombie_streams, 2, 3);
      }
      stream->OnStopSending(frame.error());
      return;
    }
  }
  stream = GetOrCreateStream(stream_id);
  if (!stream) {
    // Stream is already closed or could not be created; nothing to do.
    return;
  }
  stream->OnStopSending(frame.error());
}
// Notifies the crypto stream of a decrypted packet. A successful decryption
// also proves peer liveness, ending any in-progress liveness test and
// letting blocked stream creation proceed.
void QuicSession::OnPacketDecrypted(EncryptionLevel level) {
  GetMutableCryptoStream()->OnPacketDecrypted(level);
  if (liveness_testing_in_progress_) {
    liveness_testing_in_progress_ = false;
    OnCanCreateNewOutgoingStream(false);
  }
}

// The following four methods simply delegate to the crypto stream.
void QuicSession::OnOneRttPacketAcknowledged() {
  GetMutableCryptoStream()->OnOneRttPacketAcknowledged();
}

void QuicSession::OnHandshakePacketSent() {
  GetMutableCryptoStream()->OnHandshakePacketSent();
}

std::unique_ptr<QuicDecrypter>
QuicSession::AdvanceKeysAndCreateCurrentOneRttDecrypter() {
  return GetMutableCryptoStream()->AdvanceKeysAndCreateCurrentOneRttDecrypter();
}

std::unique_ptr<QuicEncrypter> QuicSession::CreateCurrentOneRttEncrypter() {
  return GetMutableCryptoStream()->CreateCurrentOneRttEncrypter();
}
// Delivers a RESET_STREAM to the pending stream for its id and then closes
// the pending stream. If no pending stream can be created, falls back to the
// valid-but-nonexistent-stream handling. HTTP/3 only.
void QuicSession::PendingStreamOnRstStream(const QuicRstStreamFrame& frame) {
  QUICHE_DCHECK(VersionUsesHttp3(transport_version()));
  QuicStreamId stream_id = frame.stream_id;
  PendingStream* pending = GetOrCreatePendingStream(stream_id);
  if (!pending) {
    HandleRstOnValidNonexistentStream(frame);
    return;
  }
  pending->OnRstStreamFrame(frame);
  ClosePendingStream(stream_id);
}

// Delivers a RESET_STREAM_AT to the pending stream for its id. Unlike
// RESET_STREAM above, the pending stream is kept open (data up to the
// reliable offset may still arrive). HTTP/3 only.
void QuicSession::PendingStreamOnResetStreamAt(
    const QuicResetStreamAtFrame& frame) {
  QUICHE_DCHECK(VersionUsesHttp3(transport_version()));
  QuicStreamId stream_id = frame.stream_id;
  PendingStream* pending = GetOrCreatePendingStream(stream_id);
  if (!pending) {
    HandleRstOnValidNonexistentStream(frame.ToRstStream());
    return;
  }
  pending->OnResetStreamAtFrame(frame);
}
// Entry point for incoming RESET_STREAM frames. Validates the stream id and
// (for IETF versions) the stream direction, notifies the visitor, and routes
// to the pending stream or the live stream.
void QuicSession::OnRstStream(const QuicRstStreamFrame& frame) {
  QuicStreamId stream_id = frame.stream_id;
  if (stream_id == QuicUtils::GetInvalidStreamId(transport_version())) {
    connection()->CloseConnection(
        QUIC_INVALID_STREAM_ID, "Received data for an invalid stream",
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  // RESET_STREAM can only come from the sending side of a stream; receiving
  // one on our write-only unidirectional stream is a protocol violation.
  if (VersionHasIetfQuicFrames(transport_version()) &&
      QuicUtils::GetStreamType(stream_id, perspective(),
                               IsIncomingStream(stream_id),
                               version()) == WRITE_UNIDIRECTIONAL) {
    connection()->CloseConnection(
        QUIC_INVALID_STREAM_ID, "Received RESET_STREAM for a write-only stream",
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  if (visitor_) {
    visitor_->OnRstStreamReceived(frame);
  }
  if (ShouldProcessFrameByPendingStream(RST_STREAM_FRAME, stream_id)) {
    PendingStreamOnRstStream(frame);
    return;
  }
  QuicStream* stream = GetOrCreateStream(stream_id);
  if (!stream) {
    HandleRstOnValidNonexistentStream(frame);
    return;
  }
  stream->OnStreamReset(frame);
}
// Entry point for incoming RESET_STREAM_AT frames (IETF only). Mirrors
// OnRstStream's validation, but delivers OnResetStreamAtFrame so the stream
// can keep data up to the reliable offset.
void QuicSession::OnResetStreamAt(const QuicResetStreamAtFrame& frame) {
  QUICHE_DCHECK(VersionHasIetfQuicFrames(transport_version()));
  QuicStreamId stream_id = frame.stream_id;
  if (stream_id == QuicUtils::GetInvalidStreamId(transport_version())) {
    connection()->CloseConnection(
        QUIC_INVALID_STREAM_ID, "Received data for an invalid stream",
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  // As with RESET_STREAM, receiving this on our write-only unidirectional
  // stream is a protocol violation.
  if (VersionHasIetfQuicFrames(transport_version()) &&
      QuicUtils::GetStreamType(stream_id, perspective(),
                               IsIncomingStream(stream_id),
                               version()) == WRITE_UNIDIRECTIONAL) {
    connection()->CloseConnection(
        QUIC_INVALID_STREAM_ID, "Received RESET_STREAM for a write-only stream",
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  if (ShouldProcessFrameByPendingStream(RESET_STREAM_AT_FRAME, stream_id)) {
    PendingStreamOnResetStreamAt(frame);
    return;
  }
  QuicStream* stream = GetOrCreateStream(stream_id);
  if (!stream) {
    HandleRstOnValidNonexistentStream(frame.ToRstStream());
    return;
  }
  stream->OnResetStreamAtFrame(frame);
}
// Records receipt of a gQUIC GOAWAY. HTTP/3 GOAWAY is handled elsewhere, so
// seeing this on an HTTP/3 version is a bug.
void QuicSession::OnGoAway(const QuicGoAwayFrame& /*frame*/) {
  QUIC_BUG_IF(quic_bug_12435_1, version().UsesHttp3())
      << "gQUIC GOAWAY received on version " << version();
  transport_goaway_received_ = true;
}

// Default DATAGRAM handler: just logs the payload. Subclasses are expected
// to override for real datagram processing.
void QuicSession::OnMessageReceived(absl::string_view message) {
  QUIC_DVLOG(1) << ENDPOINT << "Received message of length "
                << message.length();
  QUIC_DVLOG(2) << ENDPOINT << "Contents of message of length "
                << message.length() << ":" << std::endl
                << quiche::QuicheTextUtils::HexDump(message);
}

// HANDSHAKE_DONE is consumed by the crypto stream.
void QuicSession::OnHandshakeDoneReceived() {
  QUIC_DVLOG(1) << ENDPOINT << "OnHandshakeDoneReceived";
  GetMutableCryptoStream()->OnHandshakeDoneReceived();
}

// NEW_TOKEN is only valid at the client; delegated to the crypto stream.
void QuicSession::OnNewTokenReceived(absl::string_view token) {
  QUICHE_DCHECK_EQ(perspective_, Perspective::IS_CLIENT);
  GetMutableCryptoStream()->OnNewTokenReceived(token);
}
// Server-side metrics: records the error code of a closed connection into
// separate histograms depending on which side initiated the close.
// QUIC_NO_ERROR closes are not recorded.
void QuicSession::RecordConnectionCloseAtServer(QuicErrorCode error,
                                                ConnectionCloseSource source) {
  if (error != QUIC_NO_ERROR) {
    if (source == ConnectionCloseSource::FROM_SELF) {
      QUIC_SERVER_HISTOGRAM_ENUM(
          "quic_server_connection_close_errors", error, QUIC_LAST_ERROR,
          "QuicErrorCode for server-closed connections.");
    } else {
      QUIC_SERVER_HISTOGRAM_ENUM(
          "quic_client_connection_close_errors", error, QUIC_LAST_ERROR,
          "QuicErrorCode for client-closed connections.");
    }
  }
}
// Tears the session down after the connection has closed: records metrics,
// remembers the first close frame, notifies the crypto stream and every
// active stream, cancels session alarms, and finally informs the visitor.
void QuicSession::OnConnectionClosed(const QuicConnectionCloseFrame& frame,
                                     ConnectionCloseSource source) {
  QUICHE_DCHECK(!connection_->connected());
  if (perspective() == Perspective::IS_SERVER) {
    RecordConnectionCloseAtServer(frame.quic_error_code, source);
  }
  // Only the first close frame is kept; later ones are ignored.
  if (on_closed_frame_.quic_error_code == QUIC_NO_ERROR) {
    on_closed_frame_ = frame;
    source_ = source;
  }
  GetMutableCryptoStream()->OnConnectionClosed(frame, source);
  PerformActionOnActiveStreams([this, frame, source](QuicStream* stream) {
    QuicStreamId id = stream->id();
    stream->OnConnectionClosed(frame, source);
    auto it = stream_map_.find(id);
    if (it != stream_map_.end()) {
      // Every non-static stream must have closed (become a zombie) by now.
      QUIC_BUG_IF(quic_bug_12435_2, !it->second->IsZombie())
          << ENDPOINT << "Non-zombie stream " << id
          << " failed to close under OnConnectionClosed";
    }
    return true;
  });
  closed_streams_clean_up_alarm_->Cancel();
  stream_count_reset_alarm_->Cancel();
  if (visitor_) {
    visitor_->OnConnectionClosed(connection_->GetOneActiveServerConnectionId(),
                                 frame.quic_error_code, frame.error_details,
                                 source);
  }
}
// Forwards the write-blocked notification to the visitor while the
// connection is still alive.
void QuicSession::OnWriteBlocked() {
  if (!connection_->connected()) {
    return;
  }
  if (visitor_) {
    visitor_->OnWriteBlocked(connection_);
  }
}

// Default no-op; subclasses may react to the negotiated version.
void QuicSession::OnSuccessfulVersionNegotiation(
    const ParsedQuicVersion& /*version*/) {}

// gQUIC connectivity probing: servers respond to a probe by probing back at
// the observed peer address.
void QuicSession::OnPacketReceived(const QuicSocketAddress& /*self_address*/,
                                   const QuicSocketAddress& peer_address,
                                   bool is_connectivity_probe) {
  QUICHE_DCHECK(!connection_->ignore_gquic_probing());
  if (is_connectivity_probe && perspective() == Perspective::IS_SERVER) {
    connection_->SendConnectivityProbingPacket(nullptr, peer_address);
  }
}

// Forwards path-degrading notification to the visitor.
void QuicSession::OnPathDegrading() {
  if (visitor_) {
    visitor_->OnPathDegrading();
  }
}

// Default no-op; subclasses may react to path recovery.
void QuicSession::OnForwardProgressMadeAfterPathDegrading() {}

// By default the session does not allow its own address to change.
bool QuicSession::AllowSelfAddressChange() const { return false; }
// Entry point for incoming WINDOW_UPDATE / MAX_(STREAM_)DATA frames. The
// invalid stream id denotes a connection-level update; stream-level updates
// are validated against stream direction and routed to the stream.
void QuicSession::OnWindowUpdateFrame(const QuicWindowUpdateFrame& frame) {
  QuicStreamId stream_id = frame.stream_id;
  if (stream_id == QuicUtils::GetInvalidStreamId(transport_version())) {
    // Connection-level flow control update.
    QUIC_DVLOG(1) << ENDPOINT
                  << "Received connection level flow control window "
                     "update with max data: "
                  << frame.max_data;
    flow_controller_.UpdateSendWindowOffset(frame.max_data);
    return;
  }
  // We never send on a read-only unidirectional stream, so the peer must not
  // raise its send window.
  if (VersionHasIetfQuicFrames(transport_version()) &&
      QuicUtils::GetStreamType(stream_id, perspective(),
                               IsIncomingStream(stream_id),
                               version()) == READ_UNIDIRECTIONAL) {
    connection()->CloseConnection(
        QUIC_WINDOW_UPDATE_RECEIVED_ON_READ_UNIDIRECTIONAL_STREAM,
        "WindowUpdateFrame received on READ_UNIDIRECTIONAL stream.",
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  if (ShouldProcessFrameByPendingStream(WINDOW_UPDATE_FRAME, stream_id)) {
    PendingStreamOnWindowUpdateFrame(frame);
    return;
  }
  QuicStream* stream = GetOrCreateStream(stream_id);
  if (stream != nullptr) {
    stream->OnWindowUpdateFrame(frame);
  }
}

// BLOCKED frames are informational; just log them.
void QuicSession::OnBlockedFrame(const QuicBlockedFrame& frame) {
  QUIC_DLOG(INFO) << ENDPOINT << "Received BLOCKED frame with stream id: "
                  << frame.stream_id << ", offset: " << frame.offset;
}
// Debug check used after a stream's OnCanWrite: a writable, unblocked stream
// that made no progress (no bytes written, no fin sent) is suspicious; after
// more than 20 such consecutive no-progress rounds this returns false,
// flagging a busy loop. Any progress resets the counter.
bool QuicSession::CheckStreamNotBusyLooping(QuicStream* stream,
                                            uint64_t previous_bytes_written,
                                            bool previous_fin_sent) {
  if (
      !stream->write_side_closed() &&
      // Connection level flow control blocking would explain no progress.
      !flow_controller_.IsBlocked() &&
      previous_bytes_written == stream->stream_bytes_written() &&
      previous_fin_sent == stream->fin_sent()) {
    stream->set_busy_counter(stream->busy_counter() + 1);
    QUIC_DVLOG(1) << ENDPOINT << "Suspected busy loop on stream id "
                  << stream->id() << " stream_bytes_written "
                  << stream->stream_bytes_written() << " fin "
                  << stream->fin_sent() << " count " << stream->busy_counter();
    if (stream->busy_counter() > 20) {
      QUIC_LOG(ERROR) << ENDPOINT << "Detected busy loop on stream id "
                      << stream->id() << " stream_bytes_written "
                      << stream->stream_bytes_written() << " fin "
                      << stream->fin_sent();
      return false;
    }
  } else {
    stream->set_busy_counter(0);
  }
  return true;
}
// Debug invariant: a writable stream holding buffered data that is not flow
// control blocked must be registered in the write-blocked list, otherwise
// its data could stall forever. Returns false (and logs) on violation.
bool QuicSession::CheckStreamWriteBlocked(QuicStream* stream) const {
  if (!stream->write_side_closed() && stream->HasBufferedData() &&
      !stream->IsFlowControlBlocked() &&
      !write_blocked_streams_->IsStreamBlocked(stream->id())) {
    QUIC_DLOG(ERROR) << ENDPOINT << "stream " << stream->id()
                     << " has buffered " << stream->BufferedDataBytes()
                     << " bytes, and is not flow control blocked, "
                        "but it is not in the write block list.";
    return false;
  }
  return true;
}
// Core write scheduler, invoked when the connection can send again. Write
// order: retransmit lost data, buffered CRYPTO frames, control frames,
// queued datagrams, and finally up to `num_writes` write-blocked streams.
// Each stage may exhaust the send budget and return early.
void QuicSession::OnCanWrite() {
  // Re-entrancy guard: writing while a packet is being parsed is unsafe.
  if (connection_->framer().is_processing_packet()) {
    QUIC_BUG(session_write_mid_packet_processing)
        << ENDPOINT << "Try to write mid packet processing.";
    return;
  }
  if (!RetransmitLostData()) {
    QUIC_DVLOG(1) << ENDPOINT
                  << "Cannot finish retransmitting lost data, connection is "
                     "write blocked.";
    return;
  }
  // Snapshot the number of stream writes now; streams re-added while writing
  // will be served on a later OnCanWrite, keeping this loop bounded. When
  // connection flow control is blocked, only special (non-data) streams run.
  size_t num_writes = flow_controller_.IsBlocked()
                          ? write_blocked_streams_->NumBlockedSpecialStreams()
                          : write_blocked_streams_->NumBlockedStreams();
  if (num_writes == 0 && !control_frame_manager_.WillingToWrite() &&
      datagram_queue_.empty() &&
      (!QuicVersionUsesCryptoFrames(transport_version()) ||
       !GetCryptoStream()->HasBufferedCryptoFrames())) {
    return;
  }
  // Bundle everything written below into as few packets as possible.
  QuicConnection::ScopedPacketFlusher flusher(connection_);
  if (QuicVersionUsesCryptoFrames(transport_version())) {
    QuicCryptoStream* crypto_stream = GetMutableCryptoStream();
    if (crypto_stream->HasBufferedCryptoFrames()) {
      crypto_stream->WriteBufferedCryptoFrames();
    }
    if ((GetQuicReloadableFlag(
             quic_no_write_control_frame_upon_connection_close) &&
         !connection_->connected()) ||
        crypto_stream->HasBufferedCryptoFrames()) {
      if (!connection_->connected()) {
        QUIC_RELOADABLE_FLAG_COUNT(
            quic_no_write_control_frame_upon_connection_close);
      }
      // Crypto data still pending (or connection closed): don't move on to
      // lower-priority data.
      return;
    }
  }
  if (control_frame_manager_.WillingToWrite()) {
    control_frame_manager_.OnCanWrite();
  }
  // During a PTO before handshake confirmation, don't send stream data.
  if (version().UsesTls() && GetHandshakeState() != HANDSHAKE_CONFIRMED &&
      connection_->in_probe_time_out()) {
    QUIC_CODE_COUNT(quic_donot_pto_stream_data_before_handshake_confirmed);
    return;
  }
  if (!datagram_queue_.empty()) {
    size_t written = datagram_queue_.SendDatagrams();
    QUIC_DVLOG(1) << ENDPOINT << "Sent " << written << " datagrams";
    if (!datagram_queue_.empty()) {
      return;
    }
  }
  // Kept only to enrich the QUIC_BUG report below.
  std::vector<QuicStreamId> last_writing_stream_ids;
  for (size_t i = 0; i < num_writes; ++i) {
    if (!(write_blocked_streams_->HasWriteBlockedSpecialStream() ||
          write_blocked_streams_->HasWriteBlockedDataStreams())) {
      QUIC_BUG(quic_bug_10866_1)
          << "WriteBlockedStream is missing, num_writes: " << num_writes
          << ", finished_writes: " << i
          << ", connected: " << connection_->connected()
          << ", connection level flow control blocked: "
          << flow_controller_.IsBlocked();
      for (QuicStreamId id : last_writing_stream_ids) {
        QUIC_LOG(WARNING) << "last_writing_stream_id: " << id;
      }
      connection_->CloseConnection(QUIC_INTERNAL_ERROR,
                                   "WriteBlockedStream is missing",
                                   ConnectionCloseBehavior::SILENT_CLOSE);
      return;
    }
    if (!CanWriteStreamData()) {
      return;
    }
    currently_writing_stream_id_ = write_blocked_streams_->PopFront();
    last_writing_stream_ids.push_back(currently_writing_stream_id_);
    QUIC_DVLOG(1) << ENDPOINT << "Removing stream "
                  << currently_writing_stream_id_ << " from write-blocked list";
    QuicStream* stream = GetOrCreateStream(currently_writing_stream_id_);
    if (stream != nullptr && !stream->IsFlowControlBlocked()) {
      // Snapshot progress markers for the busy-loop debug check below.
      uint64_t previous_bytes_written = stream->stream_bytes_written();
      bool previous_fin_sent = stream->fin_sent();
      QUIC_DVLOG(1) << ENDPOINT << "stream " << stream->id()
                    << " bytes_written " << previous_bytes_written << " fin "
                    << previous_fin_sent;
      stream->OnCanWrite();
      QUICHE_DCHECK(CheckStreamWriteBlocked(stream));
      QUICHE_DCHECK(CheckStreamNotBusyLooping(stream, previous_bytes_written,
                                              previous_fin_sent));
    }
    currently_writing_stream_id_ = 0;
  }
}
// Returns true if the session has anything it could send right now: pending
// handshake data, control frames, stream retransmissions, or write-blocked
// streams (restricted to special streams when connection flow control is
// blocked; gQUIC only — HTTP/3 has no specially-exempt streams).
bool QuicSession::WillingAndAbleToWrite() const {
  if (QuicVersionUsesCryptoFrames(transport_version())) {
    if (HasPendingHandshake()) {
      return true;
    }
    // Without established encryption nothing else can be written.
    if (!IsEncryptionEstablished()) {
      return false;
    }
  }
  if (control_frame_manager_.WillingToWrite() ||
      !streams_with_pending_retransmission_.empty()) {
    return true;
  }
  if (flow_controller_.IsBlocked()) {
    if (VersionUsesHttp3(transport_version())) {
      return false;
    }
    // gQUIC: special streams (e.g. headers) bypass connection flow control.
    return write_blocked_streams_->HasWriteBlockedSpecialStream();
  }
  return write_blocked_streams_->HasWriteBlockedSpecialStream() ||
         write_blocked_streams_->HasWriteBlockedDataStreams();
}
// Builds a compact debug string with session stream counts plus per-stream
// details (age, bytes written/read, fin/buffer flags) for up to five
// non-static streams.
std::string QuicSession::GetStreamsInfoForLogging() const {
  std::string info = absl::StrCat(
      "num_active_streams: ", GetNumActiveStreams(),
      ", num_pending_streams: ", pending_streams_size(),
      ", num_outgoing_draining_streams: ", num_outgoing_draining_streams(),
      " ");
  // Log details of up to 5 streams.
  size_t i = 5;
  for (const auto& it : stream_map_) {
    if (it.second->is_static()) {
      continue;
    }
    // Calculate the stream creation delay.
    const QuicTime::Delta delay =
        connection_->clock()->ApproximateNow() - it.second->creation_time();
    absl::StrAppend(
        &info, "{", it.second->id(), ":", delay.ToDebuggingValue(), ";",
        it.second->stream_bytes_written(), ",", it.second->fin_sent(), ",",
        it.second->HasBufferedData(), ",", it.second->fin_buffered(), ";",
        it.second->stream_bytes_read(), ",", it.second->fin_received(), "}");
    --i;
    if (i == 0) {
      break;
    }
  }
  return info;
}
// Whether handshake data still needs to be (re)sent. With CRYPTO frames the
// crypto stream tracks this itself; in gQUIC the dedicated crypto stream id
// is checked against retransmission and write-blocked bookkeeping.
bool QuicSession::HasPendingHandshake() const {
  if (QuicVersionUsesCryptoFrames(transport_version())) {
    return GetCryptoStream()->HasPendingCryptoRetransmission() ||
           GetCryptoStream()->HasBufferedCryptoFrames();
  }
  return streams_with_pending_retransmission_.contains(
             QuicUtils::GetCryptoStreamId(transport_version())) ||
         write_blocked_streams_->IsStreamBlocked(
             QuicUtils::GetCryptoStreamId(transport_version()));
}
// Hands a received UDP packet to the connection, with this session's
// connection context installed for the duration of processing.
void QuicSession::ProcessUdpPacket(const QuicSocketAddress& self_address,
                                   const QuicSocketAddress& peer_address,
                                   const QuicReceivedPacket& packet) {
  QuicConnectionContextSwitcher cs(connection_->context());
  connection_->ProcessUdpPacket(self_address, peer_address, packet);
}

// Debug string describing the first connection-close frame seen (and its
// source, when known).
std::string QuicSession::on_closed_frame_string() const {
  std::stringstream ss;
  ss << on_closed_frame_;
  if (source_.has_value()) {
    ss << " " << ConnectionCloseSourceToString(*source_);
  }
  return ss.str();
}
// Writes stream data through the connection at the given encryption level.
// Non-crypto data is suppressed until encryption is established (returning
// a zero-byte consumption); first-pass writes also update the scheduler's
// per-stream byte accounting.
QuicConsumedData QuicSession::WritevData(QuicStreamId id, size_t write_length,
                                         QuicStreamOffset offset,
                                         StreamSendingState state,
                                         TransmissionType type,
                                         EncryptionLevel level) {
  QUIC_BUG_IF(session writevdata when disconnected, !connection()->connected())
      << ENDPOINT << "Try to write stream data when connection is closed: "
      << on_closed_frame_string();
  if (!IsEncryptionEstablished() &&
      !QuicUtils::IsCryptoStreamId(transport_version(), id)) {
    if (was_zero_rtt_rejected_ && !OneRttKeysAvailable()) {
      // Expected state on a TLS client whose 0-RTT was rejected: hold the
      // write until 1-RTT keys arrive.
      QUICHE_DCHECK(version().UsesTls() &&
                    perspective() == Perspective::IS_CLIENT);
      QUIC_DLOG(INFO) << ENDPOINT
                      << "Suppress the write while 0-RTT gets rejected and "
                         "1-RTT keys are not available. Version: "
                      << ParsedQuicVersionToString(version());
    } else if (version().UsesTls() || perspective() == Perspective::IS_SERVER) {
      // Otherwise writing before encryption is a programming error.
      QUIC_BUG(quic_bug_10866_2)
          << ENDPOINT << "Try to send data of stream " << id
          << " before encryption is established. Version: "
          << ParsedQuicVersionToString(version());
    } else {
      QUIC_DLOG(INFO) << ENDPOINT << "Try to send data of stream " << id
                      << " before encryption is established.";
    }
    return QuicConsumedData(0, false);
  }
  SetTransmissionType(type);
  QuicConnection::ScopedEncryptionLevelContext context(connection(), level);
  QuicConsumedData data =
      connection_->SendStreamData(id, write_length, offset, state);
  if (type == NOT_RETRANSMISSION) {
    write_blocked_streams_->UpdateBytesForStream(id, data.bytes_consumed);
  }
  return data;
}
// Sends CRYPTO data at the given encryption level; closes the connection if
// the keys for that level are missing. Returns the number of bytes consumed.
size_t QuicSession::SendCryptoData(EncryptionLevel level, size_t write_length,
                                   QuicStreamOffset offset,
                                   TransmissionType type) {
  QUICHE_DCHECK(QuicVersionUsesCryptoFrames(transport_version()));
  if (!connection()->framer().HasEncrypterOfEncryptionLevel(level)) {
    const std::string error_details = absl::StrCat(
        "Try to send crypto data with missing keys of encryption level: ",
        EncryptionLevelToString(level));
    QUIC_BUG(quic_bug_10866_3) << ENDPOINT << error_details;
    connection()->CloseConnection(
        QUIC_MISSING_WRITE_KEYS, error_details,
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return 0;
  }
  SetTransmissionType(type);
  QuicConnection::ScopedEncryptionLevelContext context(connection(), level);
  const auto bytes_consumed =
      connection_->SendCryptoData(level, write_length, offset);
  return bytes_consumed;
}

// Control-frame manager errors are fatal to the connection.
void QuicSession::OnControlFrameManagerError(QuicErrorCode error_code,
                                             std::string error_details) {
  connection_->CloseConnection(
      error_code, error_details,
      ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
}
// Attempts to send a control frame at the application-data encryption
// level. Returns false (leaving the frame buffered by the caller) when
// encryption is not yet established.
bool QuicSession::WriteControlFrame(const QuicFrame& frame,
                                    TransmissionType type) {
  QUIC_BUG_IF(quic_bug_12435_11, !connection()->connected())
      << ENDPOINT
      << absl::StrCat("Try to write control frame: ", QuicFrameToString(frame),
                      " when connection is closed: ")
      << on_closed_frame_string();
  if (!IsEncryptionEstablished()) {
    return false;
  }
  SetTransmissionType(type);
  QuicConnection::ScopedEncryptionLevelContext context(
      connection(), GetEncryptionLevelToSendApplicationData());
  return connection_->SendControlFrame(frame);
}
// Resets stream |id| with |error|. Resetting a static stream is a fatal
// error and closes the connection. If the stream no longer exists locally,
// the applicable STOP_SENDING / RST_STREAM frames are still emitted.
void QuicSession::ResetStream(QuicStreamId id, QuicRstStreamErrorCode error) {
  QuicStream* stream = GetStream(id);
  if (stream != nullptr) {
    if (stream->is_static()) {
      connection()->CloseConnection(
          QUIC_INVALID_STREAM_ID, "Try to reset a static stream",
          ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    } else {
      stream->Reset(error);
    }
    return;
  }
  // No active stream object: send the reset-related frames directly, within
  // a single packet flush.
  QuicConnection::ScopedPacketFlusher flusher(connection());
  const QuicResetStreamError reset_error =
      QuicResetStreamError::FromInternal(error);
  MaybeSendStopSendingFrame(id, reset_error);
  MaybeSendRstStreamFrame(id, reset_error, /*bytes_written=*/0);
}
// Sends (or buffers) RST_STREAM for |id| if the connection is still up. In
// IETF QUIC, no RST_STREAM is sent for a READ_UNIDIRECTIONAL stream (we
// never write on it); the connection is still notified so in-flight stream
// state can be cleared.
void QuicSession::MaybeSendRstStreamFrame(QuicStreamId id,
                                          QuicResetStreamError error,
                                          QuicStreamOffset bytes_written) {
  if (!connection()->connected()) {
    return;
  }
  if (!VersionHasIetfQuicFrames(transport_version()) ||
      QuicUtils::GetStreamType(id, perspective(), IsIncomingStream(id),
                               version()) != READ_UNIDIRECTIONAL) {
    control_frame_manager_.WriteOrBufferRstStream(id, error, bytes_written);
  }
  connection_->OnStreamReset(id, error.internal_code());
}
// Sends (or buffers) a STOP_SENDING frame for |id| when applicable.
// STOP_SENDING exists only in IETF QUIC, and is pointless on a
// WRITE_UNIDIRECTIONAL stream since we never read from it.
void QuicSession::MaybeSendStopSendingFrame(QuicStreamId id,
                                            QuicResetStreamError error) {
  if (!connection()->connected()) {
    return;
  }
  if (!VersionHasIetfQuicFrames(transport_version())) {
    return;
  }
  const StreamType stream_type = QuicUtils::GetStreamType(
      id, perspective(), IsIncomingStream(id), version());
  if (stream_type == WRITE_UNIDIRECTIONAL) {
    return;
  }
  control_frame_manager_.WriteOrBufferStopSending(error, id);
}
// Sends a gQUIC GOAWAY frame (IETF versions use a different mechanism; see
// the DCHECK). At most one transport GOAWAY is sent per connection. If
// encryption is not yet established, the connection is closed instead, since
// GOAWAY cannot be sent unencrypted.
void QuicSession::SendGoAway(QuicErrorCode error_code,
                             const std::string& reason) {
  QUICHE_DCHECK(!VersionHasIetfQuicFrames(transport_version()));
  if (!IsEncryptionEstablished()) {
    QUIC_CODE_COUNT(quic_goaway_before_encryption_established);
    connection_->CloseConnection(
        error_code, reason,
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  if (transport_goaway_sent_) {
    return;
  }
  transport_goaway_sent_ = true;
  QUICHE_DCHECK_EQ(perspective(), Perspective::IS_SERVER);
  control_frame_manager_.WriteOrBufferGoAway(
      error_code,
      QuicUtils::GetMaxClientInitiatedBidirectionalStreamId(
          transport_version()),
      reason);
}
// Sends (or buffers) a stream-blocked frame for stream |id| at flow-control
// offset |byte_offset|.
void QuicSession::SendBlocked(QuicStreamId id, QuicStreamOffset byte_offset) {
  control_frame_manager_.WriteOrBufferBlocked(id, byte_offset);
}
// Sends (or buffers) a window-update frame advertising |byte_offset| for
// stream |id|.
void QuicSession::SendWindowUpdate(QuicStreamId id,
                                   QuicStreamOffset byte_offset) {
  control_frame_manager_.WriteOrBufferWindowUpdate(id, byte_offset);
}
// Called by a stream on a fatal, stream-reported error; closes the whole
// connection with |error_code|.
void QuicSession::OnStreamError(QuicErrorCode error_code,
                                std::string error_details) {
  connection_->CloseConnection(
      error_code, error_details,
      ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
}
// Overload that additionally supplies the IETF transport error code to put
// on the wire alongside the internal |error_code|.
void QuicSession::OnStreamError(QuicErrorCode error_code,
                                QuicIetfTransportErrorCodes ietf_error,
                                std::string error_details) {
  connection_->CloseConnection(
      error_code, ietf_error, error_details,
      ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
}
// Caps the number of MAX_STREAMS frames buffered at any one time to bound
// control-frame buffering.
bool QuicSession::CanSendMaxStreams() {
  return control_frame_manager_.NumBufferedMaxStreams() < 2;
}
// Sends (or buffers) a MAX_STREAMS frame advertising |stream_count|. It is
// a bug to call this before the transport config is negotiated.
void QuicSession::SendMaxStreams(QuicStreamCount stream_count,
                                 bool unidirectional) {
  if (!is_configured_) {
    QUIC_BUG(quic_bug_10866_5)
        << "Try to send max streams before config negotiated.";
    return;
  }
  control_frame_manager_.WriteOrBufferMaxStreams(stream_count, unidirectional);
}
// Records the highest received byte offset of a stream that was closed
// locally before the peer's final offset was known. Used later by
// OnFinalByteOffsetReceived() to reconcile connection-level flow control.
void QuicSession::InsertLocallyClosedStreamsHighestOffset(
    const QuicStreamId id, QuicStreamOffset offset) {
  locally_closed_streams_highest_offset_[id] = offset;
}
// Removes |stream_id| from the active stream map. A stream still waiting
// for acks becomes a "zombie" and stays in stream_map_; otherwise it moves
// to closed_streams_ for deferred deletion via the cleanup alarm. Also
// maintains the draining counters, the locally-closed-offset map used for
// connection flow control, and the stream-count managers.
void QuicSession::OnStreamClosed(QuicStreamId stream_id) {
  QUIC_DVLOG(1) << ENDPOINT << "Closing stream: " << stream_id;
  StreamMap::iterator it = stream_map_.find(stream_id);
  if (it == stream_map_.end()) {
    QUIC_BUG(quic_bug_10866_6)
        << ENDPOINT << "Stream is already closed: " << stream_id;
    return;
  }
  QuicStream* stream = it->second.get();
  StreamType type = stream->type();
  const bool stream_waiting_for_acks = stream->IsWaitingForAcks();
  if (stream_waiting_for_acks) {
    // The stream must stay alive until all its frames are acked; count it as
    // a zombie and leave it in the map.
    ++num_zombie_streams_;
  } else {
    closed_streams_.push_back(std::move(it->second));
    stream_map_.erase(it);
    streams_with_pending_retransmission_.erase(stream_id);
    if (!closed_streams_clean_up_alarm_->IsSet()) {
      closed_streams_clean_up_alarm_->Set(
          connection_->clock()->ApproximateNow());
    }
    connection_->QuicBugIfHasPendingFrames(stream_id);
  }
  if (!stream->HasReceivedFinalOffset()) {
    // Peer's final offset is unknown; remember the highest offset seen so
    // connection flow control can be reconciled when it arrives.
    QUICHE_DCHECK(!stream->was_draining());
    InsertLocallyClosedStreamsHighestOffset(
        stream_id, stream->highest_received_byte_offset());
    return;
  }
  const bool stream_was_draining = stream->was_draining();
  QUIC_DVLOG_IF(1, stream_was_draining)
      << ENDPOINT << "Stream " << stream_id << " was draining";
  if (stream_was_draining) {
    QUIC_BUG_IF(quic_bug_12435_4, num_draining_streams_ == 0);
    --num_draining_streams_;
    if (!IsIncomingStream(stream_id)) {
      QUIC_BUG_IF(quic_bug_12435_5, num_outgoing_draining_streams_ == 0);
      --num_outgoing_draining_streams_;
    }
    return;
  }
  if (!VersionHasIetfQuicFrames(transport_version())) {
    stream_id_manager_.OnStreamClosed(
        IsIncomingStream(stream_id));
  }
  if (!connection_->connected()) {
    return;
  }
  if (IsIncomingStream(stream_id)) {
    // Allow the peer to open a new incoming stream in place of this one.
    if (VersionHasIetfQuicFrames(transport_version())) {
      ietf_streamid_manager_.OnStreamClosed(stream_id);
    }
    return;
  }
  if (!VersionHasIetfQuicFrames(transport_version())) {
    OnCanCreateNewOutgoingStream(type != BIDIRECTIONAL);
  }
}
// Discards the PendingStream for |stream_id| (IETF QUIC only) and lets the
// stream-id manager account for the closed stream.
void QuicSession::ClosePendingStream(QuicStreamId stream_id) {
  QUIC_DVLOG(1) << ENDPOINT << "Closing stream " << stream_id;
  QUICHE_DCHECK(VersionHasIetfQuicFrames(transport_version()));
  pending_stream_map_.erase(stream_id);
  if (connection_->connected()) {
    ietf_streamid_manager_.OnStreamClosed(stream_id);
  }
}
// Returns true if a frame of |type| on stream |id| should be handled by a
// PendingStream instead of a full stream object.
bool QuicSession::ShouldProcessFrameByPendingStream(QuicFrameType type,
                                                    QuicStreamId id) const {
  // Frames for an already-activated stream never go to a PendingStream.
  if (stream_map_.find(id) != stream_map_.end()) {
    return false;
  }
  if (version().HasIetfQuicFrames() && ExceedsPerLoopStreamLimit()) {
    return true;
  }
  return UsesPendingStreamForFrame(type, id);
}
// Called when the final byte offset of a locally-closed stream becomes
// known. Credits connection-level flow control for the bytes between the
// highest offset previously seen and the final offset, then releases the
// per-stream bookkeeping and updates the stream-count managers.
void QuicSession::OnFinalByteOffsetReceived(
    QuicStreamId stream_id, QuicStreamOffset final_byte_offset) {
  auto it = locally_closed_streams_highest_offset_.find(stream_id);
  if (it == locally_closed_streams_highest_offset_.end()) {
    return;
  }
  QUIC_DVLOG(1) << ENDPOINT << "Received final byte offset "
                << final_byte_offset << " for stream " << stream_id;
  QuicByteCount offset_diff = final_byte_offset - it->second;
  if (flow_controller_.UpdateHighestReceivedOffset(
          flow_controller_.highest_received_byte_offset() + offset_diff)) {
    if (flow_controller_.FlowControlViolation()) {
      // The final offset pushed the connection past its advertised window.
      connection_->CloseConnection(
          QUIC_FLOW_CONTROL_RECEIVED_TOO_MUCH_DATA,
          "Connection level flow control violation",
          ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
      return;
    }
  }
  // The stream is closed, so the remaining bytes count as consumed.
  flow_controller_.AddBytesConsumed(offset_diff);
  locally_closed_streams_highest_offset_.erase(it);
  if (!VersionHasIetfQuicFrames(transport_version())) {
    stream_id_manager_.OnStreamClosed(
        IsIncomingStream(stream_id));
  }
  if (IsIncomingStream(stream_id)) {
    if (VersionHasIetfQuicFrames(transport_version())) {
      ietf_streamid_manager_.OnStreamClosed(stream_id);
    }
  } else if (!VersionHasIetfQuicFrames(transport_version())) {
    OnCanCreateNewOutgoingStream(false);
  }
}
bool QuicSession::IsEncryptionEstablished() const {
if (GetCryptoStream() == nullptr) {
return false;
}
return GetCryptoStream()->encryption_established();
}
bool QuicSession::OneRttKeysAvailable() const {
if (GetCryptoStream() == nullptr) {
return false;
}
return GetCryptoStream()->one_rtt_keys_available();
}
// Applies the negotiated transport config: outgoing stream-count limits,
// advertised incoming limits, flow control windows, (gQUIC) connection
// options and the server preferred address. Closes the connection if the
// peer's new limits would retroactively shrink what a 0-RTT/resumed
// connection already used.
void QuicSession::OnConfigNegotiated() {
  // With TLS the config can be negotiated twice (0-RTT then 1-RTT); on the
  // second pass the connection must already be forward secure.
  if (version().UsesTls() && is_configured_ &&
      connection_->encryption_level() != ENCRYPTION_FORWARD_SECURE) {
    QUIC_BUG(quic_bug_12435_6)
        << ENDPOINT
        << "1-RTT keys missing when config is negotiated for the second time.";
    connection_->CloseConnection(
        QUIC_INTERNAL_ERROR,
        "1-RTT keys missing when config is negotiated for the second time.",
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  QUIC_DVLOG(1) << ENDPOINT << "OnConfigNegotiated";
  connection_->SetFromConfig(config_);
  if (VersionHasIetfQuicFrames(transport_version())) {
    // Apply the peer's outgoing bidirectional stream limit.
    uint32_t max_streams = 0;
    if (config_.HasReceivedMaxBidirectionalStreams()) {
      max_streams = config_.ReceivedMaxBidirectionalStreams();
    }
    if (was_zero_rtt_rejected_ &&
        max_streams <
            ietf_streamid_manager_.outgoing_bidirectional_stream_count()) {
      connection_->CloseConnection(
          QUIC_ZERO_RTT_UNRETRANSMITTABLE,
          absl::StrCat(
              "Server rejected 0-RTT, aborting because new bidirectional "
              "initial stream limit ",
              max_streams, " is less than current open streams: ",
              ietf_streamid_manager_.outgoing_bidirectional_stream_count()),
          ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
      return;
    }
    QUIC_DVLOG(1) << ENDPOINT
                  << "Setting Bidirectional outgoing_max_streams_ to "
                  << max_streams;
    // A resumed/0-RTT client must never see its stream limit shrink.
    if (perspective_ == Perspective::IS_CLIENT &&
        max_streams <
            ietf_streamid_manager_.max_outgoing_bidirectional_streams()) {
      connection_->CloseConnection(
          was_zero_rtt_rejected_ ? QUIC_ZERO_RTT_REJECTION_LIMIT_REDUCED
                                 : QUIC_ZERO_RTT_RESUMPTION_LIMIT_REDUCED,
          absl::StrCat(
              was_zero_rtt_rejected_
                  ? "Server rejected 0-RTT, aborting because "
                  : "",
              "new bidirectional limit ", max_streams,
              " decreases the current limit: ",
              ietf_streamid_manager_.max_outgoing_bidirectional_streams()),
          ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
      return;
    }
    if (ietf_streamid_manager_.MaybeAllowNewOutgoingBidirectionalStreams(
            max_streams)) {
      OnCanCreateNewOutgoingStream(/*unidirectional=*/ false);
    }
    // Apply the peer's outgoing unidirectional stream limit.
    max_streams = 0;
    if (config_.HasReceivedMaxUnidirectionalStreams()) {
      max_streams = config_.ReceivedMaxUnidirectionalStreams();
    }
    if (was_zero_rtt_rejected_ &&
        max_streams <
            ietf_streamid_manager_.outgoing_unidirectional_stream_count()) {
      connection_->CloseConnection(
          QUIC_ZERO_RTT_UNRETRANSMITTABLE,
          absl::StrCat(
              "Server rejected 0-RTT, aborting because new unidirectional "
              "initial stream limit ",
              max_streams, " is less than current open streams: ",
              ietf_streamid_manager_.outgoing_unidirectional_stream_count()),
          ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
      return;
    }
    if (max_streams <
        ietf_streamid_manager_.max_outgoing_unidirectional_streams()) {
      connection_->CloseConnection(
          was_zero_rtt_rejected_ ? QUIC_ZERO_RTT_REJECTION_LIMIT_REDUCED
                                 : QUIC_ZERO_RTT_RESUMPTION_LIMIT_REDUCED,
          absl::StrCat(
              was_zero_rtt_rejected_
                  ? "Server rejected 0-RTT, aborting because "
                  : "",
              "new unidirectional limit ", max_streams,
              " decreases the current limit: ",
              ietf_streamid_manager_.max_outgoing_unidirectional_streams()),
          ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
      return;
    }
    QUIC_DVLOG(1) << ENDPOINT
                  << "Setting Unidirectional outgoing_max_streams_ to "
                  << max_streams;
    if (ietf_streamid_manager_.MaybeAllowNewOutgoingUnidirectionalStreams(
            max_streams)) {
      OnCanCreateNewOutgoingStream(/*unidirectional=*/ true);
    }
  } else {
    // gQUIC: a single limit covers all outgoing streams.
    uint32_t max_streams = 0;
    if (config_.HasReceivedMaxBidirectionalStreams()) {
      max_streams = config_.ReceivedMaxBidirectionalStreams();
    }
    QUIC_DVLOG(1) << ENDPOINT << "Setting max_open_outgoing_streams_ to "
                  << max_streams;
    if (was_zero_rtt_rejected_ &&
        max_streams < stream_id_manager_.num_open_outgoing_streams()) {
      connection_->CloseConnection(
          QUIC_INTERNAL_ERROR,
          absl::StrCat(
              "Server rejected 0-RTT, aborting because new stream limit ",
              max_streams, " is less than current open streams: ",
              stream_id_manager_.num_open_outgoing_streams()),
          ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
      return;
    }
    stream_id_manager_.set_max_open_outgoing_streams(max_streams);
  }
  if (perspective() == Perspective::IS_SERVER) {
    // Honor client-requested larger initial flow control windows; the
    // kIFW6..kIFWA connection options request 64KB..1MB stream windows.
    if (config_.HasReceivedConnectionOptions()) {
      if (ContainsQuicTag(config_.ReceivedConnectionOptions(), kIFW6)) {
        AdjustInitialFlowControlWindows(64 * 1024);
      }
      if (ContainsQuicTag(config_.ReceivedConnectionOptions(), kIFW7)) {
        AdjustInitialFlowControlWindows(128 * 1024);
      }
      if (ContainsQuicTag(config_.ReceivedConnectionOptions(), kIFW8)) {
        AdjustInitialFlowControlWindows(256 * 1024);
      }
      if (ContainsQuicTag(config_.ReceivedConnectionOptions(), kIFW9)) {
        AdjustInitialFlowControlWindows(512 * 1024);
      }
      if (ContainsQuicTag(config_.ReceivedConnectionOptions(), kIFWA)) {
        AdjustInitialFlowControlWindows(1024 * 1024);
      }
    }
    config_.SetStatelessResetTokenToSend(GetStatelessResetToken());
  }
  // Advertise our own incoming stream limits.
  if (VersionHasIetfQuicFrames(transport_version())) {
    ietf_streamid_manager_.SetMaxOpenIncomingBidirectionalStreams(
        config_.GetMaxBidirectionalStreamsToSend());
    ietf_streamid_manager_.SetMaxOpenIncomingUnidirectionalStreams(
        config_.GetMaxUnidirectionalStreamsToSend());
  } else {
    // gQUIC: accept slightly more streams than advertised to absorb races.
    uint32_t max_incoming_streams_to_send =
        config_.GetMaxBidirectionalStreamsToSend();
    uint32_t max_incoming_streams =
        std::max(max_incoming_streams_to_send + kMaxStreamsMinimumIncrement,
                 static_cast<uint32_t>(max_incoming_streams_to_send *
                                       kMaxStreamsMultiplier));
    stream_id_manager_.set_max_open_incoming_streams(max_incoming_streams);
  }
  // Apply the peer's initial per-stream flow control windows.
  if (connection_->version().handshake_protocol == PROTOCOL_TLS1_3) {
    if (config_.HasReceivedInitialMaxStreamDataBytesOutgoingBidirectional()) {
      OnNewStreamOutgoingBidirectionalFlowControlWindow(
          config_.ReceivedInitialMaxStreamDataBytesOutgoingBidirectional());
    }
    if (config_.HasReceivedInitialMaxStreamDataBytesIncomingBidirectional()) {
      OnNewStreamIncomingBidirectionalFlowControlWindow(
          config_.ReceivedInitialMaxStreamDataBytesIncomingBidirectional());
    }
    if (config_.HasReceivedInitialMaxStreamDataBytesUnidirectional()) {
      OnNewStreamUnidirectionalFlowControlWindow(
          config_.ReceivedInitialMaxStreamDataBytesUnidirectional());
    }
  } else {
    if (config_.HasReceivedInitialStreamFlowControlWindowBytes()) {
      OnNewStreamFlowControlWindow(
          config_.ReceivedInitialStreamFlowControlWindowBytes());
    }
  }
  if (config_.HasReceivedInitialSessionFlowControlWindowBytes()) {
    OnNewSessionFlowControlWindow(
        config_.ReceivedInitialSessionFlowControlWindowBytes());
  }
  // Server preferred address (IETF QUIC only): map the advertised address to
  // the peer's address family and withdraw the other family's alternate.
  if (perspective_ == Perspective::IS_SERVER && version().HasIetfQuicFrames() &&
      connection_->effective_peer_address().IsInitialized()) {
    if (config_.SupportsServerPreferredAddress(perspective_)) {
      quiche::IpAddressFamily address_family =
          connection_->effective_peer_address()
              .Normalized()
              .host()
              .address_family();
      std::optional<QuicSocketAddress> expected_preferred_address =
          config_.GetMappedAlternativeServerAddress(address_family);
      if (expected_preferred_address.has_value()) {
        std::optional<QuicNewConnectionIdFrame> frame =
            connection_->MaybeIssueNewConnectionIdForPreferredAddress();
        if (frame.has_value()) {
          config_.SetPreferredAddressConnectionIdAndTokenToSend(
              frame->connection_id, frame->stateless_reset_token);
        }
        connection_->set_expected_server_preferred_address(
            *expected_preferred_address);
      }
      config_.ClearAlternateServerAddressToSend(
          address_family == quiche::IpAddressFamily::IP_V4
              ? quiche::IpAddressFamily::IP_V6
              : quiche::IpAddressFamily::IP_V4);
    } else {
      config_.ClearAlternateServerAddressToSend(quiche::IpAddressFamily::IP_V4);
      config_.ClearAlternateServerAddressToSend(quiche::IpAddressFamily::IP_V6);
    }
  }
  is_configured_ = true;
  connection()->OnConfigNegotiated();
  // Streams may have been blocked on small pre-handshake limits; try writing
  // now unless we are in the middle of processing a packet.
  if (!connection_->framer().is_processing_packet() &&
      (connection_->version().AllowsLowFlowControlLimits() ||
       version().UsesTls())) {
    QUIC_CODE_COUNT(quic_session_on_can_write_on_config_negotiated);
    OnCanWrite();
  }
}
// Handles TLS ALPS data. The base session ignores it and accepts (nullopt);
// subclasses may return an error string to reject.
std::optional<std::string> QuicSession::OnAlpsData(const uint8_t* /*data*/,
                                                   size_t /*length*/) {
  return std::nullopt;
}
// Raises the to-be-advertised initial stream flow control window to
// |stream_window| and scales the session window by the existing
// session/stream ratio (1.5 if the stream window is unset). The new windows
// are applied to the connection, all existing streams and, on versions that
// carry the handshake in stream frames, the crypto stream.
void QuicSession::AdjustInitialFlowControlWindows(size_t stream_window) {
  const float session_window_multiplier =
      config_.GetInitialStreamFlowControlWindowToSend()
          ? static_cast<float>(
                config_.GetInitialSessionFlowControlWindowToSend()) /
                config_.GetInitialStreamFlowControlWindowToSend()
          : 1.5;
  QUIC_DVLOG(1) << ENDPOINT << "Set stream receive window to " << stream_window;
  config_.SetInitialStreamFlowControlWindowToSend(stream_window);
  size_t session_window = session_window_multiplier * stream_window;
  QUIC_DVLOG(1) << ENDPOINT << "Set session receive window to "
                << session_window;
  config_.SetInitialSessionFlowControlWindowToSend(session_window);
  flow_controller_.UpdateReceiveWindowSize(session_window);
  for (auto const& kv : stream_map_) {
    kv.second->UpdateReceiveWindowSize(stream_window);
  }
  if (!QuicVersionUsesCryptoFrames(transport_version())) {
    GetMutableCryptoStream()->UpdateReceiveWindowSize(stream_window);
  }
}
// Handles a frame received for a locally-initiated stream that was never
// created: this is a peer protocol violation, so the connection is closed.
// The error code differs between IETF QUIC and gQUIC.
void QuicSession::HandleFrameOnNonexistentOutgoingStream(
    QuicStreamId stream_id) {
  QUICHE_DCHECK(!IsClosedStream(stream_id));
  const QuicErrorCode error_code =
      VersionHasIetfQuicFrames(transport_version())
          ? QUIC_HTTP_STREAM_WRONG_DIRECTION
          : QUIC_INVALID_STREAM_ID;
  connection()->CloseConnection(
      error_code, "Data for nonexistent stream",
      ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
}
// Handles a RST_STREAM for a stream id that is valid but has no stream
// object. If the stream was closed locally, the reset's final byte offset
// still has to be folded into connection-level flow control.
void QuicSession::HandleRstOnValidNonexistentStream(
    const QuicRstStreamFrame& frame) {
  if (!IsClosedStream(frame.stream_id)) {
    return;
  }
  OnFinalByteOffsetReceived(frame.stream_id, frame.byte_offset);
}
// Applies the peer's initial stream flow control window (gQUIC crypto only,
// where a single window applies to every stream). Windows below the protocol
// minimum close the connection.
void QuicSession::OnNewStreamFlowControlWindow(QuicStreamOffset new_window) {
  QUICHE_DCHECK(version().UsesQuicCrypto());
  QUIC_DVLOG(1) << ENDPOINT << "OnNewStreamFlowControlWindow " << new_window;
  if (new_window < kMinimumFlowControlSendWindow) {
    QUIC_LOG_FIRST_N(ERROR, 1)
        << "Peer sent us an invalid stream flow control send window: "
        << new_window << ", below minimum: " << kMinimumFlowControlSendWindow;
    connection_->CloseConnection(
        QUIC_FLOW_CONTROL_INVALID_WINDOW, "New stream window too low",
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  for (auto const& kv : stream_map_) {
    QUIC_DVLOG(1) << ENDPOINT << "Informing stream " << kv.first
                  << " of new stream flow control window " << new_window;
    // Bail out if the stream rejected the new offset.
    if (!kv.second->MaybeConfigSendWindowOffset(
            new_window, /*was_zero_rtt_rejected=*/false)) {
      return;
    }
  }
  if (!QuicVersionUsesCryptoFrames(transport_version())) {
    // The handshake itself rides on a stream here, so the crypto stream's
    // window must be updated too.
    QUIC_DVLOG(1)
        << ENDPOINT
        << "Informing crypto stream of new stream flow control window "
        << new_window;
    GetMutableCryptoStream()->MaybeConfigSendWindowOffset(
        new_window, /*was_zero_rtt_rejected=*/false);
  }
}
// Applies the peer's initial flow control window for outgoing unidirectional
// streams (TLS handshakes only). Bidirectional and incoming streams are
// skipped.
void QuicSession::OnNewStreamUnidirectionalFlowControlWindow(
    QuicStreamOffset new_window) {
  QUICHE_DCHECK_EQ(connection_->version().handshake_protocol, PROTOCOL_TLS1_3);
  QUIC_DVLOG(1) << ENDPOINT << "OnNewStreamUnidirectionalFlowControlWindow "
                << new_window;
  for (auto const& kv : stream_map_) {
    const QuicStreamId id = kv.first;
    // Skip bidirectional streams; the direction test depends on whether the
    // version encodes directionality in the stream id.
    if (!version().HasIetfQuicFrames()) {
      if (kv.second->type() == BIDIRECTIONAL) {
        continue;
      }
    } else {
      if (QuicUtils::IsBidirectionalStreamId(id, version())) {
        continue;
      }
    }
    // Only locally-initiated streams send data governed by this window.
    if (!QuicUtils::IsOutgoingStreamId(connection_->version(), id,
                                       perspective())) {
      continue;
    }
    QUIC_DVLOG(1) << ENDPOINT << "Informing unidirectional stream " << id
                  << " of new stream flow control window " << new_window;
    // Bail out if the stream rejected the new offset.
    if (!kv.second->MaybeConfigSendWindowOffset(new_window,
                                                was_zero_rtt_rejected_)) {
      return;
    }
  }
}
// Applies the peer's initial flow control window for locally-initiated
// bidirectional streams (TLS handshakes only). Unidirectional and incoming
// streams are skipped.
void QuicSession::OnNewStreamOutgoingBidirectionalFlowControlWindow(
    QuicStreamOffset new_window) {
  QUICHE_DCHECK_EQ(connection_->version().handshake_protocol, PROTOCOL_TLS1_3);
  QUIC_DVLOG(1) << ENDPOINT
                << "OnNewStreamOutgoingBidirectionalFlowControlWindow "
                << new_window;
  for (auto const& kv : stream_map_) {
    const QuicStreamId id = kv.first;
    // Keep only bidirectional streams.
    if (!version().HasIetfQuicFrames()) {
      if (kv.second->type() != BIDIRECTIONAL) {
        continue;
      }
    } else {
      if (!QuicUtils::IsBidirectionalStreamId(id, version())) {
        continue;
      }
    }
    // Keep only locally-initiated streams.
    if (!QuicUtils::IsOutgoingStreamId(connection_->version(), id,
                                       perspective())) {
      continue;
    }
    QUIC_DVLOG(1) << ENDPOINT << "Informing outgoing bidirectional stream "
                  << id << " of new stream flow control window " << new_window;
    // Bail out if the stream rejected the new offset.
    if (!kv.second->MaybeConfigSendWindowOffset(new_window,
                                                was_zero_rtt_rejected_)) {
      return;
    }
  }
}
// Applies the peer's initial flow control window for peer-initiated
// bidirectional streams (TLS handshakes only). Unidirectional and
// locally-initiated streams are skipped.
void QuicSession::OnNewStreamIncomingBidirectionalFlowControlWindow(
    QuicStreamOffset new_window) {
  QUICHE_DCHECK_EQ(connection_->version().handshake_protocol, PROTOCOL_TLS1_3);
  QUIC_DVLOG(1) << ENDPOINT
                << "OnNewStreamIncomingBidirectionalFlowControlWindow "
                << new_window;
  for (auto const& kv : stream_map_) {
    const QuicStreamId id = kv.first;
    // Keep only bidirectional streams.
    if (!version().HasIetfQuicFrames()) {
      if (kv.second->type() != BIDIRECTIONAL) {
        continue;
      }
    } else {
      if (!QuicUtils::IsBidirectionalStreamId(id, version())) {
        continue;
      }
    }
    // Keep only peer-initiated streams.
    if (QuicUtils::IsOutgoingStreamId(connection_->version(), id,
                                      perspective())) {
      continue;
    }
    QUIC_DVLOG(1) << ENDPOINT << "Informing incoming bidirectional stream "
                  << id << " of new stream flow control window " << new_window;
    // Bail out if the stream rejected the new offset.
    if (!kv.second->MaybeConfigSendWindowOffset(new_window,
                                                was_zero_rtt_rejected_)) {
      return;
    }
  }
}
// Applies the peer's connection-level (session) flow control window. The
// connection is closed if the window is below the protocol minimum, below
// what we already sent after a 0-RTT rejection, or (on the client) smaller
// than the previously negotiated limit.
void QuicSession::OnNewSessionFlowControlWindow(QuicStreamOffset new_window) {
  QUIC_DVLOG(1) << ENDPOINT << "OnNewSessionFlowControlWindow " << new_window;
  if (was_zero_rtt_rejected_ && new_window < flow_controller_.bytes_sent()) {
    // 0-RTT data already on the wire exceeds the new window and can never be
    // retransmitted within it.
    std::string error_details = absl::StrCat(
        "Server rejected 0-RTT. Aborting because the client received session "
        "flow control send window: ",
        new_window,
        ", which is below currently used: ", flow_controller_.bytes_sent());
    QUIC_LOG(ERROR) << error_details;
    connection_->CloseConnection(
        QUIC_ZERO_RTT_UNRETRANSMITTABLE, error_details,
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  if (!connection_->version().AllowsLowFlowControlLimits() &&
      new_window < kMinimumFlowControlSendWindow) {
    std::string error_details = absl::StrCat(
        "Peer sent us an invalid session flow control send window: ",
        new_window, ", below minimum: ", kMinimumFlowControlSendWindow);
    QUIC_LOG_FIRST_N(ERROR, 1) << error_details;
    connection_->CloseConnection(
        QUIC_FLOW_CONTROL_INVALID_WINDOW, error_details,
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  if (perspective_ == Perspective::IS_CLIENT &&
      new_window < flow_controller_.send_window_offset()) {
    // Resumption/0-RTT must never reduce an already-negotiated limit.
    std::string error_details = absl::StrCat(
        was_zero_rtt_rejected_ ? "Server rejected 0-RTT, aborting because "
                               : "",
        "new session max data ", new_window,
        " decreases current limit: ", flow_controller_.send_window_offset());
    QUIC_LOG(ERROR) << error_details;
    connection_->CloseConnection(
        was_zero_rtt_rejected_ ? QUIC_ZERO_RTT_REJECTION_LIMIT_REDUCED
                               : QUIC_ZERO_RTT_RESUMPTION_LIMIT_REDUCED,
        error_details, ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  flow_controller_.UpdateSendWindowOffset(new_window);
}
// Installs a new decrypter for |level|. With TLS, the key is refused
// (returns false) until the encrypter needed to send ACKs for that packet
// number space is available. Depending on the version, the decrypter is
// installed side-by-side, set as the alternative decrypter, or made current.
bool QuicSession::OnNewDecryptionKeyAvailable(
    EncryptionLevel level, std::unique_ptr<QuicDecrypter> decrypter,
    bool set_alternative_decrypter, bool latch_once_used) {
  if (connection_->version().handshake_protocol == PROTOCOL_TLS1_3 &&
      !connection()->framer().HasEncrypterOfEncryptionLevel(
          QuicUtils::GetEncryptionLevelToSendAckofSpace(
              QuicUtils::GetPacketNumberSpace(level)))) {
    // We can't process the peer's packets at this level yet because we could
    // not acknowledge them.
    return false;
  }
  if (connection()->version().KnowsWhichDecrypterToUse()) {
    connection()->InstallDecrypter(level, std::move(decrypter));
    return true;
  }
  if (set_alternative_decrypter) {
    connection()->SetAlternativeDecrypter(level, std::move(decrypter),
                                          latch_once_used);
    return true;
  }
  connection()->SetDecrypter(level, std::move(decrypter));
  return true;
}
// Installs a new encrypter for |level| and, for TLS handshakes, updates the
// connection's default encryption level. If encryption was already
// established when the handshake key arrives, the default level is pushed
// back to ENCRYPTION_ZERO_RTT so application data is not downgraded.
void QuicSession::OnNewEncryptionKeyAvailable(
    EncryptionLevel level, std::unique_ptr<QuicEncrypter> encrypter) {
  connection()->SetEncrypter(level, std::move(encrypter));
  if (connection_->version().handshake_protocol != PROTOCOL_TLS1_3) {
    return;
  }
  bool reset_encryption_level = false;
  if (IsEncryptionEstablished() && level == ENCRYPTION_HANDSHAKE) {
    reset_encryption_level = true;
  }
  QUIC_DVLOG(1) << ENDPOINT << "Set default encryption level to " << level;
  connection()->SetDefaultEncryptionLevel(level);
  if (reset_encryption_level) {
    connection()->SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
  }
  QUIC_BUG_IF(quic_bug_12435_7,
              IsEncryptionEstablished() &&
                  (connection()->encryption_level() == ENCRYPTION_INITIAL ||
                   connection()->encryption_level() == ENCRYPTION_HANDSHAKE))
      << "Encryption is established, but the encryption level " << level
      << " does not support sending stream data";
}
// Sets the connection's default encryption level (gQUIC crypto handshakes
// only). Reaching ENCRYPTION_ZERO_RTT on a client retransmits any 0-RTT
// packets and retries writes; reaching ENCRYPTION_FORWARD_SECURE records
// handshake completion.
void QuicSession::SetDefaultEncryptionLevel(EncryptionLevel level) {
  QUICHE_DCHECK_EQ(PROTOCOL_QUIC_CRYPTO,
                   connection_->version().handshake_protocol);
  QUIC_DVLOG(1) << ENDPOINT << "Set default encryption level to " << level;
  connection()->SetDefaultEncryptionLevel(level);
  switch (level) {
    case ENCRYPTION_INITIAL:
      break;
    case ENCRYPTION_ZERO_RTT:
      if (perspective() == Perspective::IS_CLIENT) {
        connection_->MarkZeroRttPacketsForRetransmission(0);
        // Retry writes now that encryption is available, unless we are in
        // the middle of processing a packet (writes resume afterwards).
        if (!connection_->framer().is_processing_packet()) {
          QUIC_CODE_COUNT(
              quic_session_on_can_write_set_default_encryption_level);
          OnCanWrite();
        }
      }
      break;
    case ENCRYPTION_HANDSHAKE:
      break;
    case ENCRYPTION_FORWARD_SECURE:
      QUIC_BUG_IF(quic_bug_12435_8, !config_.negotiated())
          << ENDPOINT << "Handshake confirmed without parameter negotiation.";
      connection()->mutable_stats().handshake_completion_time =
          connection()->clock()->ApproximateNow();
      break;
    default:
      QUIC_BUG(quic_bug_10866_7) << "Unknown encryption level: " << level;
  }
}
// Called when the TLS handshake finishes. Records completion time, applies
// negotiated idle timeouts, and on servers sends HANDSHAKE_DONE (plus an
// address token for IETF versions). Clients drop the Google handshake
// message from the config when the CHP1/CHP2 options requested it.
void QuicSession::OnTlsHandshakeComplete() {
  QUICHE_DCHECK_EQ(PROTOCOL_TLS1_3, connection_->version().handshake_protocol);
  QUIC_BUG_IF(quic_bug_12435_9,
              !GetCryptoStream()->crypto_negotiated_params().cipher_suite)
      << ENDPOINT << "Handshake completes without cipher suite negotiation.";
  QUIC_BUG_IF(quic_bug_12435_10, !config_.negotiated())
      << ENDPOINT << "Handshake completes without parameter negotiation.";
  connection()->mutable_stats().handshake_completion_time =
      connection()->clock()->ApproximateNow();
  if (connection()->ShouldFixTimeouts(config_)) {
    QUIC_RELOADABLE_FLAG_COUNT_N(quic_fix_timeouts, 2, 2);
    // Handshake timeout no longer applies; only the idle timeout remains.
    connection()->SetNetworkTimeouts(QuicTime::Delta::Infinite(),
                                     config_.IdleNetworkTimeout());
  }
  if (connection()->version().UsesTls() &&
      perspective_ == Perspective::IS_SERVER) {
    // Server sends HANDSHAKE_DONE to confirm the handshake to the client.
    control_frame_manager_.WriteOrBufferHandshakeDone();
    if (connection()->version().HasIetfQuicFrames()) {
      MaybeSendAddressToken();
    }
  }
  if (perspective_ == Perspective::IS_CLIENT &&
      (config_.HasClientSentConnectionOption(kCHP1, perspective_) ||
       config_.HasClientSentConnectionOption(kCHP2, perspective_))) {
    config_.ClearGoogleHandshakeMessage();
  }
}
// Server-only (IETF QUIC): generates an address token, optionally embedding
// cached network parameters, prefixes it with kAddressTokenPrefix, and sends
// it in a NEW_TOKEN frame. Returns false if no token could be produced.
bool QuicSession::MaybeSendAddressToken() {
  QUICHE_DCHECK(perspective_ == Perspective::IS_SERVER &&
                connection()->version().HasIetfQuicFrames());
  std::optional<CachedNetworkParameters> cached_network_params =
      GenerateCachedNetworkParameters();
  std::string address_token = GetCryptoStream()->GetAddressToken(
      cached_network_params.has_value() ? &*cached_network_params : nullptr);
  if (address_token.empty()) {
    return false;
  }
  // +1 byte for the token type prefix.
  const size_t buf_len = address_token.length() + 1;
  auto buffer = std::make_unique<char[]>(buf_len);
  QuicDataWriter writer(buf_len, buffer.get());
  writer.WriteUInt8(kAddressTokenPrefix);
  writer.WriteBytes(address_token.data(), address_token.length());
  control_frame_manager_.WriteOrBufferNewToken(
      absl::string_view(buffer.get(), buf_len));
  if (cached_network_params.has_value()) {
    connection()->OnSendConnectionState(*cached_network_params);
  }
  return true;
}
// Removes the decrypter for |level|. Versions whose framer does not track
// per-level decrypters have nothing to remove.
void QuicSession::DiscardOldDecryptionKey(EncryptionLevel level) {
  if (connection()->version().KnowsWhichDecrypterToUse()) {
    connection()->RemoveDecrypter(level);
  }
}
// Drops the write keys for |level| (TLS only removes the encrypter) and
// neuters any data still pending at that level. Discarding 1-RTT keys is
// never allowed.
void QuicSession::DiscardOldEncryptionKey(EncryptionLevel level) {
  QUIC_DLOG(INFO) << ENDPOINT << "Discarding " << level << " keys";
  if (connection()->version().handshake_protocol == PROTOCOL_TLS1_3) {
    connection()->RemoveEncrypter(level);
  }
  switch (level) {
    case ENCRYPTION_INITIAL:
      NeuterUnencryptedData();
      break;
    case ENCRYPTION_HANDSHAKE:
      NeuterHandshakeData();
      break;
    case ENCRYPTION_ZERO_RTT:
      break;
    case ENCRYPTION_FORWARD_SECURE:
      QUIC_BUG(quic_bug_10866_8)
          << ENDPOINT << "Discarding 1-RTT keys is not allowed";
      break;
    default:
      QUIC_BUG(quic_bug_10866_9)
          << ENDPOINT
          << "Cannot discard keys for unknown encryption level: " << level;
  }
}
// Drops outstanding handshake-level crypto data from retransmission and
// notifies the connection that the handshake is complete.
void QuicSession::NeuterHandshakeData() {
  GetMutableCryptoStream()->NeuterStreamDataOfEncryptionLevel(
      ENCRYPTION_HANDSHAKE);
  connection()->OnHandshakeComplete();
}
// Called when the server rejects 0-RTT. Marks all 0-RTT packets for
// retransmission. 1-RTT keys must not be in use yet at this point; if they
// are, the connection is closed with an internal error.
void QuicSession::OnZeroRttRejected(int reason) {
  was_zero_rtt_rejected_ = true;
  connection_->MarkZeroRttPacketsForRetransmission(reason);
  if (connection_->encryption_level() == ENCRYPTION_FORWARD_SECURE) {
    QUIC_BUG(quic_bug_10866_10)
        << "1-RTT keys already available when 0-RTT is rejected.";
    connection_->CloseConnection(
        QUIC_INTERNAL_ERROR,
        "1-RTT keys already available when 0-RTT is rejected.",
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
  }
}
// Populates |params| from the config for the TLS handshake. For TLS
// versions, first records the connection IDs the peer must authenticate:
// servers send the original destination CID plus their initial source CID,
// clients send the client connection ID as initial source CID.
bool QuicSession::FillTransportParameters(TransportParameters* params) {
  if (version().UsesTls()) {
    if (perspective() == Perspective::IS_SERVER) {
      config_.SetOriginalConnectionIdToSend(
          connection_->GetOriginalDestinationConnectionId());
      config_.SetInitialSourceConnectionIdToSend(connection_->connection_id());
    } else {
      config_.SetInitialSourceConnectionIdToSend(
          connection_->client_connection_id());
    }
  }
  return config_.FillTransportParameters(params);
}
// Forwards the peer's transport parameters to the config for validation and
// application. |is_resumption| indicates parameters remembered from a
// previous connection rather than freshly received.
QuicErrorCode QuicSession::ProcessTransportParameters(
    const TransportParameters& params, bool is_resumption,
    std::string* error_details) {
  return config_.ProcessTransportParameters(params, is_resumption,
                                            error_details);
}
// Called when an asynchronous handshake callback completes. Buffered
// undecryptable packets may have become decryptable, so process them now —
// unless we are already inside packet processing, or the connection died.
void QuicSession::OnHandshakeCallbackDone() {
  if (connection_->connected() && !connection()->is_processing_packet()) {
    connection()->MaybeProcessUndecryptablePackets();
  }
}
// Returns whether the connection's packet creator currently has a packet
// flusher attached. Only valid while the connection is alive.
bool QuicSession::PacketFlusherAttached() const {
  QUICHE_DCHECK(connection_->connected());
  return connection()->packet_creator().PacketFlusherAttached();
}
// Forwards the sent (encrypted) ClientHello bytes to the connection.
void QuicSession::OnEncryptedClientHelloSent(
    absl::string_view client_hello) const {
  connection()->OnEncryptedClientHelloSent(client_hello);
}
// Forwards the received (encrypted) ClientHello bytes to the connection.
void QuicSession::OnEncryptedClientHelloReceived(
    absl::string_view client_hello) const {
  connection()->OnEncryptedClientHelloReceived(client_hello);
}
// Notification hook for a sent crypto handshake message; no-op in the base
// session, subclasses may override.
void QuicSession::OnCryptoHandshakeMessageSent(
    const CryptoHandshakeMessage& /*message*/) {}
// Notification hook for a received crypto handshake message; no-op in the
// base session, subclasses may override.
void QuicSession::OnCryptoHandshakeMessageReceived(
    const CryptoHandshakeMessage& /*message*/) {}
// Registers stream |id| with the write-blocked scheduler at |priority|.
void QuicSession::RegisterStreamPriority(QuicStreamId id, bool is_static,
                                         const QuicStreamPriority& priority) {
  write_blocked_streams()->RegisterStream(id, is_static, priority);
}
// Removes stream |id| from the write-blocked scheduler.
void QuicSession::UnregisterStreamPriority(QuicStreamId id) {
  write_blocked_streams()->UnregisterStream(id);
}
// Updates stream |id|'s scheduling priority in the write-blocked scheduler.
void QuicSession::UpdateStreamPriority(QuicStreamId id,
                                       const QuicStreamPriority& new_priority) {
  write_blocked_streams()->UpdateStreamPriority(id, new_priority);
}
// Takes ownership of a newly created stream and inserts it into the stream
// map. Static streams are only counted. Dynamic streams additionally update
// the per-event-loop incoming-stream accounting (IETF), the gQUIC stream-id
// manager, and may trigger a multi-port probe on the client when the session
// transitions to keep-alive because of this stream.
void QuicSession::ActivateStream(std::unique_ptr<QuicStream> stream) {
  const bool should_keep_alive = ShouldKeepConnectionAlive();
  QuicStreamId stream_id = stream->id();
  bool is_static = stream->is_static();
  QUIC_DVLOG(1) << ENDPOINT << "num_streams: " << stream_map_.size()
                << ". activating stream " << stream_id;
  QUICHE_DCHECK(!stream_map_.contains(stream_id));
  stream_map_[stream_id] = std::move(stream);
  if (is_static) {
    ++num_static_streams_;
    return;
  }
  if (version().HasIetfQuicFrames() && IsIncomingStream(stream_id) &&
      max_streams_accepted_per_loop_ != kMaxQuicStreamCount) {
    // Count this stream against the per-event-loop acceptance limit and make
    // sure the counter gets reset by the alarm.
    QUICHE_DCHECK(!ExceedsPerLoopStreamLimit());
    ++new_incoming_streams_in_current_loop_;
    if (!stream_count_reset_alarm_->IsSet()) {
      stream_count_reset_alarm_->Set(connection()->clock()->ApproximateNow());
    }
  }
  if (!VersionHasIetfQuicFrames(transport_version())) {
    stream_id_manager_.ActivateStream(
        IsIncomingStream(stream_id));
  }
  if (perspective() == Perspective::IS_CLIENT &&
      connection()->multi_port_stats() != nullptr && !should_keep_alive &&
      ShouldKeepConnectionAlive()) {
    // This stream flipped the session from idle to keep-alive, so probing an
    // alternative path may now be worthwhile.
    connection()->MaybeProbeMultiPortPath();
  }
}
// Returns the next outgoing bidirectional stream id. IETF QUIC tracks
// bidirectional and unidirectional ids separately; gQUIC uses one counter.
QuicStreamId QuicSession::GetNextOutgoingBidirectionalStreamId() {
  return VersionHasIetfQuicFrames(transport_version())
             ? ietf_streamid_manager_.GetNextOutgoingBidirectionalStreamId()
             : stream_id_manager_.GetNextOutgoingStreamId();
}
// Returns the next outgoing unidirectional stream id. IETF QUIC tracks
// bidirectional and unidirectional ids separately; gQUIC uses one counter.
QuicStreamId QuicSession::GetNextOutgoingUnidirectionalStreamId() {
  return VersionHasIetfQuicFrames(transport_version())
             ? ietf_streamid_manager_.GetNextOutgoingUnidirectionalStreamId()
             : stream_id_manager_.GetNextOutgoingStreamId();
}
// Returns true when a new outgoing bidirectional stream may be opened now.
// Answering "no" may buffer a STREAMS_BLOCKED frame or start client
// liveness testing as a side effect.
bool QuicSession::CanOpenNextOutgoingBidirectionalStream() {
  if (liveness_testing_in_progress_) {
    // Only clients run liveness probes; creation waits until the probe ends.
    QUICHE_DCHECK_EQ(Perspective::IS_CLIENT, perspective());
    QUIC_CODE_COUNT(
        quic_client_fails_to_create_stream_liveness_testing_in_progress);
    return false;
  }
  if (!VersionHasIetfQuicFrames(transport_version())) {
    if (!stream_id_manager_.CanOpenNextOutgoingStream()) {
      return false;
    }
  } else {
    if (!ietf_streamid_manager_.CanOpenNextOutgoingBidirectionalStream()) {
      QUIC_CODE_COUNT(
          quic_fails_to_create_stream_close_too_many_streams_created);
      if (is_configured_) {
        // Tell the peer we are blocked on its MAX_STREAMS limit.
        control_frame_manager_.WriteOrBufferStreamsBlocked(
            ietf_streamid_manager_.max_outgoing_bidirectional_streams(),
            false);
      }
      return false;
    }
  }
  if (perspective() == Perspective::IS_CLIENT &&
      connection_->MaybeTestLiveness()) {
    // Defer stream creation while the liveness probe is outstanding.
    liveness_testing_in_progress_ = true;
    QUIC_CODE_COUNT(quic_client_fails_to_create_stream_close_to_idle_timeout);
    return false;
  }
  return true;
}
// Returns true when a new outgoing unidirectional stream may be opened.
// When blocked under IETF QUIC and the config has been negotiated, buffers
// a STREAMS_BLOCKED frame for the peer.
bool QuicSession::CanOpenNextOutgoingUnidirectionalStream() {
  if (!VersionHasIetfQuicFrames(transport_version())) {
    return stream_id_manager_.CanOpenNextOutgoingStream();
  }
  if (ietf_streamid_manager_.CanOpenNextOutgoingUnidirectionalStream()) {
    return true;
  }
  if (is_configured_) {
    control_frame_manager_.WriteOrBufferStreamsBlocked(
        ietf_streamid_manager_.max_outgoing_unidirectional_streams(),
        true);
  }
  return false;
}
// Returns the bidirectional stream limit most recently advertised to the
// peer. IETF-only: the Google QUIC id manager has no such notion.
QuicStreamCount QuicSession::GetAdvertisedMaxIncomingBidirectionalStreams()
    const {
  QUICHE_DCHECK(VersionHasIetfQuicFrames(transport_version()));
  return ietf_streamid_manager_.advertised_max_incoming_bidirectional_streams();
}
// Looks up `stream_id`, creating an incoming stream on demand. Returns
// nullptr for zombie/closed streams, unknown outgoing streams, and ids the
// peer may not use. Must not be called while a pending stream exists for
// the same id.
QuicStream* QuicSession::GetOrCreateStream(const QuicStreamId stream_id) {
  QUICHE_DCHECK(!pending_stream_map_.contains(stream_id));
  if (QuicUtils::IsCryptoStreamId(transport_version(), stream_id)) {
    return GetMutableCryptoStream();
  }
  StreamMap::iterator it = stream_map_.find(stream_id);
  if (it != stream_map_.end()) {
    // Zombies are kept in the map for ack tracking but are not usable.
    return it->second->IsZombie() ? nullptr : it->second.get();
  }
  if (IsClosedStream(stream_id)) {
    return nullptr;
  }
  if (!IsIncomingStream(stream_id)) {
    // We never implicitly create our own streams from incoming frames.
    HandleFrameOnNonexistentOutgoingStream(stream_id);
    return nullptr;
  }
  if (!MaybeIncreaseLargestPeerStreamId(stream_id)) {
    return nullptr;
  }
  if (!VersionHasIetfQuicFrames(transport_version()) &&
      !stream_id_manager_.CanOpenIncomingStream()) {
    // Google QUIC refuses (rather than closes the connection) when the peer
    // exceeds the incoming stream limit.
    ResetStream(stream_id, QUIC_REFUSED_STREAM);
    return nullptr;
  }
  return CreateIncomingStream(stream_id);
}
// Marks an active stream as draining: it no longer counts toward the open
// stream limit, but stays in stream_map_ until fully closed.
void QuicSession::StreamDraining(QuicStreamId stream_id, bool unidirectional) {
  QUICHE_DCHECK(stream_map_.contains(stream_id));
  QUIC_DVLOG(1) << ENDPOINT << "Stream " << stream_id << " is draining";
  if (VersionHasIetfQuicFrames(transport_version())) {
    ietf_streamid_manager_.OnStreamClosed(stream_id);
  } else {
    stream_id_manager_.OnStreamClosed(
        IsIncomingStream(stream_id));
  }
  ++num_draining_streams_;
  if (!IsIncomingStream(stream_id)) {
    ++num_outgoing_draining_streams_;
    if (!VersionHasIetfQuicFrames(transport_version())) {
      // Draining an outgoing stream frees a slot under the Google QUIC
      // shared limit, so notify the session immediately.
      OnCanCreateNewOutgoingStream(unidirectional);
    }
  }
}
// Records `stream_id` as peer-created if permitted. On a violation the
// connection is closed (with a version-specific error code) and false is
// returned.
bool QuicSession::MaybeIncreaseLargestPeerStreamId(
    const QuicStreamId stream_id) {
  if (VersionHasIetfQuicFrames(transport_version())) {
    std::string error_details;
    if (ietf_streamid_manager_.MaybeIncreaseLargestPeerStreamId(
            stream_id, &error_details)) {
      return true;
    }
    connection()->CloseConnection(
        QUIC_INVALID_STREAM_ID, error_details,
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return false;
  }
  if (!stream_id_manager_.MaybeIncreaseLargestPeerStreamId(stream_id)) {
    connection()->CloseConnection(
        QUIC_TOO_MANY_AVAILABLE_STREAMS,
        absl::StrCat(stream_id, " exceeds available streams ",
                     stream_id_manager_.MaxAvailableStreams()),
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return false;
  }
  return true;
}
// The stream currently being written never yields to itself; every other
// stream defers to the write-blocked list's scheduling decision.
bool QuicSession::ShouldYield(QuicStreamId stream_id) {
  return stream_id != currently_writing_stream_id_ &&
         write_blocked_streams()->ShouldYield(stream_id);
}
// Returns the pending stream for `stream_id`, creating it on first use.
// Returns nullptr when the id is closed or not allowed for the peer.
PendingStream* QuicSession::GetOrCreatePendingStream(QuicStreamId stream_id) {
  if (auto existing = pending_stream_map_.find(stream_id);
      existing != pending_stream_map_.end()) {
    return existing->second.get();
  }
  if (IsClosedStream(stream_id) ||
      !MaybeIncreaseLargestPeerStreamId(stream_id)) {
    return nullptr;
  }
  // Insert a fresh pending stream and hand back a non-owning pointer.
  auto& slot = pending_stream_map_[stream_id];
  slot = std::make_unique<PendingStream>(stream_id, this);
  return slot.get();
}
// Test/setup hook for the Google QUIC id manager; invalid for IETF QUIC,
// which tracks per-directionality limits separately.
void QuicSession::set_largest_peer_created_stream_id(
    QuicStreamId largest_peer_created_stream_id) {
  QUICHE_DCHECK(!VersionHasIetfQuicFrames(transport_version()));
  stream_id_manager_.set_largest_peer_created_stream_id(
      largest_peer_created_stream_id);
}
// Returns the largest peer-created stream id of the given directionality.
// IETF-only.
QuicStreamId QuicSession::GetLargestPeerCreatedStreamId(
    bool unidirectional) const {
  QUICHE_DCHECK(VersionHasIetfQuicFrames(transport_version()));
  return ietf_streamid_manager_.GetLargestPeerCreatedStreamId(unidirectional);
}
// Destroys the owned connection object. Safe to call more than once: the
// pointer is nulled after the first deletion.
void QuicSession::DeleteConnection() {
  if (connection_ == nullptr) {
    return;
  }
  delete connection_;
  connection_ = nullptr;
}
// Updates the priority of `stream_id` if it is still in the stream map.
// Returns whether a stream was found and updated.
bool QuicSession::MaybeSetStreamPriority(QuicStreamId stream_id,
                                         const QuicStreamPriority& priority) {
  const auto entry = stream_map_.find(stream_id);
  if (entry == stream_map_.end()) {
    return false;
  }
  entry->second->SetPriority(priority);
  return true;
}
// A stream is closed when it is neither open nor still available for
// future creation under the applicable id manager.
bool QuicSession::IsClosedStream(QuicStreamId id) {
  QUICHE_DCHECK_NE(QuicUtils::GetInvalidStreamId(transport_version()), id);
  if (IsOpenStream(id)) {
    return false;
  }
  return VersionHasIetfQuicFrames(transport_version())
             ? !ietf_streamid_manager_.IsAvailableStream(id)
             : !stream_id_manager_.IsAvailableStream(id);
}
// A stream is open when it is a live (non-zombie) entry in the stream map,
// a pending stream, or the reserved crypto stream id.
bool QuicSession::IsOpenStream(QuicStreamId id) {
  QUICHE_DCHECK_NE(QuicUtils::GetInvalidStreamId(transport_version()), id);
  if (const auto entry = stream_map_.find(id); entry != stream_map_.end()) {
    return !entry->second->IsZombie();
  }
  return pending_stream_map_.contains(id) ||
         QuicUtils::IsCryptoStreamId(transport_version(), id);
}
// Returns whether `id` names a known static stream; unknown ids report
// false.
bool QuicSession::IsStaticStream(QuicStreamId id) const {
  const auto entry = stream_map_.find(id);
  return entry != stream_map_.end() && entry->second->is_static();
}
size_t QuicSession::GetNumActiveStreams() const {
QUICHE_DCHECK_GE(
static_cast<QuicStreamCount>(stream_map_.size()),
num_static_streams_ + num_draining_streams_ + num_zombie_streams_);
return stream_map_.size() - num_draining_streams_ - num_static_streams_ -
num_zombie_streams_;
}
// Adds `id` to the write-blocked list so it is serviced on the next write
// event. An unknown id is reported as a bug but is still added — matching
// the existing behavior rather than silently dropping the request.
void QuicSession::MarkConnectionLevelWriteBlocked(QuicStreamId id) {
  if (GetOrCreateStream(id) == nullptr) {
    QUIC_BUG(quic_bug_10866_11)
        << "Marking unknown stream " << id << " blocked.";
    QUIC_LOG_FIRST_N(ERROR, 2) << QuicStackTrace();
  }
  QUIC_DVLOG(1) << ENDPOINT << "Adding stream " << id
                << " to write-blocked list";
  write_blocked_streams_->AddStream(id);
}
// True when any source of writable data exists: blocked special or data
// streams, queued connection data, pending stream retransmissions, or
// buffered control frames.
bool QuicSession::HasDataToWrite() const {
  return write_blocked_streams_->HasWriteBlockedSpecialStream() ||
         write_blocked_streams_->HasWriteBlockedDataStreams() ||
         connection_->HasQueuedData() ||
         !streams_with_pending_retransmission_.empty() ||
         control_frame_manager_.WillingToWrite();
}
// Called when the connection needs a retransmittable frame to bundle with
// an ack; a window update serves that purpose.
void QuicSession::OnAckNeedsRetransmittableFrame() {
  flow_controller_.SendWindowUpdate();
}
// Queues an ACK_FREQUENCY frame via the control frame manager.
void QuicSession::SendAckFrequency(const QuicAckFrequencyFrame& frame) {
  control_frame_manager_.WriteOrBufferAckFrequency(frame);
}
// Queues a NEW_CONNECTION_ID frame carrying the fields of `frame`.
void QuicSession::SendNewConnectionId(const QuicNewConnectionIdFrame& frame) {
  control_frame_manager_.WriteOrBufferNewConnectionId(
      frame.connection_id, frame.sequence_number, frame.retire_prior_to,
      frame.stateless_reset_token);
}
// Queues a RETIRE_CONNECTION_ID frame. Under the reloadable flag, control
// frames are no longer written once the connection is closed.
void QuicSession::SendRetireConnectionId(uint64_t sequence_number) {
  if (GetQuicReloadableFlag(
          quic_no_write_control_frame_upon_connection_close2)) {
    QUIC_RELOADABLE_FLAG_COUNT(
        quic_no_write_control_frame_upon_connection_close2);
    if (!connection_->connected()) {
      return;
    }
  }
  control_frame_manager_.WriteOrBufferRetireConnectionId(sequence_number);
}
// Asks the visitor (typically the dispatcher) to reserve a new server
// connection id. With no visitor there is no registry to consult, so the
// reservation trivially succeeds.
bool QuicSession::MaybeReserveConnectionId(
    const QuicConnectionId& server_connection_id) {
  if (visitor_ == nullptr) {
    return true;
  }
  return visitor_->TryAddNewConnectionId(
      connection_->GetOneActiveServerConnectionId(), server_connection_id);
}
// Forwards a retired server connection id to the visitor, if any.
void QuicSession::OnServerConnectionIdRetired(
    const QuicConnectionId& server_connection_id) {
  if (visitor_) {
    visitor_->OnConnectionIdRetired(server_connection_id);
  }
}
// True when the session-level flow controller has no send window left.
bool QuicSession::IsConnectionFlowControlBlocked() const {
  return flow_controller_.IsBlocked();
}
bool QuicSession::IsStreamFlowControlBlocked() {
for (auto const& kv : stream_map_) {
if (kv.second->IsFlowControlBlocked()) {
return true;
}
}
if (!QuicVersionUsesCryptoFrames(transport_version()) &&
GetMutableCryptoStream()->IsFlowControlBlocked()) {
return true;
}
return false;
}
// Upper bound on incoming bidirectional streams the peer may still open.
size_t QuicSession::MaxAvailableBidirectionalStreams() const {
  return VersionHasIetfQuicFrames(transport_version())
             ? ietf_streamid_manager_.GetMaxAllowdIncomingBidirectionalStreams()
             : stream_id_manager_.MaxAvailableStreams();
}
// Upper bound on incoming unidirectional streams the peer may still open.
size_t QuicSession::MaxAvailableUnidirectionalStreams() const {
  return VersionHasIetfQuicFrames(transport_version())
             ? ietf_streamid_manager_.GetMaxAllowdIncomingUnidirectionalStreams()
             : stream_id_manager_.MaxAvailableStreams();
}
// True when `id` was initiated by the peer. IETF QUIC encodes the
// initiator in the id's low bits; Google QUIC asks the id manager.
bool QuicSession::IsIncomingStream(QuicStreamId id) const {
  if (VersionHasIetfQuicFrames(transport_version())) {
    return !QuicUtils::IsOutgoingStreamId(version(), id, perspective_);
  }
  return stream_id_manager_.IsIncomingStream(id);
}
// Moves stream `id` from the stream map to closed_streams_ for deferred
// destruction. NOTE(review): num_zombie_streams_ is decremented for any
// found stream — callers appear responsible for only passing zombies.
void QuicSession::MaybeCloseZombieStream(QuicStreamId id) {
  auto it = stream_map_.find(id);
  if (it == stream_map_.end()) {
    return;
  }
  --num_zombie_streams_;
  closed_streams_.push_back(std::move(it->second));
  stream_map_.erase(it);
  // Actual destruction happens later, from the clean-up alarm.
  if (!closed_streams_clean_up_alarm_->IsSet()) {
    closed_streams_clean_up_alarm_->Set(connection_->clock()->ApproximateNow());
  }
  streams_with_pending_retransmission_.erase(id);
  connection_->QuicBugIfHasPendingFrames(id);
}
// Returns the stream for `id`: a live map entry, the crypto stream for its
// reserved id, or nullptr.
QuicStream* QuicSession::GetStream(QuicStreamId id) const {
  if (const auto entry = stream_map_.find(id); entry != stream_map_.end()) {
    return entry->second.get();
  }
  if (QuicUtils::IsCryptoStreamId(transport_version(), id)) {
    return const_cast<QuicCryptoStream*>(GetCryptoStream());
  }
  return nullptr;
}
// Like GetStream but excludes static streams; unknown ids return nullptr.
QuicStream* QuicSession::GetActiveStream(QuicStreamId id) const {
  const auto entry = stream_map_.find(id);
  if (entry == stream_map_.end() || entry->second->is_static()) {
    return nullptr;
  }
  return entry->second.get();
}
// Dispatches an acked frame by type: message, crypto, control, or stream.
// Returns whether the ack covered previously-unacked data (for stream
// frames) or whether the control frame manager recorded a new ack.
bool QuicSession::OnFrameAcked(const QuicFrame& frame,
                               QuicTime::Delta ack_delay_time,
                               QuicTime receive_timestamp) {
  if (frame.type == MESSAGE_FRAME) {
    OnMessageAcked(frame.message_frame->message_id, receive_timestamp);
    return true;
  }
  if (frame.type == CRYPTO_FRAME) {
    return GetMutableCryptoStream()->OnCryptoFrameAcked(*frame.crypto_frame,
                                                        ack_delay_time);
  }
  if (frame.type != STREAM_FRAME) {
    bool acked = control_frame_manager_.OnControlFrameAcked(frame);
    if (acked && frame.type == MAX_STREAMS_FRAME) {
      // An acked MAX_STREAMS may allow a fresh advertisement.
      ietf_streamid_manager_.MaybeSendMaxStreamsFrame();
    }
    return acked;
  }
  bool new_stream_data_acked = false;
  QuicStream* stream = GetStream(frame.stream_frame.stream_id);
  if (stream != nullptr) {
    QuicByteCount newly_acked_length = 0;
    new_stream_data_acked = stream->OnStreamFrameAcked(
        frame.stream_frame.offset, frame.stream_frame.data_length,
        frame.stream_frame.fin, ack_delay_time, receive_timestamp,
        &newly_acked_length);
    // Once the stream has nothing left to retransmit, drop it from the
    // retransmission set.
    if (!stream->HasPendingRetransmission()) {
      streams_with_pending_retransmission_.erase(stream->id());
    }
  }
  return new_stream_data_acked;
}
// Notifies the owning stream that one of its frames was retransmitted.
// Retransmitting for a stream that no longer exists is treated as an
// internal error and closes the connection.
void QuicSession::OnStreamFrameRetransmitted(const QuicStreamFrame& frame) {
  QuicStream* stream = GetStream(frame.stream_id);
  if (stream == nullptr) {
    QUIC_BUG(quic_bug_10866_12)
        << "Stream: " << frame.stream_id << " is closed when " << frame
        << " is retransmitted.";
    connection()->CloseConnection(
        QUIC_INTERNAL_ERROR, "Attempt to retransmit frame of a closed stream",
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  stream->OnStreamFrameRetransmitted(frame.offset, frame.data_length,
                                     frame.fin);
}
// Dispatches a lost frame by type. For stream frames, records the loss on
// the stream and enrolls the stream for retransmission if needed.
void QuicSession::OnFrameLost(const QuicFrame& frame) {
  if (frame.type == MESSAGE_FRAME) {
    ++total_datagrams_lost_;
    OnMessageLost(frame.message_frame->message_id);
    return;
  }
  if (frame.type == CRYPTO_FRAME) {
    GetMutableCryptoStream()->OnCryptoFrameLost(frame.crypto_frame);
    return;
  }
  if (frame.type != STREAM_FRAME) {
    control_frame_manager_.OnControlFrameLost(frame);
    return;
  }
  QuicStream* stream = GetStream(frame.stream_frame.stream_id);
  if (stream == nullptr) {
    // Losses on already-closed streams are ignored.
    return;
  }
  stream->OnStreamFrameLost(frame.stream_frame.offset,
                            frame.stream_frame.data_length,
                            frame.stream_frame.fin);
  if (stream->HasPendingRetransmission() &&
      !streams_with_pending_retransmission_.contains(
          frame.stream_frame.stream_id)) {
    streams_with_pending_retransmission_.insert(
        std::make_pair(frame.stream_frame.stream_id, true));
  }
}
// Retransmits each frame in `frames` under a single packet flusher.
// Returns false as soon as any retransmission cannot proceed (e.g. write
// blocked); message frames are never retransmitted and are skipped.
bool QuicSession::RetransmitFrames(const QuicFrames& frames,
                                   TransmissionType type) {
  QuicConnection::ScopedPacketFlusher retransmission_flusher(connection_);
  for (const QuicFrame& frame : frames) {
    if (frame.type == MESSAGE_FRAME) {
      continue;
    }
    if (frame.type == CRYPTO_FRAME) {
      if (!GetMutableCryptoStream()->RetransmitData(frame.crypto_frame, type)) {
        return false;
      }
      continue;
    }
    if (frame.type != STREAM_FRAME) {
      if (!control_frame_manager_.RetransmitControlFrame(frame, type)) {
        return false;
      }
      continue;
    }
    QuicStream* stream = GetStream(frame.stream_frame.stream_id);
    if (stream != nullptr &&
        !stream->RetransmitStreamData(frame.stream_frame.offset,
                                      frame.stream_frame.data_length,
                                      frame.stream_frame.fin, type)) {
      return false;
    }
  }
  return true;
}
// Returns whether `frame` carries data that is still in flight (sent but
// neither acked nor abandoned). Message frames are never outstanding.
bool QuicSession::IsFrameOutstanding(const QuicFrame& frame) const {
  if (frame.type == MESSAGE_FRAME) {
    return false;
  }
  if (frame.type == CRYPTO_FRAME) {
    return GetCryptoStream()->IsFrameOutstanding(
        frame.crypto_frame->level, frame.crypto_frame->offset,
        frame.crypto_frame->data_length);
  }
  if (frame.type != STREAM_FRAME) {
    return control_frame_manager_.IsControlFrameOutstanding(frame);
  }
  QuicStream* stream = GetStream(frame.stream_frame.stream_id);
  return stream != nullptr &&
         stream->IsStreamFrameOutstanding(frame.stream_frame.offset,
                                          frame.stream_frame.data_length,
                                          frame.stream_frame.fin);
}
bool QuicSession::HasUnackedCryptoData() const {
const QuicCryptoStream* crypto_stream = GetCryptoStream();
return crypto_stream->IsWaitingForAcks() || crypto_stream->HasBufferedData();
}
bool QuicSession::HasUnackedStreamData() const {
for (const auto& it : stream_map_) {
if (it.second->IsWaitingForAcks()) {
return true;
}
}
return false;
}
// Reports the crypto stream's view of handshake progress.
HandshakeState QuicSession::GetHandshakeState() const {
  return GetCryptoStream()->GetHandshakeState();
}
// Returns the stream's current send window; unknown streams report an
// unlimited window.
QuicByteCount QuicSession::GetFlowControlSendWindowSize(QuicStreamId id) {
  const auto entry = stream_map_.find(id);
  return entry == stream_map_.end()
             ? std::numeric_limits<QuicByteCount>::max()
             : entry->second->CalculateSendWindowSize();
}
// Asks the stream identified by `id` to copy [offset, offset+data_length)
// of its send buffer into `writer`. A missing stream is a bug and yields
// STREAM_MISSING.
WriteStreamDataResult QuicSession::WriteStreamData(QuicStreamId id,
                                                   QuicStreamOffset offset,
                                                   QuicByteCount data_length,
                                                   QuicDataWriter* writer) {
  QuicStream* const stream = GetStream(id);
  if (stream == nullptr) {
    QUIC_BUG(quic_bug_10866_13)
        << "Stream " << id << " does not exist when trying to write data."
        << " version:" << transport_version();
    return STREAM_MISSING;
  }
  return stream->WriteStreamData(offset, data_length, writer) ? WRITE_SUCCESS
                                                              : WRITE_FAILED;
}
// Delegates crypto-frame serialization to the crypto stream.
bool QuicSession::WriteCryptoData(EncryptionLevel level,
                                  QuicStreamOffset offset,
                                  QuicByteCount data_length,
                                  QuicDataWriter* writer) {
  return GetMutableCryptoStream()->WriteCryptoFrame(level, offset, data_length,
                                                    writer);
}
// Derives the stateless reset token from the connection id.
StatelessResetToken QuicSession::GetStatelessResetToken() const {
  return QuicUtils::GenerateStatelessResetToken(connection_->connection_id());
}
// Stream data may be generated only when no packets are queued; handshake
// data additionally bypasses the connection's CanWrite check.
bool QuicSession::CanWriteStreamData() const {
  if (connection_->HasQueuedPackets()) {
    return false;
  }
  return HasPendingHandshake() ||
         connection_->CanWrite(HAS_RETRANSMITTABLE_DATA);
}
// Retransmits lost data in priority order: crypto data first, then control
// frames, then streams. Returns true iff no stream retransmission remains
// pending afterwards.
bool QuicSession::RetransmitLostData() {
  QuicConnection::ScopedPacketFlusher retransmission_flusher(connection_);
  bool uses_crypto_frames = QuicVersionUsesCryptoFrames(transport_version());
  // Phase 1a: crypto data carried in CRYPTO frames.
  if (QuicCryptoStream* const crypto_stream = GetMutableCryptoStream();
      uses_crypto_frames && crypto_stream->HasPendingCryptoRetransmission()) {
    crypto_stream->WritePendingCryptoRetransmission();
  }
  // Phase 1b: legacy versions carry crypto data on a dedicated stream.
  if (!uses_crypto_frames &&
      streams_with_pending_retransmission_.contains(
          QuicUtils::GetCryptoStreamId(transport_version()))) {
    QuicStream* const crypto_stream =
        GetStream(QuicUtils::GetCryptoStreamId(transport_version()));
    crypto_stream->OnCanWrite();
    QUICHE_DCHECK(CheckStreamWriteBlocked(crypto_stream));
    if (crypto_stream->HasPendingRetransmission()) {
      // Ran out of sendable room before finishing crypto retransmission.
      return false;
    } else {
      streams_with_pending_retransmission_.erase(
          QuicUtils::GetCryptoStreamId(transport_version()));
    }
  }
  // Phase 2: control frames.
  if (control_frame_manager_.HasPendingRetransmission()) {
    control_frame_manager_.OnCanWrite();
    if (control_frame_manager_.HasPendingRetransmission()) {
      return false;
    }
  }
  // Phase 3: data streams, drained one at a time while writing is allowed.
  while (!streams_with_pending_retransmission_.empty()) {
    if (!CanWriteStreamData()) {
      break;
    }
    const QuicStreamId id = streams_with_pending_retransmission_.begin()->first;
    QuicStream* stream = GetStream(id);
    if (stream != nullptr) {
      stream->OnCanWrite();
      QUICHE_DCHECK(CheckStreamWriteBlocked(stream));
      if (stream->HasPendingRetransmission()) {
        // Still blocked; try again on the next write event.
        break;
      } else if (!streams_with_pending_retransmission_.empty() &&
                 streams_with_pending_retransmission_.begin()->first == id) {
        // OnCanWrite may have mutated the set; only pop if the head is
        // still this stream.
        streams_with_pending_retransmission_.pop_front();
      }
    } else {
      QUIC_BUG(quic_bug_10866_14)
          << "Try to retransmit data of a closed stream";
      streams_with_pending_retransmission_.pop_front();
    }
  }
  return streams_with_pending_retransmission_.empty();
}
// Drops unencrypted (initial-level) crypto data from retransmission
// tracking once it can no longer be useful, and neuters the corresponding
// packets at the connection.
void QuicSession::NeuterUnencryptedData() {
  QuicCryptoStream* crypto_stream = GetMutableCryptoStream();
  crypto_stream->NeuterUnencryptedStreamData();
  if (!crypto_stream->HasPendingRetransmission() &&
      !QuicVersionUsesCryptoFrames(transport_version())) {
    streams_with_pending_retransmission_.erase(
        QuicUtils::GetCryptoStreamId(transport_version()));
  }
  connection_->NeuterUnencryptedPackets();
}
// Sets the transmission type tagged onto subsequently sent packets.
void QuicSession::SetTransmissionType(TransmissionType type) {
  connection_->SetTransmissionType(type);
}
// Convenience overload: send without forcing an immediate flush.
MessageResult QuicSession::SendMessage(
    absl::Span<quiche::QuicheMemSlice> message) {
  return SendMessage(message, false);
}
// Convenience overload for a single slice, no forced flush.
MessageResult QuicSession::SendMessage(quiche::QuicheMemSlice message) {
  return SendMessage(absl::MakeSpan(&message, 1), false);
}
// Sends a DATAGRAM/MESSAGE frame. Requires an established encryption key;
// message ids are only consumed on success.
MessageResult QuicSession::SendMessage(
    absl::Span<quiche::QuicheMemSlice> message, bool flush) {
  QUICHE_DCHECK(connection_->connected())
      << ENDPOINT << "Try to write messages when connection is closed.";
  if (!IsEncryptionEstablished()) {
    return {MESSAGE_STATUS_ENCRYPTION_NOT_ESTABLISHED, 0};
  }
  // Temporarily force the application-data encryption level for this write.
  QuicConnection::ScopedEncryptionLevelContext context(
      connection(), GetEncryptionLevelToSendApplicationData());
  MessageStatus result =
      connection_->SendMessage(last_message_id_ + 1, message, flush);
  if (result == MESSAGE_STATUS_SUCCESS) {
    return {result, ++last_message_id_};
  }
  return {result, 0};
}
// Default ack notification for datagrams: log only. Subclasses may
// override for richer handling.
void QuicSession::OnMessageAcked(QuicMessageId message_id,
                                 QuicTime ) {
  QUIC_DVLOG(1) << ENDPOINT << "message " << message_id << " gets acked.";
}
// Default loss notification for datagrams: log only.
void QuicSession::OnMessageLost(QuicMessageId message_id) {
  QUIC_DVLOG(1) << ENDPOINT << "message " << message_id
                << " is considered lost";
}
// Destroys all streams parked in closed_streams_ (deferred destruction).
void QuicSession::CleanUpClosedStreams() { closed_streams_.clear(); }
// Largest datagram payload that fits in a packet right now.
QuicPacketLength QuicSession::GetCurrentLargestMessagePayload() const {
  return connection_->GetCurrentLargestMessagePayload();
}
// Largest datagram payload guaranteed to fit for the connection lifetime.
QuicPacketLength QuicSession::GetGuaranteedLargestMessagePayload() const {
  return connection_->GetGuaranteedLargestMessagePayload();
}
// Peeks at the next outgoing bidirectional stream id without consuming it.
QuicStreamId QuicSession::next_outgoing_bidirectional_stream_id() const {
  return VersionHasIetfQuicFrames(transport_version())
             ? ietf_streamid_manager_.next_outgoing_bidirectional_stream_id()
             : stream_id_manager_.next_outgoing_stream_id();
}
// Peeks at the next outgoing unidirectional stream id without consuming it.
QuicStreamId QuicSession::next_outgoing_unidirectional_stream_id() const {
  return VersionHasIetfQuicFrames(transport_version())
             ? ietf_streamid_manager_.next_outgoing_unidirectional_stream_id()
             : stream_id_manager_.next_outgoing_stream_id();
}
// Handles a peer MAX_STREAMS frame. If the frame raises our outgoing limit
// for the given directionality, notify subclasses that new streams can be
// created. Always consumes the frame successfully.
bool QuicSession::OnMaxStreamsFrame(const QuicMaxStreamsFrame& frame) {
  const bool allow_new_streams =
      frame.unidirectional
          ? ietf_streamid_manager_.MaybeAllowNewOutgoingUnidirectionalStreams(
                frame.stream_count)
          : ietf_streamid_manager_.MaybeAllowNewOutgoingBidirectionalStreams(
                frame.stream_count);
  if (allow_new_streams) {
    OnCanCreateNewOutgoingStream(frame.unidirectional);
  }
  return true;
}
// Handles a peer STREAMS_BLOCKED frame. A frame the id manager rejects
// (e.g. claiming a limit above what we advertised) closes the connection.
bool QuicSession::OnStreamsBlockedFrame(const QuicStreamsBlockedFrame& frame) {
  std::string error_details;
  if (!ietf_streamid_manager_.OnStreamsBlockedFrame(frame, &error_details)) {
    connection_->CloseConnection(
        QUIC_STREAMS_BLOCKED_ERROR, error_details,
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return false;
  }
  return true;
}
// Current limit on peer-initiated bidirectional streams.
size_t QuicSession::max_open_incoming_bidirectional_streams() const {
  return VersionHasIetfQuicFrames(transport_version())
             ? ietf_streamid_manager_.GetMaxAllowdIncomingBidirectionalStreams()
             : stream_id_manager_.max_open_incoming_streams();
}
// Current limit on peer-initiated unidirectional streams.
size_t QuicSession::max_open_incoming_unidirectional_streams() const {
  return VersionHasIetfQuicFrames(transport_version())
             ? ietf_streamid_manager_.GetMaxAllowdIncomingUnidirectionalStreams()
             : stream_id_manager_.max_open_incoming_streams();
}
// Picks the entry in `alpns` matching this connection version's canonical
// ALPN string; returns alpns.cend() when absent.
std::vector<absl::string_view>::const_iterator QuicSession::SelectAlpn(
    const std::vector<absl::string_view>& alpns) const {
  const std::string wanted = AlpnForVersion(connection()->version());
  return std::find(alpns.cbegin(), alpns.cend(), wanted);
}
// Default ALPN-selection hook: log which protocol was chosen.
void QuicSession::OnAlpnSelected(absl::string_view alpn) {
  QUIC_DLOG(INFO) << (perspective() == Perspective::IS_SERVER ? "Server: "
                                                              : "Client: ")
                  << "ALPN selected: " << alpn;
}
// Drops retransmission tracking for crypto data at the given level.
void QuicSession::NeuterCryptoDataOfEncryptionLevel(EncryptionLevel level) {
  GetMutableCryptoStream()->NeuterStreamDataOfEncryptionLevel(level);
}
// Runs `action` over every active (non-static, non-zombie) stream until it
// returns false. Streams are snapshotted first — presumably because the
// action may mutate stream_map_ (e.g. close streams), which would
// invalidate live iterators; the const overload below iterates directly.
void QuicSession::PerformActionOnActiveStreams(
    quiche::UnretainedCallback<bool(QuicStream*)> action) {
  std::vector<QuicStream*> active_streams;
  for (const auto& it : stream_map_) {
    if (!it.second->is_static() && !it.second->IsZombie()) {
      active_streams.push_back(it.second.get());
    }
  }
  for (QuicStream* stream : active_streams) {
    if (!action(stream)) {
      return;
    }
  }
}
// Const variant: iterates the map directly (no snapshot), so `action` must
// not mutate the stream map.
void QuicSession::PerformActionOnActiveStreams(
    quiche::UnretainedCallback<bool(QuicStream*)> action) const {
  for (const auto& it : stream_map_) {
    if (!it.second->is_static() && !it.second->IsZombie() &&
        !action(it.second.get())) {
      return;
    }
  }
}
// Asks the framer which encryption level application data should use.
EncryptionLevel QuicSession::GetEncryptionLevelToSendApplicationData() const {
  return connection_->framer().GetEncryptionLevelToSendApplicationData();
}
void QuicSession::ProcessAllPendingStreams() {
std::vector<PendingStream*> pending_streams;
pending_streams.reserve(pending_stream_map_.size());
for (auto it = pending_stream_map_.begin(); it != pending_stream_map_.end();
++it) {
pending_streams.push_back(it->second.get());
}
for (auto* pending_stream : pending_streams) {
if (!MaybeProcessPendingStream(pending_stream)) {
return;
}
}
}
// Starts path validation on the connection with the given context and
// result delegate.
void QuicSession::ValidatePath(
    std::unique_ptr<QuicPathValidationContext> context,
    std::unique_ptr<QuicPathValidator::ResultDelegate> result_delegate,
    PathValidationReason reason) {
  connection_->ValidatePath(std::move(context), std::move(result_delegate),
                            reason);
}
// True while a path validation is in flight on the connection.
bool QuicSession::HasPendingPathValidation() const {
  return connection_->HasPendingPathValidation();
}
// Migrates the connection to a new address pair / writer.
bool QuicSession::MigratePath(const QuicSocketAddress& self_address,
                              const QuicSocketAddress& peer_address,
                              QuicPacketWriter* writer, bool owns_writer) {
  return connection_->MigratePath(self_address, peer_address, writer,
                                  owns_writer);
}
// Server-side validation of an address token from an Initial packet. On
// success, also replays any cached network parameters embedded by a
// previous connection into the congestion controller.
bool QuicSession::ValidateToken(absl::string_view token) {
  QUICHE_DCHECK_EQ(perspective_, Perspective::IS_SERVER);
  if (GetQuicFlag(quic_reject_retry_token_in_initial_packet)) {
    // Test/kill-switch flag: treat every token as invalid.
    return false;
  }
  if (token.empty() || token[0] != kAddressTokenPrefix) {
    // Without the address-token prefix this is not a token we issued.
    return false;
  }
  const bool valid = GetCryptoStream()->ValidateAddressToken(
      absl::string_view(token.data() + 1, token.length() - 1));
  if (valid) {
    const CachedNetworkParameters* cached_network_params =
        GetCryptoStream()->PreviousCachedNetworkParams();
    if (cached_network_params != nullptr &&
        cached_network_params->timestamp() > 0) {
      connection()->OnReceiveConnectionState(*cached_network_params);
    }
  }
  return valid;
}
// Client-side hook: forwards the server's preferred address to the visitor.
void QuicSession::OnServerPreferredAddressAvailable(
    const QuicSocketAddress& server_preferred_address) {
  QUICHE_DCHECK_EQ(perspective_, Perspective::IS_CLIENT);
  if (visitor_ != nullptr) {
    visitor_->OnServerPreferredAddressAvailable(server_preferred_address);
  }
}
// Promotes a pending (incoming, HTTP/3) stream to a real stream based on
// its directionality. WRITE_UNIDIRECTIONAL and CRYPTO types can never be
// pending and indicate a bug.
QuicStream* QuicSession::ProcessPendingStream(PendingStream* pending) {
  QUICHE_DCHECK(VersionUsesHttp3(transport_version()));
  QUICHE_DCHECK(connection()->connected());
  QuicStreamId stream_id = pending->id();
  QUIC_BUG_IF(bad pending stream, !IsIncomingStream(stream_id))
      << "Pending stream " << stream_id << " is not an incoming stream.";
  // Direction is computed from the peer's point of view (third arg).
  StreamType stream_type = QuicUtils::GetStreamType(
      stream_id, perspective(), true, version());
  switch (stream_type) {
    case BIDIRECTIONAL: {
      return ProcessBidirectionalPendingStream(pending);
    }
    case READ_UNIDIRECTIONAL: {
      return ProcessReadUnidirectionalPendingStream(pending);
    }
    case WRITE_UNIDIRECTIONAL:
      ABSL_FALLTHROUGH_INTENDED;
    case CRYPTO:
      QUICHE_BUG(unexpected pending stream)
          << "Unexpected pending stream " << stream_id << " with type "
          << stream_type;
      return nullptr;
  }
  return nullptr;
}
// True once this event-loop iteration has accepted its quota of new
// incoming streams. IETF-only.
bool QuicSession::ExceedsPerLoopStreamLimit() const {
  QUICHE_DCHECK(version().HasIetfQuicFrames());
  return new_incoming_streams_in_current_loop_ >=
         max_streams_accepted_per_loop_;
}
// Alarm callback: resets the per-loop incoming-stream counter and, if the
// limit had been hit, retries the pending streams that were deferred.
void QuicSession::OnStreamCountReset() {
  const bool exceeded_per_loop_stream_limit = ExceedsPerLoopStreamLimit();
  new_incoming_streams_in_current_loop_ = 0;
  if (exceeded_per_loop_stream_limit) {
    QUIC_CODE_COUNT_N(quic_pending_stream, 2, 3);
    ProcessAllPendingStreams();
  }
}
#undef ENDPOINT
}
#include "quiche/quic/core/quic_session.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/macros.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/crypto/null_decrypter.h"
#include "quiche/quic/core/crypto/null_encrypter.h"
#include "quiche/quic/core/crypto/transport_parameters.h"
#include "quiche/quic/core/frames/quic_max_streams_frame.h"
#include "quiche/quic/core/frames/quic_reset_stream_at_frame.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_crypto_stream.h"
#include "quiche/quic/core/quic_data_writer.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_packets.h"
#include "quiche/quic/core/quic_stream.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/mock_quic_session_visitor.h"
#include "quiche/quic/test_tools/quic_config_peer.h"
#include "quiche/quic/test_tools/quic_connection_peer.h"
#include "quiche/quic/test_tools/quic_flow_controller_peer.h"
#include "quiche/quic/test_tools/quic_session_peer.h"
#include "quiche/quic/test_tools/quic_stream_id_manager_peer.h"
#include "quiche/quic/test_tools/quic_stream_peer.h"
#include "quiche/quic/test_tools/quic_stream_send_buffer_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/quiche_mem_slice_storage.h"
using spdy::kV3HighestPriority;
using spdy::SpdyPriority;
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::AtLeast;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::NiceMock;
using ::testing::Return;
using ::testing::StrictMock;
using ::testing::WithArg;
namespace quic {
namespace test {
namespace {
class TestCryptoStream : public QuicCryptoStream, public QuicCryptoHandshaker {
public:
explicit TestCryptoStream(QuicSession* session)
: QuicCryptoStream(session),
QuicCryptoHandshaker(this, session),
encryption_established_(false),
one_rtt_keys_available_(false),
params_(new QuicCryptoNegotiatedParameters) {
params_->cipher_suite = 1;
}
void EstablishZeroRttEncryption() {
encryption_established_ = true;
session()->connection()->SetEncrypter(
ENCRYPTION_ZERO_RTT,
std::make_unique<NullEncrypter>(session()->perspective()));
}
void OnHandshakeMessage(const CryptoHandshakeMessage& ) override {
encryption_established_ = true;
one_rtt_keys_available_ = true;
QuicErrorCode error;
std::string error_details;
session()->config()->SetInitialStreamFlowControlWindowToSend(
kInitialStreamFlowControlWindowForTest);
session()->config()->SetInitialSessionFlowControlWindowToSend(
kInitialSessionFlowControlWindowForTest);
if (session()->version().UsesTls()) {
if (session()->perspective() == Perspective::IS_CLIENT) {
session()->config()->SetOriginalConnectionIdToSend(
session()->connection()->connection_id());
session()->config()->SetInitialSourceConnectionIdToSend(
session()->connection()->connection_id());
} else {
session()->config()->SetInitialSourceConnectionIdToSend(
session()->connection()->client_connection_id());
}
TransportParameters transport_parameters;
EXPECT_TRUE(
session()->config()->FillTransportParameters(&transport_parameters));
error = session()->config()->ProcessTransportParameters(
transport_parameters, false, &error_details);
} else {
CryptoHandshakeMessage msg;
session()->config()->ToHandshakeMessage(&msg, transport_version());
error =
session()->config()->ProcessPeerHello(msg, CLIENT, &error_details);
}
EXPECT_THAT(error, IsQuicNoError());
session()->OnNewEncryptionKeyAvailable(
ENCRYPTION_FORWARD_SECURE,
std::make_unique<NullEncrypter>(session()->perspective()));
session()->OnConfigNegotiated();
if (session()->connection()->version().handshake_protocol ==
PROTOCOL_TLS1_3) {
session()->OnTlsHandshakeComplete();
} else {
session()->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
}
session()->DiscardOldEncryptionKey(ENCRYPTION_INITIAL);
}
ssl_early_data_reason_t EarlyDataReason() const override {
return ssl_early_data_unknown;
}
bool encryption_established() const override {
return encryption_established_;
}
bool one_rtt_keys_available() const override {
return one_rtt_keys_available_;
}
const QuicCryptoNegotiatedParameters& crypto_negotiated_params()
const override {
return *params_;
}
CryptoMessageParser* crypto_message_parser() override {
return QuicCryptoHandshaker::crypto_message_parser();
}
void OnPacketDecrypted(EncryptionLevel ) override {}
void OnOneRttPacketAcknowledged() override {}
void OnHandshakePacketSent() override {}
void OnHandshakeDoneReceived() override {}
void OnNewTokenReceived(absl::string_view ) override {}
std::string GetAddressToken(
const CachedNetworkParameters* )
const override {
return "";
}
bool ValidateAddressToken(absl::string_view ) const override {
return true;
}
const CachedNetworkParameters* PreviousCachedNetworkParams() const override {
return nullptr;
}
void SetPreviousCachedNetworkParams(
CachedNetworkParameters ) override {}
HandshakeState GetHandshakeState() const override {
return one_rtt_keys_available() ? HANDSHAKE_COMPLETE : HANDSHAKE_START;
}
void SetServerApplicationStateForResumption(
std::unique_ptr<ApplicationState> ) override {}
MOCK_METHOD(std::unique_ptr<QuicDecrypter>,
AdvanceKeysAndCreateCurrentOneRttDecrypter, (), (override));
MOCK_METHOD(std::unique_ptr<QuicEncrypter>, CreateCurrentOneRttEncrypter, (),
(override));
MOCK_METHOD(void, OnCanWrite, (), (override));
bool HasPendingCryptoRetransmission() const override { return false; }
MOCK_METHOD(bool, HasPendingRetransmission, (), (const, override));
  void OnConnectionClosed(const QuicConnectionCloseFrame& ,
                          ConnectionCloseSource ) override {}
  // Keying-material export is unsupported by this fake; always fails.
  bool ExportKeyingMaterial(absl::string_view ,
                            absl::string_view ,
                            size_t ,
                            std::string* ) override {
    return false;
  }
  SSL* GetSsl() const override { return nullptr; }
  // CRYPTO frames are legal at every encryption level except 0-RTT.
  bool IsCryptoFrameExpectedForEncryptionLevel(
      EncryptionLevel level) const override {
    return level != ENCRYPTION_ZERO_RTT;
  }
EncryptionLevel GetEncryptionLevelToSendCryptoDataOfSpace(
PacketNumberSpace space) const override {
switch (space) {
case INITIAL_DATA:
return ENCRYPTION_INITIAL;
case HANDSHAKE_DATA:
return ENCRYPTION_HANDSHAKE;
case APPLICATION_DATA:
return ENCRYPTION_FORWARD_SECURE;
default:
QUICHE_DCHECK(false);
return NUM_ENCRYPTION_LEVELS;
}
}
 private:
  using QuicCryptoStream::session;
  // State reported back through encryption_established() /
  // one_rtt_keys_available(); mutated by the handshake paths of this fake.
  bool encryption_established_;
  bool one_rtt_keys_available_;
  quiche::QuicheReferenceCountedPointer<QuicCryptoNegotiatedParameters> params_;
};
// Minimal QuicStream subclass for session tests: data delivery is ignored
// and the write-side hooks are mocked so tests can observe scheduling.
class TestStream : public QuicStream {
 public:
  TestStream(QuicStreamId id, QuicSession* session, StreamType type)
      : TestStream(id, session, /*is_static=*/false, type) {}
  TestStream(QuicStreamId id, QuicSession* session, bool is_static,
             StreamType type)
      : QuicStream(id, session, is_static, type) {}
  TestStream(PendingStream* pending, QuicSession* session)
      : QuicStream(pending, session, /*is_static=*/false) {}
  // Expose protected QuicStream helpers to the tests.
  using QuicStream::CloseWriteSide;
  using QuicStream::WriteMemSlices;
  void OnDataAvailable() override {}
  MOCK_METHOD(void, OnCanWrite, (), (override));
  MOCK_METHOD(bool, RetransmitStreamData,
              (QuicStreamOffset, QuicByteCount, bool, TransmissionType),
              (override));
  MOCK_METHOD(bool, HasPendingRetransmission, (), (const, override));
};
// QuicSession test double.  Creates TestStream instances, can be told to
// pretend every write is fully consumed, optionally routes frames through
// the pending-stream machinery, and counts incoming stream creations.
class TestSession : public QuicSession {
 public:
  explicit TestSession(QuicConnection* connection,
                       MockQuicSessionVisitor* session_visitor)
      : QuicSession(connection, session_visitor, DefaultQuicConfig(),
                    CurrentSupportedVersions(),
                    0),
        crypto_stream_(this),
        writev_consumes_all_data_(false),
        uses_pending_streams_(false),
        num_incoming_streams_created_(0) {
    set_max_streams_accepted_per_loop(5);
    Initialize();
    this->connection()->SetEncrypter(
        ENCRYPTION_FORWARD_SECURE,
        std::make_unique<NullEncrypter>(connection->perspective()));
    // Skip the anti-amplification limit so tests can send freely.
    if (this->connection()->version().SupportsAntiAmplificationLimit()) {
      QuicConnectionPeer::SetAddressValidated(this->connection());
    }
  }
  ~TestSession() override { DeleteConnection(); }
  TestCryptoStream* GetMutableCryptoStream() override {
    return &crypto_stream_;
  }
  const TestCryptoStream* GetCryptoStream() const override {
    return &crypto_stream_;
  }
  // Returns nullptr once the outgoing bidirectional stream-id space is
  // exhausted.
  TestStream* CreateOutgoingBidirectionalStream() {
    QuicStreamId id = GetNextOutgoingBidirectionalStreamId();
    if (id ==
        QuicUtils::GetInvalidStreamId(connection()->transport_version())) {
      return nullptr;
    }
    TestStream* stream = new TestStream(id, this, BIDIRECTIONAL);
    ActivateStream(absl::WrapUnique(stream));
    return stream;
  }
  TestStream* CreateOutgoingUnidirectionalStream() {
    TestStream* stream = new TestStream(GetNextOutgoingUnidirectionalStreamId(),
                                        this, WRITE_UNIDIRECTIONAL);
    ActivateStream(absl::WrapUnique(stream));
    return stream;
  }
  // Mirrors a production server: for pre-IETF versions, closes the connection
  // when the peer exceeds the open-incoming-stream limit.
  TestStream* CreateIncomingStream(QuicStreamId id) override {
    if (!VersionHasIetfQuicFrames(connection()->transport_version()) &&
        stream_id_manager().num_open_incoming_streams() + 1 >
            max_open_incoming_bidirectional_streams()) {
      connection()->CloseConnection(
          QUIC_TOO_MANY_OPEN_STREAMS, "Too many streams!",
          ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
      return nullptr;
    }
    TestStream* stream = new TestStream(
        id, this,
        DetermineStreamType(id, connection()->version(), perspective(),
                            true, BIDIRECTIONAL));
    ActivateStream(absl::WrapUnique(stream));
    ++num_incoming_streams_created_;
    return stream;
  }
  TestStream* CreateIncomingStream(PendingStream* pending) override {
    TestStream* stream = new TestStream(pending, this);
    ActivateStream(absl::WrapUnique(stream));
    ++num_incoming_streams_created_;
    return stream;
  }
  QuicStream* ProcessBidirectionalPendingStream(
      PendingStream* pending) override {
    return CreateIncomingStream(pending);
  }
  // A read-unidirectional pending stream is promoted only once it has
  // readable data buffered in its sequencer.
  QuicStream* ProcessReadUnidirectionalPendingStream(
      PendingStream* pending) override {
    struct iovec iov;
    if (pending->sequencer()->GetReadableRegion(&iov)) {
      return CreateIncomingStream(pending);
    }
    return nullptr;
  }
  // Re-expose protected QuicSession queries for the tests.
  bool IsClosedStream(QuicStreamId id) {
    return QuicSession::IsClosedStream(id);
  }
  QuicStream* GetOrCreateStream(QuicStreamId stream_id) {
    return QuicSession::GetOrCreateStream(stream_id);
  }
  bool ShouldKeepConnectionAlive() const override {
    return GetNumActiveStreams() > 0;
  }
  // When writev_consumes_all_data_ is set, reports the full write as consumed
  // without invoking the real write path.  Bytes are always recorded against
  // the write-blocked list so scheduler bookkeeping stays accurate.
  QuicConsumedData WritevData(QuicStreamId id, size_t write_length,
                              QuicStreamOffset offset, StreamSendingState state,
                              TransmissionType type,
                              EncryptionLevel level) override {
    bool fin = state != NO_FIN;
    QuicConsumedData consumed(write_length, fin);
    if (!writev_consumes_all_data_) {
      consumed =
          QuicSession::WritevData(id, write_length, offset, state, type, level);
    }
    QuicSessionPeer::GetWriteBlockedStreams(this)->UpdateBytesForStream(
        id, consumed.bytes_consumed);
    return consumed;
  }
  MOCK_METHOD(void, OnCanCreateNewOutgoingStream, (bool unidirectional),
              (override));
  void set_writev_consumes_all_data(bool val) {
    writev_consumes_all_data_ = val;
  }
  // Queues the 9-byte payload "not empty" on |stream| and writes it with FIN;
  // bumps the encryption level first for non-crypto streams if needed.
  QuicConsumedData SendStreamData(QuicStream* stream) {
    if (!QuicUtils::IsCryptoStreamId(connection()->transport_version(),
                                     stream->id()) &&
        this->connection()->encryption_level() != ENCRYPTION_FORWARD_SECURE) {
      this->connection()->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
    }
    QuicStreamPeer::SendBuffer(stream).SaveStreamData("not empty");
    QuicConsumedData consumed =
        WritevData(stream->id(), 9, 0, FIN, NOT_RETRANSMISSION,
                   GetEncryptionLevelToSendApplicationData());
    QuicStreamPeer::SendBuffer(stream).OnStreamDataConsumed(
        consumed.bytes_consumed);
    return consumed;
  }
  const QuicFrame& save_frame() { return save_frame_; }
  // Stores a copy of |frame| and releases its resources; suitable as a
  // SendControlFrame action in mock expectations.
  bool SaveFrame(const QuicFrame& frame) {
    save_frame_ = frame;
    DeleteFrame(&const_cast<QuicFrame&>(frame));
    return true;
  }
  // Fakes a large write; only valid when writes are configured to be
  // fully consumed (no data is actually buffered).
  QuicConsumedData SendLargeFakeData(QuicStream* stream, int bytes) {
    QUICHE_DCHECK(writev_consumes_all_data_);
    return WritevData(stream->id(), bytes, 0, FIN, NOT_RETRANSMISSION,
                      GetEncryptionLevelToSendApplicationData());
  }
  // Routes selected frame types through the pending-stream machinery when
  // enabled via set_uses_pending_streams().
  bool UsesPendingStreamForFrame(QuicFrameType type,
                                 QuicStreamId stream_id) const override {
    if (!uses_pending_streams_) {
      return false;
    }
    bool is_incoming_stream = IsIncomingStream(stream_id);
    StreamType stream_type = QuicUtils::GetStreamType(
        stream_id, perspective(), is_incoming_stream, version());
    switch (type) {
      case STREAM_FRAME:
        ABSL_FALLTHROUGH_INTENDED;
      case RST_STREAM_FRAME:
        return is_incoming_stream;
      case STOP_SENDING_FRAME:
        ABSL_FALLTHROUGH_INTENDED;
      case WINDOW_UPDATE_FRAME:
        return stream_type == BIDIRECTIONAL;
      default:
        return false;
    }
  }
  void set_uses_pending_streams(bool uses_pending_streams) {
    uses_pending_streams_ = uses_pending_streams;
  }
  int num_incoming_streams_created() const {
    return num_incoming_streams_created_;
  }
  using QuicSession::ActivateStream;
  using QuicSession::CanOpenNextOutgoingBidirectionalStream;
  using QuicSession::CanOpenNextOutgoingUnidirectionalStream;
  using QuicSession::closed_streams;
  using QuicSession::GetNextOutgoingBidirectionalStreamId;
  using QuicSession::GetNextOutgoingUnidirectionalStreamId;

 private:
  StrictMock<TestCryptoStream> crypto_stream_;
  bool writev_consumes_all_data_;
  bool uses_pending_streams_;
  QuicFrame save_frame_;
  int num_incoming_streams_created_;
};
MATCHER_P(IsFrame, type, "") { return arg.type == type; }
// Version-parameterized fixture: builds a strict mock connection plus a
// TestSession and, when requested, completes config negotiation with
// generous flow-control windows and stream limits.
class QuicSessionTestBase : public QuicTestWithParam<ParsedQuicVersion> {
 protected:
  QuicSessionTestBase(Perspective perspective, bool configure_session)
      : connection_(new StrictMock<MockQuicConnection>(
            &helper_, &alarm_factory_, perspective,
            SupportedVersions(GetParam()))),
        session_(connection_, &session_visitor_),
        configure_session_(configure_session) {
    session_.config()->SetInitialStreamFlowControlWindowToSend(
        kInitialStreamFlowControlWindowForTest);
    session_.config()->SetInitialSessionFlowControlWindowToSend(
        kInitialSessionFlowControlWindowForTest);
    if (configure_session) {
      // IETF versions announce stream credit when the config lands.
      if (VersionHasIetfQuicFrames(transport_version())) {
        EXPECT_CALL(session_, OnCanCreateNewOutgoingStream(false)).Times(1);
        EXPECT_CALL(session_, OnCanCreateNewOutgoingStream(true)).Times(1);
      }
      QuicConfigPeer::SetReceivedMaxBidirectionalStreams(
          session_.config(), kDefaultMaxStreamsPerConnection);
      QuicConfigPeer::SetReceivedMaxUnidirectionalStreams(
          session_.config(), kDefaultMaxStreamsPerConnection);
      QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesUnidirectional(
          session_.config(), kMinimumFlowControlSendWindow);
      QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesIncomingBidirectional(
          session_.config(), kMinimumFlowControlSendWindow);
      QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesOutgoingBidirectional(
          session_.config(), kMinimumFlowControlSendWindow);
      QuicConfigPeer::SetReceivedInitialSessionFlowControlWindow(
          session_.config(), kMinimumFlowControlSendWindow);
      connection_->AdvanceTime(QuicTime::Delta::FromSeconds(1));
      session_.OnConfigNegotiated();
    }
    TestCryptoStream* crypto_stream = session_.GetMutableCryptoStream();
    EXPECT_CALL(*crypto_stream, HasPendingRetransmission())
        .Times(testing::AnyNumber());
    // Clear construction-time expectations so each test starts clean.
    testing::Mock::VerifyAndClearExpectations(&session_);
  }
  ~QuicSessionTestBase() {
    if (configure_session_) {
      EXPECT_TRUE(session_.is_configured());
    }
  }
  // Verifies that exactly the ids recorded in closed_streams_ are reported
  // closed by the session (over the first 100 ids).
  void CheckClosedStreams() {
    QuicStreamId first_stream_id = QuicUtils::GetFirstBidirectionalStreamId(
        connection_->transport_version(), Perspective::IS_CLIENT);
    if (!QuicVersionUsesCryptoFrames(connection_->transport_version())) {
      first_stream_id =
          QuicUtils::GetCryptoStreamId(connection_->transport_version());
    }
    for (QuicStreamId i = first_stream_id; i < 100; i++) {
      if (closed_streams_.find(i) == closed_streams_.end()) {
        EXPECT_FALSE(session_.IsClosedStream(i)) << " stream id: " << i;
      } else {
        EXPECT_TRUE(session_.IsClosedStream(i)) << " stream id: " << i;
      }
    }
  }
  // Resets stream |id| and records it closed, first installing the mock
  // expectations matching the frames that stream type emits on reset.
  void CloseStream(QuicStreamId id) {
    if (VersionHasIetfQuicFrames(transport_version())) {
      if (QuicUtils::GetStreamType(
              id, session_.perspective(), session_.IsIncomingStream(id),
              connection_->version()) == READ_UNIDIRECTIONAL) {
        // Read-only stream: only STOP_SENDING is sent.
        EXPECT_CALL(*connection_, SendControlFrame(IsFrame(STOP_SENDING_FRAME)))
            .Times(1)
            .WillOnce(Invoke(&ClearControlFrame));
        EXPECT_CALL(*connection_, OnStreamReset(id, _)).Times(1);
      } else if (QuicUtils::GetStreamType(
                     id, session_.perspective(), session_.IsIncomingStream(id),
                     connection_->version()) == WRITE_UNIDIRECTIONAL) {
        // Write-only stream: only RST_STREAM is sent.
        EXPECT_CALL(*connection_, SendControlFrame(IsFrame(RST_STREAM_FRAME)))
            .Times(1)
            .WillOnce(Invoke(&ClearControlFrame));
        EXPECT_CALL(*connection_, OnStreamReset(id, _));
      } else {
        // Bidirectional: both RST_STREAM and STOP_SENDING may be sent.
        EXPECT_CALL(*connection_, SendControlFrame(IsFrame(RST_STREAM_FRAME)))
            .WillRepeatedly(Invoke(&ClearControlFrame));
        EXPECT_CALL(*connection_, SendControlFrame(IsFrame(STOP_SENDING_FRAME)))
            .WillRepeatedly(Invoke(&ClearControlFrame));
        EXPECT_CALL(*connection_, OnStreamReset(id, _));
      }
    } else {
      EXPECT_CALL(*connection_, SendControlFrame(_))
          .WillOnce(Invoke(&ClearControlFrame));
      EXPECT_CALL(*connection_, OnStreamReset(id, _));
    }
    session_.ResetStream(id, QUIC_STREAM_CANCELLED);
    closed_streams_.insert(id);
  }
  // Drives the fake handshake to completion via a synthetic message.
  void CompleteHandshake() {
    CryptoHandshakeMessage msg;
    if (connection_->version().UsesTls() &&
        connection_->perspective() == Perspective::IS_SERVER) {
      // TLS servers send a control frame (HANDSHAKE_DONE) on completion.
      EXPECT_CALL(*connection_, SendControlFrame(_))
          .WillOnce(Invoke(&ClearControlFrame));
    }
    session_.GetMutableCryptoStream()->OnHandshakeMessage(msg);
  }
  QuicTransportVersion transport_version() const {
    return connection_->transport_version();
  }
  // Helpers computing the n-th stream id in each of the four id spaces.
  QuicStreamId GetNthClientInitiatedBidirectionalId(int n) {
    return QuicUtils::GetFirstBidirectionalStreamId(
               connection_->transport_version(), Perspective::IS_CLIENT) +
           QuicUtils::StreamIdDelta(connection_->transport_version()) * n;
  }
  QuicStreamId GetNthClientInitiatedUnidirectionalId(int n) {
    return QuicUtils::GetFirstUnidirectionalStreamId(
               connection_->transport_version(), Perspective::IS_CLIENT) +
           QuicUtils::StreamIdDelta(connection_->transport_version()) * n;
  }
  QuicStreamId GetNthServerInitiatedBidirectionalId(int n) {
    return QuicUtils::GetFirstBidirectionalStreamId(
               connection_->transport_version(), Perspective::IS_SERVER) +
           QuicUtils::StreamIdDelta(connection_->transport_version()) * n;
  }
  QuicStreamId GetNthServerInitiatedUnidirectionalId(int n) {
    return QuicUtils::GetFirstUnidirectionalStreamId(
               connection_->transport_version(), Perspective::IS_SERVER) +
           QuicUtils::StreamIdDelta(connection_->transport_version()) * n;
  }
  // Converts a 1-based stream count into the matching stream id, encoding
  // directionality (bit 0x2) and initiator (bit 0x1) per the IETF scheme.
  QuicStreamId StreamCountToId(QuicStreamCount stream_count,
                               Perspective perspective, bool bidirectional) {
    QuicStreamId id =
        ((stream_count - 1) * QuicUtils::StreamIdDelta(transport_version()));
    if (!bidirectional) {
      id |= 0x2;
    }
    if (perspective == Perspective::IS_SERVER) {
      id |= 0x1;
    }
    return id;
  }
  MockQuicConnectionHelper helper_;
  MockAlarmFactory alarm_factory_;
  NiceMock<MockQuicSessionVisitor> session_visitor_;
  StrictMock<MockQuicConnection>* connection_;
  TestSession session_;
  std::set<QuicStreamId> closed_streams_;
  bool configure_session_;
};
// Server-perspective fixture.  Adds a client-side QuicFramer so written
// packets can be decoded and checked (e.g. for PATH_RESPONSE frames).
class QuicSessionTestServer : public QuicSessionTestBase {
 public:
  // Decodes a packet written by the session and asserts it carries two
  // PATH_RESPONSE frames echoing path_frame_buffer1_ / path_frame_buffer2_,
  // in that order.
  WriteResult CheckMultiPathResponse(const char* buffer, size_t buf_len,
                                     const QuicIpAddress& ,
                                     const QuicSocketAddress& ,
                                     PerPacketOptions* ) {
    QuicEncryptedPacket packet(buffer, buf_len);
    {
      InSequence s;
      EXPECT_CALL(framer_visitor_, OnPacket());
      EXPECT_CALL(framer_visitor_, OnUnauthenticatedPublicHeader(_));
      EXPECT_CALL(framer_visitor_, OnUnauthenticatedHeader(_));
      EXPECT_CALL(framer_visitor_, OnDecryptedPacket(_, _));
      EXPECT_CALL(framer_visitor_, OnPacketHeader(_));
      EXPECT_CALL(framer_visitor_, OnPathResponseFrame(_))
          .WillOnce(
              WithArg<0>(Invoke([this](const QuicPathResponseFrame& frame) {
                EXPECT_EQ(path_frame_buffer1_, frame.data_buffer);
                return true;
              })));
      EXPECT_CALL(framer_visitor_, OnPathResponseFrame(_))
          .WillOnce(
              WithArg<0>(Invoke([this](const QuicPathResponseFrame& frame) {
                EXPECT_EQ(path_frame_buffer2_, frame.data_buffer);
                return true;
              })));
      EXPECT_CALL(framer_visitor_, OnPacketComplete());
    }
    client_framer_.ProcessPacket(packet);
    return WriteResult(WRITE_STATUS_OK, 0);
  }

 protected:
  QuicSessionTestServer()
      : QuicSessionTestBase(Perspective::IS_SERVER, true),
        path_frame_buffer1_({0, 1, 2, 3, 4, 5, 6, 7}),
        path_frame_buffer2_({8, 9, 10, 11, 12, 13, 14, 15}),
        client_framer_(SupportedVersions(GetParam()), QuicTime::Zero(),
                       Perspective::IS_CLIENT, kQuicDefaultConnectionIdLength) {
    client_framer_.set_visitor(&framer_visitor_);
    client_framer_.SetInitialObfuscators(TestConnectionId());
    if (client_framer_.version().KnowsWhichDecrypterToUse()) {
      client_framer_.InstallDecrypter(
          ENCRYPTION_FORWARD_SECURE,
          std::make_unique<NullDecrypter>(Perspective::IS_CLIENT));
    }
  }
  QuicPathFrameBuffer path_frame_buffer1_;
  QuicPathFrameBuffer path_frame_buffer2_;
  StrictMock<MockFramerVisitor> framer_visitor_;
  QuicFramer client_framer_;
};
// Run every QuicSessionTestServer test once per supported QUIC version.
INSTANTIATE_TEST_SUITE_P(Tests, QuicSessionTestServer,
                         ::testing::ValuesIn(AllSupportedVersions()),
                         ::testing::PrintToStringParamName());
// The session must report the peer address the mock connection was built
// with: IPv4 loopback at kTestPort.
TEST_P(QuicSessionTestServer, PeerAddress) {
  const QuicSocketAddress expected_address(QuicIpAddress::Loopback4(),
                                           kTestPort);
  EXPECT_EQ(expected_address, session_.peer_address());
}
// The session's self address must be initialized from the start.
TEST_P(QuicSessionTestServer, SelfAddress) {
  const QuicSocketAddress self_address = session_.self_address();
  EXPECT_TRUE(self_address.IsInitialized());
}
// After the connection is closed, OnWriteBlocked() must not be forwarded to
// the session visitor.
TEST_P(QuicSessionTestServer, DontCallOnWriteBlockedForDisconnectedConnection) {
  EXPECT_CALL(*connection_, CloseConnection(_, _, _))
      .WillOnce(
          Invoke(connection_, &MockQuicConnection::ReallyCloseConnection));
  connection_->CloseConnection(QUIC_NO_ERROR, "Everything is fine.",
                               ConnectionCloseBehavior::SILENT_CLOSE);
  ASSERT_FALSE(connection_->connected());
  EXPECT_CALL(session_visitor_, OnWriteBlocked(_)).Times(0);
  session_.OnWriteBlocked();
}
// 1-RTT keys must be absent before the handshake and present after it.
TEST_P(QuicSessionTestServer, OneRttKeysAvailable) {
  const bool keys_before_handshake = session_.OneRttKeysAvailable();
  EXPECT_FALSE(keys_before_handshake);
  CompleteHandshake();
  const bool keys_after_handshake = session_.OneRttKeysAvailable();
  EXPECT_TRUE(keys_after_handshake);
}
// With no stream activity at all, no stream id may be reported as closed.
TEST_P(QuicSessionTestServer, IsClosedStreamDefault) {
  QuicStreamId lowest_id = QuicUtils::GetFirstBidirectionalStreamId(
      connection_->transport_version(), Perspective::IS_CLIENT);
  if (!QuicVersionUsesCryptoFrames(connection_->transport_version())) {
    // Pre-crypto-frame versions start at the dedicated crypto stream id.
    lowest_id = QuicUtils::GetCryptoStreamId(connection_->transport_version());
  }
  for (QuicStreamId id = lowest_id; id < 100; ++id) {
    EXPECT_FALSE(session_.IsClosedStream(id)) << "stream id: " << id;
  }
}
// Creating stream #3 implicitly makes #1 and #2 "available"; they can then
// be created out of order.
TEST_P(QuicSessionTestServer, AvailableBidirectionalStreams) {
  ASSERT_TRUE(session_.GetOrCreateStream(
                  GetNthClientInitiatedBidirectionalId(3)) != nullptr);
  EXPECT_TRUE(QuicSessionPeer::IsStreamAvailable(
      &session_, GetNthClientInitiatedBidirectionalId(1)));
  EXPECT_TRUE(QuicSessionPeer::IsStreamAvailable(
      &session_, GetNthClientInitiatedBidirectionalId(2)));
  ASSERT_TRUE(session_.GetOrCreateStream(
                  GetNthClientInitiatedBidirectionalId(2)) != nullptr);
  ASSERT_TRUE(session_.GetOrCreateStream(
                  GetNthClientInitiatedBidirectionalId(1)) != nullptr);
}
// Same availability behavior as above, for the unidirectional id space.
TEST_P(QuicSessionTestServer, AvailableUnidirectionalStreams) {
  ASSERT_TRUE(session_.GetOrCreateStream(
                  GetNthClientInitiatedUnidirectionalId(3)) != nullptr);
  EXPECT_TRUE(QuicSessionPeer::IsStreamAvailable(
      &session_, GetNthClientInitiatedUnidirectionalId(1)));
  EXPECT_TRUE(QuicSessionPeer::IsStreamAvailable(
      &session_, GetNthClientInitiatedUnidirectionalId(2)));
  ASSERT_TRUE(session_.GetOrCreateStream(
                  GetNthClientInitiatedUnidirectionalId(2)) != nullptr);
  ASSERT_TRUE(session_.GetOrCreateStream(
                  GetNthClientInitiatedUnidirectionalId(1)) != nullptr);
}
// IETF QUIC bounds availability by the stream limit itself; Google QUIC
// allows a multiplier on top of the open-stream limit.
TEST_P(QuicSessionTestServer, MaxAvailableBidirectionalStreams) {
  const auto open_limit = session_.max_open_incoming_bidirectional_streams();
  if (VersionHasIetfQuicFrames(transport_version())) {
    EXPECT_EQ(open_limit, session_.MaxAvailableBidirectionalStreams());
  } else {
    EXPECT_EQ(open_limit * kMaxAvailableStreamsMultiplier,
              session_.MaxAvailableBidirectionalStreams());
  }
}
// Unidirectional counterpart of MaxAvailableBidirectionalStreams.
TEST_P(QuicSessionTestServer, MaxAvailableUnidirectionalStreams) {
  const auto open_limit = session_.max_open_incoming_unidirectional_streams();
  if (VersionHasIetfQuicFrames(transport_version())) {
    EXPECT_EQ(open_limit, session_.MaxAvailableUnidirectionalStreams());
  } else {
    EXPECT_EQ(open_limit * kMaxAvailableStreamsMultiplier,
              session_.MaxAvailableUnidirectionalStreams());
  }
}
// Locally created bidirectional streams are reported closed only after
// being reset; verified after each close.
TEST_P(QuicSessionTestServer, IsClosedBidirectionalStreamLocallyCreated) {
  CompleteHandshake();
  TestStream* stream2 = session_.CreateOutgoingBidirectionalStream();
  EXPECT_EQ(GetNthServerInitiatedBidirectionalId(0), stream2->id());
  TestStream* stream4 = session_.CreateOutgoingBidirectionalStream();
  EXPECT_EQ(GetNthServerInitiatedBidirectionalId(1), stream4->id());
  CheckClosedStreams();
  CloseStream(GetNthServerInitiatedBidirectionalId(0));
  CheckClosedStreams();
  CloseStream(GetNthServerInitiatedBidirectionalId(1));
  CheckClosedStreams();
}
// Same as above for locally created (write-)unidirectional streams.
TEST_P(QuicSessionTestServer, IsClosedUnidirectionalStreamLocallyCreated) {
  CompleteHandshake();
  TestStream* stream2 = session_.CreateOutgoingUnidirectionalStream();
  EXPECT_EQ(GetNthServerInitiatedUnidirectionalId(0), stream2->id());
  TestStream* stream4 = session_.CreateOutgoingUnidirectionalStream();
  EXPECT_EQ(GetNthServerInitiatedUnidirectionalId(1), stream4->id());
  CheckClosedStreams();
  CloseStream(GetNthServerInitiatedUnidirectionalId(0));
  CheckClosedStreams();
  CloseStream(GetNthServerInitiatedUnidirectionalId(1));
  CheckClosedStreams();
}
// Peer-created bidirectional streams: closed state tracks resets, including
// for a stream created after some peers streams were already closed.
TEST_P(QuicSessionTestServer, IsClosedBidirectionalStreamPeerCreated) {
  CompleteHandshake();
  QuicStreamId stream_id1 = GetNthClientInitiatedBidirectionalId(0);
  QuicStreamId stream_id2 = GetNthClientInitiatedBidirectionalId(1);
  session_.GetOrCreateStream(stream_id1);
  session_.GetOrCreateStream(stream_id2);
  CheckClosedStreams();
  CloseStream(stream_id1);
  CheckClosedStreams();
  CloseStream(stream_id2);
  // Create a stream two ids further on, implicitly making the id between
  // them available, then close it too.
  QuicStream* stream3 = session_.GetOrCreateStream(
      stream_id2 +
      2 * QuicUtils::StreamIdDelta(connection_->transport_version()));
  CheckClosedStreams();
  CloseStream(stream3->id());
  CheckClosedStreams();
}
// Unidirectional counterpart of IsClosedBidirectionalStreamPeerCreated.
TEST_P(QuicSessionTestServer, IsClosedUnidirectionalStreamPeerCreated) {
  CompleteHandshake();
  QuicStreamId stream_id1 = GetNthClientInitiatedUnidirectionalId(0);
  QuicStreamId stream_id2 = GetNthClientInitiatedUnidirectionalId(1);
  session_.GetOrCreateStream(stream_id1);
  session_.GetOrCreateStream(stream_id2);
  CheckClosedStreams();
  CloseStream(stream_id1);
  CheckClosedStreams();
  CloseStream(stream_id2);
  QuicStream* stream3 = session_.GetOrCreateStream(
      stream_id2 +
      2 * QuicUtils::StreamIdDelta(connection_->transport_version()));
  CheckClosedStreams();
  CloseStream(stream3->id());
  CheckClosedStreams();
}
// Opening the highest-allowed bidirectional stream id must succeed without
// closing the connection.
TEST_P(QuicSessionTestServer, MaximumAvailableOpenedBidirectionalStreams) {
  QuicStreamId stream_id = GetNthClientInitiatedBidirectionalId(0);
  session_.GetOrCreateStream(stream_id);
  EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(0);
  EXPECT_NE(nullptr,
            session_.GetOrCreateStream(GetNthClientInitiatedBidirectionalId(
                session_.max_open_incoming_bidirectional_streams() - 1)));
}
// Unidirectional counterpart of the test above.
TEST_P(QuicSessionTestServer, MaximumAvailableOpenedUnidirectionalStreams) {
  QuicStreamId stream_id = GetNthClientInitiatedUnidirectionalId(0);
  session_.GetOrCreateStream(stream_id);
  EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(0);
  EXPECT_NE(nullptr,
            session_.GetOrCreateStream(GetNthClientInitiatedUnidirectionalId(
                session_.max_open_incoming_unidirectional_streams() - 1)));
}
// A stream id beyond the availability window must close the connection;
// the error code differs between IETF and Google QUIC.
TEST_P(QuicSessionTestServer, TooManyAvailableBidirectionalStreams) {
  QuicStreamId stream_id1 = GetNthClientInitiatedBidirectionalId(0);
  QuicStreamId stream_id2;
  EXPECT_NE(nullptr, session_.GetOrCreateStream(stream_id1));
  stream_id2 = GetNthClientInitiatedBidirectionalId(
      session_.MaxAvailableBidirectionalStreams() + 2);
  if (VersionHasIetfQuicFrames(transport_version())) {
    EXPECT_CALL(*connection_, CloseConnection(QUIC_INVALID_STREAM_ID, _, _));
  } else {
    EXPECT_CALL(*connection_,
                CloseConnection(QUIC_TOO_MANY_AVAILABLE_STREAMS, _, _));
  }
  EXPECT_EQ(nullptr, session_.GetOrCreateStream(stream_id2));
}
// Unidirectional counterpart of TooManyAvailableBidirectionalStreams.
TEST_P(QuicSessionTestServer, TooManyAvailableUnidirectionalStreams) {
  QuicStreamId stream_id1 = GetNthClientInitiatedUnidirectionalId(0);
  QuicStreamId stream_id2;
  EXPECT_NE(nullptr, session_.GetOrCreateStream(stream_id1));
  stream_id2 = GetNthClientInitiatedUnidirectionalId(
      session_.MaxAvailableUnidirectionalStreams() + 2);
  if (VersionHasIetfQuicFrames(transport_version())) {
    EXPECT_CALL(*connection_, CloseConnection(QUIC_INVALID_STREAM_ID, _, _));
  } else {
    EXPECT_CALL(*connection_,
                CloseConnection(QUIC_TOO_MANY_AVAILABLE_STREAMS, _, _));
  }
  EXPECT_EQ(nullptr, session_.GetOrCreateStream(stream_id2));
}
TEST_P(QuicSessionTestServer, ManyAvailableBidirectionalStreams) {
if (VersionHasIetfQuicFrames(transport_version())) {
QuicSessionPeer::SetMaxOpenIncomingBidirectionalStreams(&session_, 200);
QuicSessionPeer::SetMaxOpenIncomingUnidirectionalStreams(&session_, 50);
} else {
QuicSessionPeer::SetMaxOpenIncomingStreams(&session_, 200);
}
QuicStreamId stream_id = GetNthClientInitiatedBidirectionalId(0);
EXPECT_NE(nullptr, session_.GetOrCreateStream(stream_id));
EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(0);
EXPECT_NE(nullptr, session_.GetOrCreateStream(
GetNthClientInitiatedBidirectionalId(199)));
if (VersionHasIetfQuicFrames(transport_version())) {
stream_id = GetNthClientInitiatedUnidirectionalId(0);
EXPECT_NE(nullptr, session_.GetOrCreateStream(stream_id));
EXPECT_NE(nullptr, session_.GetOrCreateStream(
GetNthClientInitiatedUnidirectionalId(49)));
EXPECT_CALL(
*connection_,
CloseConnection(QUIC_INVALID_STREAM_ID,
"Stream id 798 would exceed stream count limit 50",
ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET))
.Times(1);
EXPECT_EQ(nullptr, session_.GetOrCreateStream(
GetNthClientInitiatedUnidirectionalId(199)));
}
}
// Mirror of the previous test with the limits swapped; the expected error
// string depends on whether the version uses CRYPTO frames (which shifts
// the first bidirectional stream id).
TEST_P(QuicSessionTestServer, ManyAvailableUnidirectionalStreams) {
  if (VersionHasIetfQuicFrames(transport_version())) {
    QuicSessionPeer::SetMaxOpenIncomingUnidirectionalStreams(&session_, 200);
    QuicSessionPeer::SetMaxOpenIncomingBidirectionalStreams(&session_, 50);
  } else {
    QuicSessionPeer::SetMaxOpenIncomingStreams(&session_, 200);
  }
  QuicStreamId stream_id = GetNthClientInitiatedUnidirectionalId(0);
  EXPECT_NE(nullptr, session_.GetOrCreateStream(stream_id));
  EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(0);
  EXPECT_NE(nullptr, session_.GetOrCreateStream(
                         GetNthClientInitiatedUnidirectionalId(199)));
  if (VersionHasIetfQuicFrames(transport_version())) {
    stream_id = GetNthClientInitiatedBidirectionalId(0);
    EXPECT_NE(nullptr, session_.GetOrCreateStream(stream_id));
    EXPECT_NE(nullptr, session_.GetOrCreateStream(
                           GetNthClientInitiatedBidirectionalId(49)));
    std::string error_detail;
    if (QuicVersionUsesCryptoFrames(transport_version())) {
      error_detail = "Stream id 796 would exceed stream count limit 50";
    } else {
      error_detail = "Stream id 800 would exceed stream count limit 50";
    }
    EXPECT_CALL(
        *connection_,
        CloseConnection(QUIC_INVALID_STREAM_ID, error_detail,
                        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET))
        .Times(1);
    EXPECT_EQ(nullptr, session_.GetOrCreateStream(
                           GetNthClientInitiatedBidirectionalId(199)));
  }
}
// Marking an already-closed stream write-blocked is a programming error and
// must trip a QUIC_BUG with the exact message constructed below.
TEST_P(QuicSessionTestServer, DebugDFatalIfMarkingClosedStreamWriteBlocked) {
  CompleteHandshake();
  TestStream* stream2 = session_.CreateOutgoingBidirectionalStream();
  QuicStreamId closed_stream_id = stream2->id();
  EXPECT_CALL(*connection_, SendControlFrame(_));
  EXPECT_CALL(*connection_, OnStreamReset(closed_stream_id, _));
  stream2->Reset(QUIC_BAD_APPLICATION_PAYLOAD);
  std::string msg =
      absl::StrCat("Marking unknown stream ", closed_stream_id, " blocked.");
  EXPECT_QUIC_BUG(session_.MarkConnectionLevelWriteBlocked(closed_stream_id),
                  msg);
}
// OnCanWrite services blocked streams in scheduler order; which stream is
// served second depends on the batch-write / incremental-priority flags.
// stream2 re-blocks itself, so the session stays willing to write.
TEST_P(QuicSessionTestServer, OnCanWrite) {
  CompleteHandshake();
  session_.set_writev_consumes_all_data(true);
  TestStream* stream2 = session_.CreateOutgoingBidirectionalStream();
  TestStream* stream4 = session_.CreateOutgoingBidirectionalStream();
  TestStream* stream6 = session_.CreateOutgoingBidirectionalStream();
  session_.MarkConnectionLevelWriteBlocked(stream2->id());
  session_.MarkConnectionLevelWriteBlocked(stream6->id());
  session_.MarkConnectionLevelWriteBlocked(stream4->id());
  InSequence s;
  EXPECT_CALL(*stream2, OnCanWrite()).WillOnce(Invoke([this, stream2]() {
    session_.SendStreamData(stream2);
    session_.MarkConnectionLevelWriteBlocked(stream2->id());
  }));
  if (!GetQuicReloadableFlag(quic_disable_batch_write) ||
      GetQuicReloadableFlag(quic_priority_respect_incremental)) {
    // Batch writing: stream2 is served again before moving on.
    EXPECT_CALL(*stream2, OnCanWrite()).WillOnce(Invoke([this, stream2]() {
      session_.SendStreamData(stream2);
    }));
    EXPECT_CALL(*stream6, OnCanWrite()).WillOnce(Invoke([this, stream6]() {
      session_.SendStreamData(stream6);
    }));
  } else {
    // Round-robin: the other two blocked streams are served.
    EXPECT_CALL(*stream6, OnCanWrite()).WillOnce(Invoke([this, stream6]() {
      session_.SendStreamData(stream6);
    }));
    EXPECT_CALL(*stream4, OnCanWrite()).WillOnce(Invoke([this, stream4]() {
      session_.SendStreamData(stream4);
    }));
  }
  session_.OnCanWrite();
  EXPECT_TRUE(session_.WillingAndAbleToWrite());
}
// Exercises batched writing across several OnCanWrite rounds with three
// equal-priority incremental streams, then a priority bump for stream6.
// Expected service order flips with the quic_disable_batch_write flag.
TEST_P(QuicSessionTestServer, TestBatchedWrites) {
  session_.set_writev_consumes_all_data(true);
  TestStream* stream2 = session_.CreateOutgoingBidirectionalStream();
  TestStream* stream4 = session_.CreateOutgoingBidirectionalStream();
  TestStream* stream6 = session_.CreateOutgoingBidirectionalStream();
  const QuicStreamPriority priority(
      HttpStreamPriority{HttpStreamPriority::kDefaultUrgency,
                         true});
  stream2->SetPriority(priority);
  stream4->SetPriority(priority);
  stream6->SetPriority(priority);
  session_.set_writev_consumes_all_data(true);
  session_.MarkConnectionLevelWriteBlocked(stream2->id());
  session_.MarkConnectionLevelWriteBlocked(stream4->id());
  InSequence s;
  // Round 1: stream2 writes 6000 bytes and re-blocks itself.
  EXPECT_CALL(*stream2, OnCanWrite()).WillOnce(Invoke([this, stream2]() {
    session_.SendLargeFakeData(stream2, 6000);
    session_.MarkConnectionLevelWriteBlocked(stream2->id());
  }));
  if (GetQuicReloadableFlag(quic_disable_batch_write)) {
    // No batching: service rotates to stream4.
    EXPECT_CALL(*stream4, OnCanWrite()).WillOnce(Invoke([this, stream4]() {
      session_.SendLargeFakeData(stream4, 6000);
      session_.MarkConnectionLevelWriteBlocked(stream4->id());
    }));
  } else {
    // Batching: stream2 keeps the floor for a second write.
    EXPECT_CALL(*stream2, OnCanWrite()).WillOnce(Invoke([this, stream2]() {
      session_.SendLargeFakeData(stream2, 6000);
      session_.MarkConnectionLevelWriteBlocked(stream2->id());
    }));
  }
  session_.OnCanWrite();
  // Round 2: both streams get a turn.
  EXPECT_CALL(*stream2, OnCanWrite()).WillOnce(Invoke([this, stream2]() {
    session_.SendLargeFakeData(stream2, 6000);
    session_.MarkConnectionLevelWriteBlocked(stream2->id());
  }));
  EXPECT_CALL(*stream4, OnCanWrite()).WillOnce(Invoke([this, stream4]() {
    session_.SendLargeFakeData(stream4, 6000);
    session_.MarkConnectionLevelWriteBlocked(stream4->id());
  }));
  session_.OnCanWrite();
  // Bump stream6 to the highest urgency; it should preempt once blocked.
  stream6->SetPriority(QuicStreamPriority(HttpStreamPriority{
      kV3HighestPriority, HttpStreamPriority::kDefaultIncremental}));
  if (GetQuicReloadableFlag(quic_disable_batch_write)) {
    EXPECT_CALL(*stream2, OnCanWrite())
        .WillOnce(Invoke([this, stream2, stream6]() {
          session_.SendLargeFakeData(stream2, 6000);
          session_.MarkConnectionLevelWriteBlocked(stream2->id());
          session_.MarkConnectionLevelWriteBlocked(stream6->id());
        }));
  } else {
    EXPECT_CALL(*stream4, OnCanWrite())
        .WillOnce(Invoke([this, stream4, stream6]() {
          session_.SendLargeFakeData(stream4, 6000);
          session_.MarkConnectionLevelWriteBlocked(stream4->id());
          session_.MarkConnectionLevelWriteBlocked(stream6->id());
        }));
  }
  EXPECT_CALL(*stream6, OnCanWrite())
      .WillOnce(Invoke([this, stream4, stream6]() {
        session_.SendStreamData(stream6);
        session_.SendLargeFakeData(stream4, 6000);
      }));
  session_.OnCanWrite();
  // Final round: remaining blocked streams drain in order.
  EXPECT_CALL(*stream4, OnCanWrite()).WillOnce(Invoke([this, stream4]() {
    session_.SendLargeFakeData(stream4, 12000);
    session_.MarkConnectionLevelWriteBlocked(stream4->id());
  }));
  EXPECT_CALL(*stream2, OnCanWrite()).WillOnce(Invoke([this, stream2]() {
    session_.SendLargeFakeData(stream2, 6000);
    session_.MarkConnectionLevelWriteBlocked(stream2->id());
  }));
  session_.OnCanWrite();
}
// With ample congestion window, one OnCanWrite pass must bundle data from
// all three blocked streams into a single packet (one WritePacket call).
TEST_P(QuicSessionTestServer, OnCanWriteBundlesStreams) {
  CompleteHandshake();
  MockPacketWriter* writer = static_cast<MockPacketWriter*>(
      QuicConnectionPeer::GetWriter(session_.connection()));
  MockSendAlgorithm* send_algorithm = new StrictMock<MockSendAlgorithm>;
  QuicConnectionPeer::SetSendAlgorithm(session_.connection(), send_algorithm);
  TestStream* stream2 = session_.CreateOutgoingBidirectionalStream();
  TestStream* stream4 = session_.CreateOutgoingBidirectionalStream();
  TestStream* stream6 = session_.CreateOutgoingBidirectionalStream();
  session_.MarkConnectionLevelWriteBlocked(stream2->id());
  session_.MarkConnectionLevelWriteBlocked(stream6->id());
  session_.MarkConnectionLevelWriteBlocked(stream4->id());
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(true));
  EXPECT_CALL(*send_algorithm, GetCongestionWindow())
      .WillRepeatedly(Return(kMaxOutgoingPacketSize * 10));
  EXPECT_CALL(*send_algorithm, InRecovery()).WillRepeatedly(Return(false));
  EXPECT_CALL(*stream2, OnCanWrite()).WillOnce(Invoke([this, stream2]() {
    session_.SendStreamData(stream2);
  }));
  EXPECT_CALL(*stream4, OnCanWrite()).WillOnce(Invoke([this, stream4]() {
    session_.SendStreamData(stream4);
  }));
  EXPECT_CALL(*stream6, OnCanWrite()).WillOnce(Invoke([this, stream6]() {
    session_.SendStreamData(stream6);
  }));
  // Exactly one packet should leave the connection for all three writes.
  EXPECT_CALL(*writer, WritePacket(_, _, _, _, _, _))
      .WillOnce(Return(WriteResult(WRITE_STATUS_OK, 0)));
  EXPECT_CALL(*send_algorithm, OnPacketSent(_, _, _, _, _));
  EXPECT_CALL(*send_algorithm, OnApplicationLimited(_));
  session_.OnCanWrite();
  EXPECT_FALSE(session_.WillingAndAbleToWrite());
}
// When the congestion controller stops permitting sends mid-pass, service
// halts with streams still blocked and resumes on a later OnCanWrite.
TEST_P(QuicSessionTestServer, OnCanWriteCongestionControlBlocks) {
  CompleteHandshake();
  session_.set_writev_consumes_all_data(true);
  InSequence s;
  MockSendAlgorithm* send_algorithm = new StrictMock<MockSendAlgorithm>;
  QuicConnectionPeer::SetSendAlgorithm(session_.connection(), send_algorithm);
  TestStream* stream2 = session_.CreateOutgoingBidirectionalStream();
  TestStream* stream4 = session_.CreateOutgoingBidirectionalStream();
  TestStream* stream6 = session_.CreateOutgoingBidirectionalStream();
  session_.MarkConnectionLevelWriteBlocked(stream2->id());
  session_.MarkConnectionLevelWriteBlocked(stream6->id());
  session_.MarkConnectionLevelWriteBlocked(stream4->id());
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillOnce(Return(true));
  EXPECT_CALL(*stream2, OnCanWrite()).WillOnce(Invoke([this, stream2]() {
    session_.SendStreamData(stream2);
  }));
  EXPECT_CALL(*send_algorithm, GetCongestionWindow()).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillOnce(Return(true));
  EXPECT_CALL(*stream6, OnCanWrite()).WillOnce(Invoke([this, stream6]() {
    session_.SendStreamData(stream6);
  }));
  // Third send is congestion-blocked: stream4 stays queued.
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillOnce(Return(false));
  session_.OnCanWrite();
  EXPECT_TRUE(session_.WillingAndAbleToWrite());
  // Still blocked: nothing is serviced.
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillOnce(Return(false));
  session_.OnCanWrite();
  EXPECT_TRUE(session_.WillingAndAbleToWrite());
  // Unblocked: stream4 finally writes and the session drains.
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillOnce(Return(true));
  EXPECT_CALL(*stream4, OnCanWrite()).WillOnce(Invoke([this, stream4]() {
    session_.SendStreamData(stream4);
  }));
  EXPECT_CALL(*send_algorithm, OnApplicationLimited(_));
  session_.OnCanWrite();
  EXPECT_FALSE(session_.WillingAndAbleToWrite());
}
// If the packet writer itself is blocked, OnCanWrite must not service any
// stream nor write any packet, and the session remains willing to write.
TEST_P(QuicSessionTestServer, OnCanWriteWriterBlocks) {
  CompleteHandshake();
  MockSendAlgorithm* send_algorithm = new StrictMock<MockSendAlgorithm>;
  QuicConnectionPeer::SetSendAlgorithm(session_.connection(), send_algorithm);
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(true));
  MockPacketWriter* writer = static_cast<MockPacketWriter*>(
      QuicConnectionPeer::GetWriter(session_.connection()));
  EXPECT_CALL(*writer, IsWriteBlocked()).WillRepeatedly(Return(true));
  EXPECT_CALL(*writer, WritePacket(_, _, _, _, _, _)).Times(0);
  TestStream* stream2 = session_.CreateOutgoingBidirectionalStream();
  session_.MarkConnectionLevelWriteBlocked(stream2->id());
  EXPECT_CALL(*stream2, OnCanWrite()).Times(0);
  EXPECT_CALL(*send_algorithm, OnApplicationLimited(_)).Times(0);
  session_.OnCanWrite();
  EXPECT_TRUE(session_.WillingAndAbleToWrite());
}
// IETF QUIC only: exhausting the outgoing stream-id budget in each
// direction must emit a STREAMS_BLOCKED frame carrying the current limit.
TEST_P(QuicSessionTestServer, SendStreamsBlocked) {
  if (!VersionHasIetfQuicFrames(transport_version())) {
    return;
  }
  CompleteHandshake();
  for (size_t i = 0; i < kDefaultMaxStreamsPerConnection; ++i) {
    ASSERT_TRUE(session_.CanOpenNextOutgoingBidirectionalStream());
    session_.GetNextOutgoingBidirectionalStreamId();
  }
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillOnce(Invoke([](const QuicFrame& frame) {
        EXPECT_FALSE(frame.streams_blocked_frame.unidirectional);
        EXPECT_EQ(kDefaultMaxStreamsPerConnection,
                  frame.streams_blocked_frame.stream_count);
        ClearControlFrame(frame);
        return true;
      }));
  EXPECT_FALSE(session_.CanOpenNextOutgoingBidirectionalStream());
  for (size_t i = 0; i < kDefaultMaxStreamsPerConnection; ++i) {
    ASSERT_TRUE(session_.CanOpenNextOutgoingUnidirectionalStream());
    session_.GetNextOutgoingUnidirectionalStreamId();
  }
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillOnce(Invoke([](const QuicFrame& frame) {
        EXPECT_TRUE(frame.streams_blocked_frame.unidirectional);
        EXPECT_EQ(kDefaultMaxStreamsPerConnection,
                  frame.streams_blocked_frame.stream_count);
        ClearControlFrame(frame);
        return true;
      }));
  EXPECT_FALSE(session_.CanOpenNextOutgoingUnidirectionalStream());
}
// Verifies MAX_STREAMS advertisement pacing (IETF QUIC only): closing
// incoming streams raises the advertised incoming bidirectional stream
// limit, additional MAX_STREAMS frames are gated on acknowledgement of
// earlier ones, and the stream-count reset alarm is fired between waves
// of incoming streams.
TEST_P(QuicSessionTestServer, LimitMaxStreams) {
  if (!VersionHasIetfQuicFrames(transport_version())) {
    return;
  }
  CompleteHandshake();
  const QuicStreamId kMaxStreams = 4;
  QuicSessionPeer::SetMaxOpenIncomingBidirectionalStreams(&session_,
                                                          kMaxStreams);
  EXPECT_EQ(kMaxStreams, QuicSessionPeer::ietf_streamid_manager(&session_)
                             ->advertised_max_incoming_bidirectional_streams());
  // Capture the MAX_STREAMS frames the session sends so they can be
  // acked individually later.
  std::vector<QuicMaxStreamsFrame> max_stream_frames;
  EXPECT_CALL(*connection_, SendControlFrame(IsFrame(MAX_STREAMS_FRAME)))
      .Times(2)
      .WillRepeatedly(Invoke([&max_stream_frames](const QuicFrame& frame) {
        max_stream_frames.push_back(frame.max_streams_frame);
        ClearControlFrame(frame);
        return true;
      }));
  // First wave: open and close kMaxStreams incoming streams.
  for (size_t i = 0; i < kMaxStreams; ++i) {
    QuicStreamId stream_id = GetNthClientInitiatedBidirectionalId(i);
    QuicStreamFrame data1(stream_id, true, 0, absl::string_view("HT"));
    session_.OnStreamFrame(data1);
    CloseStream(stream_id);
  }
  // Closing the first wave doubles the advertised limit.
  EXPECT_EQ(2 * kMaxStreams,
            QuicSessionPeer::ietf_streamid_manager(&session_)
                ->advertised_max_incoming_bidirectional_streams());
  QuicAlarm* alarm = QuicSessionPeer::GetStreamCountResetAlarm(&session_);
  if (alarm->IsSet()) {
    alarm_factory_.FireAlarm(alarm);
  }
  // Second wave of incoming streams under the raised limit.
  for (size_t i = 0; i < kMaxStreams; ++i) {
    QuicStreamId stream_id =
        GetNthClientInitiatedBidirectionalId(i + kMaxStreams);
    QuicStreamFrame data1(stream_id, true, 0, absl::string_view("HT"));
    session_.OnStreamFrame(data1);
    CloseStream(stream_id);
  }
  // Acking the first MAX_STREAMS frame lets the session advertise a
  // further increase.
  EXPECT_CALL(*connection_, SendControlFrame(IsFrame(MAX_STREAMS_FRAME)))
      .WillOnce(Invoke(&ClearControlFrame));
  session_.OnFrameAcked(QuicFrame(max_stream_frames[0]),
                        QuicTime::Delta::Zero(), QuicTime::Zero());
  EXPECT_EQ(3 * kMaxStreams,
            QuicSessionPeer::ietf_streamid_manager(&session_)
                ->advertised_max_incoming_bidirectional_streams());
  if (alarm->IsSet()) {
    alarm_factory_.FireAlarm(alarm);
  }
  // Third wave; streams are left open this time.
  for (size_t i = 0; i < kMaxStreams; ++i) {
    QuicStreamId stream_id =
        GetNthClientInitiatedBidirectionalId(i + 2 * kMaxStreams);
    QuicStreamFrame data1(stream_id, true, 0, absl::string_view("HT"));
    session_.OnStreamFrame(data1);
  }
  // Acking the second MAX_STREAMS frame must not send another one here
  // (no further SendControlFrame expectation is set).
  session_.OnFrameAcked(QuicFrame(max_stream_frames[1]),
                        QuicTime::Delta::Zero(), QuicTime::Zero());
}
// Verifies that when the crypto stream is write-blocked (pre-crypto-frames
// versions only), the session reports a pending handshake and OnCanWrite
// services the crypto stream first, before any data streams.
TEST_P(QuicSessionTestServer, BufferedHandshake) {
  if (QuicVersionUsesCryptoFrames(connection_->transport_version())) {
    // Versions using CRYPTO frames have no dedicated crypto stream id to
    // write-block, so this scenario does not apply.
    return;
  }
  session_.set_writev_consumes_all_data(true);
  EXPECT_FALSE(session_.HasPendingHandshake());
  TestStream* stream2 = session_.CreateOutgoingBidirectionalStream();
  session_.MarkConnectionLevelWriteBlocked(stream2->id());
  EXPECT_FALSE(session_.HasPendingHandshake());
  TestStream* stream3 = session_.CreateOutgoingBidirectionalStream();
  session_.MarkConnectionLevelWriteBlocked(stream3->id());
  EXPECT_FALSE(session_.HasPendingHandshake());
  // Blocking the crypto stream flips HasPendingHandshake to true.
  session_.MarkConnectionLevelWriteBlocked(
      QuicUtils::GetCryptoStreamId(connection_->transport_version()));
  EXPECT_TRUE(session_.HasPendingHandshake());
  TestStream* stream4 = session_.CreateOutgoingBidirectionalStream();
  session_.MarkConnectionLevelWriteBlocked(stream4->id());
  EXPECT_TRUE(session_.HasPendingHandshake());
  // InSequence enforces crypto-stream-first write ordering.
  InSequence s;
  TestCryptoStream* crypto_stream = session_.GetMutableCryptoStream();
  EXPECT_CALL(*crypto_stream, OnCanWrite());
  EXPECT_CALL(*stream2, OnCanWrite()).WillOnce(Invoke([this, stream2]() {
    session_.SendStreamData(stream2);
  }));
  EXPECT_CALL(*stream3, OnCanWrite()).WillOnce(Invoke([this, stream3]() {
    session_.SendStreamData(stream3);
  }));
  // stream4 re-blocks itself, so the session remains willing to write.
  EXPECT_CALL(*stream4, OnCanWrite()).WillOnce(Invoke([this, stream4]() {
    session_.SendStreamData(stream4);
    session_.MarkConnectionLevelWriteBlocked(stream4->id());
  }));
  session_.OnCanWrite();
  EXPECT_TRUE(session_.WillingAndAbleToWrite());
  EXPECT_FALSE(session_.HasPendingHandshake());
}
// Verifies that a stream closed after being marked write-blocked is
// skipped by OnCanWrite: only the still-open blocked streams get
// OnCanWrite callbacks, in the order they were blocked.
TEST_P(QuicSessionTestServer, OnCanWriteWithClosedStream) {
  CompleteHandshake();
  session_.set_writev_consumes_all_data(true);
  TestStream* stream2 = session_.CreateOutgoingBidirectionalStream();
  TestStream* stream4 = session_.CreateOutgoingBidirectionalStream();
  TestStream* stream6 = session_.CreateOutgoingBidirectionalStream();
  session_.MarkConnectionLevelWriteBlocked(stream2->id());
  session_.MarkConnectionLevelWriteBlocked(stream6->id());
  session_.MarkConnectionLevelWriteBlocked(stream4->id());
  // stream6 is closed while still in the write-blocked list.
  CloseStream(stream6->id());
  InSequence s;
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillRepeatedly(Invoke(&ClearControlFrame));
  EXPECT_CALL(*stream2, OnCanWrite()).WillOnce(Invoke([this, stream2]() {
    session_.SendStreamData(stream2);
  }));
  EXPECT_CALL(*stream4, OnCanWrite()).WillOnce(Invoke([this, stream4]() {
    session_.SendStreamData(stream4);
  }));
  session_.OnCanWrite();
  EXPECT_FALSE(session_.WillingAndAbleToWrite());
}
// Verifies that when connection-level flow control is blocked, OnCanWrite
// writes no data-stream bytes (only the crypto stream, which is exempt,
// on pre-crypto-frames versions) and reports application-limited to the
// send algorithm.
TEST_P(QuicSessionTestServer, OnCanWriteLimitsNumWritesIfFlowControlBlocked) {
  // Install a strict mock send algorithm; the connection takes ownership.
  MockSendAlgorithm* send_algorithm = new StrictMock<MockSendAlgorithm>;
  QuicConnectionPeer::SetSendAlgorithm(session_.connection(), send_algorithm);
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(true));
  // Zero send window => connection-level flow control is blocked.
  QuicFlowControllerPeer::SetSendWindowOffset(session_.flow_controller(), 0);
  EXPECT_TRUE(session_.flow_controller()->IsBlocked());
  EXPECT_TRUE(session_.IsConnectionFlowControlBlocked());
  EXPECT_FALSE(session_.IsStreamFlowControlBlocked());
  if (!QuicVersionUsesCryptoFrames(connection_->transport_version())) {
    session_.MarkConnectionLevelWriteBlocked(
        QuicUtils::GetCryptoStreamId(connection_->transport_version()));
  }
  TestStream* stream = session_.CreateOutgoingBidirectionalStream();
  session_.MarkConnectionLevelWriteBlocked(stream->id());
  // The data stream must not be serviced while flow-control blocked.
  EXPECT_CALL(*stream, OnCanWrite()).Times(0);
  if (!QuicVersionUsesCryptoFrames(connection_->transport_version())) {
    // The crypto stream is exempt from connection flow control.
    TestCryptoStream* crypto_stream = session_.GetMutableCryptoStream();
    EXPECT_CALL(*crypto_stream, OnCanWrite());
  }
  EXPECT_CALL(*send_algorithm, OnApplicationLimited(_));
  session_.OnCanWrite();
  EXPECT_FALSE(session_.WillingAndAbleToWrite());
}
// Verifies GOAWAY behavior (gQUIC only): sending GOAWAY marks the session
// as having sent a transport goaway, and incoming streams opened after
// GOAWAY are still accepted rather than reset.
TEST_P(QuicSessionTestServer, SendGoAway) {
  if (VersionHasIetfQuicFrames(transport_version())) {
    // IETF QUIC has no transport-level GOAWAY frame.
    return;
  }
  CompleteHandshake();
  connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  MockPacketWriter* writer = static_cast<MockPacketWriter*>(
      QuicConnectionPeer::GetWriter(session_.connection()));
  EXPECT_CALL(*writer, WritePacket(_, _, _, _, _, _))
      .WillOnce(Return(WriteResult(WRITE_STATUS_OK, 0)));
  // Let the real control-frame path run so the GOAWAY actually serializes.
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillOnce(
          Invoke(connection_, &MockQuicConnection::ReallySendControlFrame));
  session_.SendGoAway(QUIC_PEER_GOING_AWAY, "Going Away.");
  EXPECT_TRUE(session_.transport_goaway_sent());
  // A new incoming stream after GOAWAY must not be reset.
  const QuicStreamId kTestStreamId = 5u;
  EXPECT_CALL(*connection_, SendControlFrame(_)).Times(0);
  EXPECT_CALL(*connection_,
              OnStreamReset(kTestStreamId, QUIC_STREAM_PEER_GOING_AWAY))
      .Times(0);
  EXPECT_TRUE(session_.GetOrCreateStream(kTestStreamId));
}
// Verifies that a second SendGoAway call emits no additional control
// frame (gQUIC only) — SendControlFrame is expected exactly once.
TEST_P(QuicSessionTestServer, DoNotSendGoAwayTwice) {
  CompleteHandshake();
  if (VersionHasIetfQuicFrames(transport_version())) {
    return;
  }
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillOnce(Invoke(&ClearControlFrame));
  session_.SendGoAway(QUIC_PEER_GOING_AWAY, "Going Away.");
  EXPECT_TRUE(session_.transport_goaway_sent());
  // This second call must not trigger another SendControlFrame.
  session_.SendGoAway(QUIC_PEER_GOING_AWAY, "Going Away.");
}
// Verifies the session tolerates a GOAWAY frame naming the session's own
// next outgoing stream id (gQUIC only); no crash/close is expected.
TEST_P(QuicSessionTestServer, InvalidGoAway) {
  if (VersionHasIetfQuicFrames(transport_version())) {
    return;
  }
  QuicGoAwayFrame go_away(kInvalidControlFrameId, QUIC_PEER_GOING_AWAY,
                          session_.next_outgoing_bidirectional_stream_id(), "");
  session_.OnGoAway(go_away);
}
// Verifies that a gQUIC server replies to a connectivity probe from a new
// peer address by writing a probe response to that address, without
// migrating: the session's recorded peer address is unchanged.
TEST_P(QuicSessionTestServer, ServerReplyToConnectivityProbe) {
  if (VersionHasIetfQuicFrames(transport_version()) ||
      GetQuicReloadableFlag(quic_ignore_gquic_probing)) {
    // IETF QUIC uses PATH_CHALLENGE/PATH_RESPONSE instead; the reloadable
    // flag disables gQUIC probing handling entirely.
    return;
  }
  connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  QuicSocketAddress old_peer_address =
      QuicSocketAddress(QuicIpAddress::Loopback4(), kTestPort);
  EXPECT_EQ(old_peer_address, session_.peer_address());
  QuicSocketAddress new_peer_address =
      QuicSocketAddress(QuicIpAddress::Loopback4(), kTestPort + 1);
  MockPacketWriter* writer = static_cast<MockPacketWriter*>(
      QuicConnectionPeer::GetWriter(session_.connection()));
  // The probe response must be written to the probing (new) address.
  EXPECT_CALL(*writer, WritePacket(_, _, _, new_peer_address, _, _))
      .WillOnce(Return(WriteResult(WRITE_STATUS_OK, 0)));
  EXPECT_CALL(*connection_, SendConnectivityProbingPacket(_, _))
      .WillOnce(
          Invoke(connection_,
                 &MockQuicConnection::ReallySendConnectivityProbingPacket));
  session_.OnPacketReceived(session_.self_address(), new_peer_address,
                            true);
  // Probing must not migrate the connection's peer address.
  EXPECT_EQ(old_peer_address, session_.peer_address());
}
// Verifies the idle network timeout grows from the initial value to the
// maximum value once the crypto handshake completes (each plus a 3-second
// slack applied by the connection).
TEST_P(QuicSessionTestServer, IncreasedTimeoutAfterCryptoHandshake) {
  EXPECT_EQ(kInitialIdleTimeoutSecs + 3,
            QuicConnectionPeer::GetNetworkTimeout(connection_).ToSeconds());
  CompleteHandshake();
  EXPECT_EQ(kMaximumIdleTimeoutSecs + 3,
            QuicConnectionPeer::GetNetworkTimeout(connection_).ToSeconds());
}
// Verifies that a FIN arriving on a static stream (the headers stream,
// pre-HTTP/3 only) closes the connection with QUIC_INVALID_STREAM_ID.
TEST_P(QuicSessionTestServer, OnStreamFrameFinStaticStreamId) {
  if (VersionUsesHttp3(connection_->transport_version())) {
    // HTTP/3 has no headers stream registered this way.
    return;
  }
  QuicStreamId headers_stream_id =
      QuicUtils::GetHeadersStreamId(connection_->transport_version());
  // Register a stand-in static stream under the headers stream id.
  std::unique_ptr<TestStream> fake_headers_stream =
      std::make_unique<TestStream>(headers_stream_id, &session_,
                                   true, BIDIRECTIONAL);
  QuicSessionPeer::ActivateStream(&session_, std::move(fake_headers_stream));
  QuicStreamFrame data1(headers_stream_id, true, 0, absl::string_view("HT"));
  EXPECT_CALL(*connection_,
              CloseConnection(
                  QUIC_INVALID_STREAM_ID, "Attempt to close a static stream",
                  ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET));
  session_.OnStreamFrame(data1);
}
// Verifies that a stream frame carrying the invalid stream id closes the
// connection with QUIC_INVALID_STREAM_ID.
TEST_P(QuicSessionTestServer, OnStreamFrameInvalidStreamId) {
  QuicStreamFrame data1(
      QuicUtils::GetInvalidStreamId(connection_->transport_version()), true, 0,
      absl::string_view("HT"));
  EXPECT_CALL(*connection_,
              CloseConnection(
                  QUIC_INVALID_STREAM_ID, "Received data for an invalid stream",
                  ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET));
  session_.OnStreamFrame(data1);
}
// Verifies that a RST_STREAM naming the invalid stream id closes the
// connection with QUIC_INVALID_STREAM_ID.
TEST_P(QuicSessionTestServer, OnRstStreamInvalidStreamId) {
  QuicRstStreamFrame rst1(
      kInvalidControlFrameId,
      QuicUtils::GetInvalidStreamId(connection_->transport_version()),
      QUIC_ERROR_PROCESSING_STREAM, 0);
  EXPECT_CALL(*connection_,
              CloseConnection(
                  QUIC_INVALID_STREAM_ID, "Received data for an invalid stream",
                  ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET));
  session_.OnRstStream(rst1);
}
// Verifies that a RESET_STREAM_AT frame (TLS handshake versions only)
// naming the invalid stream id closes the connection with
// QUIC_INVALID_STREAM_ID.
TEST_P(QuicSessionTestServer, OnResetStreamAtInvalidStreamId) {
  if (connection_->version().handshake_protocol != PROTOCOL_TLS1_3) {
    return;
  }
  QuicResetStreamAtFrame rst1(
      kInvalidControlFrameId,
      QuicUtils::GetInvalidStreamId(connection_->transport_version()),
      QUIC_ERROR_PROCESSING_STREAM, 10, 0);
  EXPECT_CALL(*connection_,
              CloseConnection(
                  QUIC_INVALID_STREAM_ID, "Received data for an invalid stream",
                  ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET));
  session_.OnResetStreamAt(rst1);
}
// Verifies that a stream blocked by the minimum pre-handshake flow
// control window becomes unblocked once the handshake completes and the
// negotiated (larger) window takes effect (QUIC-crypto versions only).
TEST_P(QuicSessionTestServer, HandshakeUnblocksFlowControlBlockedStream) {
  if (connection_->version().handshake_protocol == PROTOCOL_TLS1_3) {
    // TLS versions exercise a different 0-RTT limit-update path.
    return;
  }
  session_.set_writev_consumes_all_data(true);
  session_.GetMutableCryptoStream()->EstablishZeroRttEncryption();
  TestStream* stream2 = session_.CreateOutgoingBidirectionalStream();
  // Exactly fill the minimum send window to hit the blocked state.
  std::string body(kMinimumFlowControlSendWindow, '.');
  EXPECT_FALSE(stream2->IsFlowControlBlocked());
  EXPECT_FALSE(session_.IsConnectionFlowControlBlocked());
  EXPECT_FALSE(session_.IsStreamFlowControlBlocked());
  EXPECT_CALL(*connection_, SendControlFrame(_)).Times(AtLeast(1));
  stream2->WriteOrBufferData(body, false, nullptr);
  EXPECT_TRUE(stream2->IsFlowControlBlocked());
  EXPECT_TRUE(session_.IsConnectionFlowControlBlocked());
  EXPECT_TRUE(session_.IsStreamFlowControlBlocked());
  // Handshake completion applies the negotiated windows and unblocks.
  CompleteHandshake();
  EXPECT_TRUE(QuicSessionPeer::IsStreamWriteBlocked(&session_, stream2->id()));
  EXPECT_FALSE(stream2->IsFlowControlBlocked());
  EXPECT_FALSE(session_.IsConnectionFlowControlBlocked());
  EXPECT_FALSE(session_.IsStreamFlowControlBlocked());
}
// Verifies that a RST_STREAM whose final byte offset exceeds the bytes
// actually received causes the connection flow controller to account the
// full final offset as consumed.
TEST_P(QuicSessionTestServer, ConnectionFlowControlAccountingRstOutOfOrder) {
  CompleteHandshake();
  TestStream* stream = session_.CreateOutgoingBidirectionalStream();
  // A final offset larger than anything received so far.
  const QuicStreamOffset kByteOffset =
      1 + kInitialSessionFlowControlWindowForTest / 2;
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .Times(2)
      .WillRepeatedly(Invoke(&ClearControlFrame));
  EXPECT_CALL(*connection_, OnStreamReset(stream->id(), _));
  QuicRstStreamFrame rst_frame(kInvalidControlFrameId, stream->id(),
                               QUIC_STREAM_CANCELLED, kByteOffset);
  session_.OnRstStream(rst_frame);
  if (VersionHasIetfQuicFrames(transport_version())) {
    // In IETF QUIC the peer also stops sending; this must not close the
    // connection.
    QuicStopSendingFrame frame(kInvalidControlFrameId, stream->id(),
                               QUIC_STREAM_CANCELLED);
    EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(0);
    session_.OnStopSendingFrame(frame);
  }
  EXPECT_EQ(kByteOffset, session_.flow_controller()->bytes_consumed());
}
// Verifies that after receiving a FIN and then locally resetting the
// stream, the connection flow controller accounts all received stream
// bytes (up to the FIN offset) as consumed.
TEST_P(QuicSessionTestServer, ConnectionFlowControlAccountingFinAndLocalReset) {
  CompleteHandshake();
  TestStream* stream = session_.CreateOutgoingBidirectionalStream();
  const QuicStreamOffset kByteOffset =
      kInitialSessionFlowControlWindowForTest / 2 - 1;
  // One byte with FIN at a non-zero offset: received but not yet consumed.
  QuicStreamFrame frame(stream->id(), true, kByteOffset, ".");
  session_.OnStreamFrame(frame);
  EXPECT_TRUE(connection_->connected());
  EXPECT_EQ(0u, session_.flow_controller()->bytes_consumed());
  EXPECT_EQ(kByteOffset + frame.data_length,
            stream->highest_received_byte_offset());
  EXPECT_CALL(*connection_, SendControlFrame(_));
  EXPECT_CALL(*connection_, OnStreamReset(stream->id(), _));
  // Local reset must flush the unread bytes into bytes_consumed.
  stream->Reset(QUIC_STREAM_CANCELLED);
  EXPECT_EQ(kByteOffset + frame.data_length,
            session_.flow_controller()->bytes_consumed());
}
// Verifies that data+FIN arriving on a stream the session already reset
// locally still updates connection-level bytes_consumed and
// highest_received_byte_offset by the peer's total stream bytes.
TEST_P(QuicSessionTestServer, ConnectionFlowControlAccountingFinAfterRst) {
  CompleteHandshake();
  // Seed the connection flow controller with non-trivial starting values.
  const uint64_t kInitialConnectionBytesConsumed = 567;
  const uint64_t kInitialConnectionHighestReceivedOffset = 1234;
  EXPECT_LT(kInitialConnectionBytesConsumed,
            kInitialConnectionHighestReceivedOffset);
  session_.flow_controller()->UpdateHighestReceivedOffset(
      kInitialConnectionHighestReceivedOffset);
  session_.flow_controller()->AddBytesConsumed(kInitialConnectionBytesConsumed);
  TestStream* stream = session_.CreateOutgoingBidirectionalStream();
  EXPECT_CALL(*connection_, SendControlFrame(_));
  EXPECT_CALL(*connection_, OnStreamReset(stream->id(), _));
  // Reset locally first; peer data then arrives "late".
  stream->Reset(QUIC_STREAM_CANCELLED);
  const QuicStreamOffset kByteOffset = 5678;
  std::string body = "hello";
  QuicStreamFrame frame(stream->id(), true, kByteOffset,
                        absl::string_view(body));
  session_.OnStreamFrame(frame);
  QuicStreamOffset total_stream_bytes_sent_by_peer =
      kByteOffset + body.length();
  EXPECT_EQ(kInitialConnectionBytesConsumed + total_stream_bytes_sent_by_peer,
            session_.flow_controller()->bytes_consumed());
  EXPECT_EQ(
      kInitialConnectionHighestReceivedOffset + total_stream_bytes_sent_by_peer,
      session_.flow_controller()->highest_received_byte_offset());
}
// Verifies that a peer RST_STREAM arriving after a local reset still
// updates connection-level bytes_consumed and
// highest_received_byte_offset by the peer's final byte offset.
TEST_P(QuicSessionTestServer, ConnectionFlowControlAccountingRstAfterRst) {
  CompleteHandshake();
  // Seed the connection flow controller with non-trivial starting values.
  const uint64_t kInitialConnectionBytesConsumed = 567;
  const uint64_t kInitialConnectionHighestReceivedOffset = 1234;
  EXPECT_LT(kInitialConnectionBytesConsumed,
            kInitialConnectionHighestReceivedOffset);
  session_.flow_controller()->UpdateHighestReceivedOffset(
      kInitialConnectionHighestReceivedOffset);
  session_.flow_controller()->AddBytesConsumed(kInitialConnectionBytesConsumed);
  TestStream* stream = session_.CreateOutgoingBidirectionalStream();
  EXPECT_CALL(*connection_, SendControlFrame(_));
  EXPECT_CALL(*connection_, OnStreamReset(stream->id(), _));
  stream->Reset(QUIC_STREAM_CANCELLED);
  EXPECT_TRUE(QuicStreamPeer::read_side_closed(stream));
  const QuicStreamOffset kByteOffset = 5678;
  QuicRstStreamFrame rst_frame(kInvalidControlFrameId, stream->id(),
                               QUIC_STREAM_CANCELLED, kByteOffset);
  session_.OnRstStream(rst_frame);
  EXPECT_EQ(kInitialConnectionBytesConsumed + kByteOffset,
            session_.flow_controller()->bytes_consumed());
  EXPECT_EQ(kInitialConnectionHighestReceivedOffset + kByteOffset,
            session_.flow_controller()->highest_received_byte_offset());
}
// Verifies handling of a negotiated stream flow control window below the
// minimum: QUIC-crypto versions close the connection with
// QUIC_FLOW_CONTROL_INVALID_WINDOW; TLS versions tolerate it.
TEST_P(QuicSessionTestServer, InvalidStreamFlowControlWindowInHandshake) {
  const uint32_t kInvalidWindow = kMinimumFlowControlSendWindow - 1;
  QuicConfigPeer::SetReceivedInitialStreamFlowControlWindow(session_.config(),
                                                            kInvalidWindow);
  if (connection_->version().handshake_protocol != PROTOCOL_TLS1_3) {
    EXPECT_CALL(*connection_,
                CloseConnection(QUIC_FLOW_CONTROL_INVALID_WINDOW, _, _));
  } else {
    // TLS versions must not close the connection for this window.
    EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(0);
  }
  connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  session_.OnConfigNegotiated();
}
// Verifies that the kIFW7 connection option sets the session receive
// window to 192 KiB after config negotiation.
TEST_P(QuicSessionTestServer, CustomFlowControlWindow) {
  QuicTagVector copt;
  copt.push_back(kIFW7);
  QuicConfigPeer::SetReceivedConnectionOptions(session_.config(), copt);
  connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  session_.OnConfigNegotiated();
  EXPECT_EQ(192 * 1024u, QuicFlowControllerPeer::ReceiveWindowSize(
                             session_.flow_controller()));
}
// Verifies that a final offset exceeding the session flow control window
// — delivered either via a FIN-bearing stream frame or via RST_STREAM —
// closes the connection with QUIC_FLOW_CONTROL_RECEIVED_TOO_MUCH_DATA.
TEST_P(QuicSessionTestServer, FlowControlWithInvalidFinalOffset) {
  CompleteHandshake();
  const uint64_t kLargeOffset = kInitialSessionFlowControlWindowForTest + 1;
  // Expected twice: once for the FIN frame, once for the RST_STREAM.
  EXPECT_CALL(*connection_,
              CloseConnection(QUIC_FLOW_CONTROL_RECEIVED_TOO_MUCH_DATA, _, _))
      .Times(2);
  TestStream* stream = session_.CreateOutgoingBidirectionalStream();
  EXPECT_CALL(*connection_, SendControlFrame(_));
  EXPECT_CALL(*connection_, OnStreamReset(stream->id(), _));
  stream->Reset(QUIC_STREAM_CANCELLED);
  QuicStreamFrame frame(stream->id(), true, kLargeOffset, absl::string_view());
  session_.OnStreamFrame(frame);
  QuicRstStreamFrame rst_frame(kInvalidControlFrameId, stream->id(),
                               QUIC_STREAM_CANCELLED, kLargeOffset);
  session_.OnRstStream(rst_frame);
}
// Verifies the server's reaction to a stream exceeding the incoming
// stream limit: IETF QUIC closes the connection with
// QUIC_INVALID_STREAM_ID; gQUIC refuses just that stream with
// QUIC_REFUSED_STREAM.
TEST_P(QuicSessionTestServer, TooManyUnfinishedStreamsCauseServerRejectStream) {
  CompleteHandshake();
  const QuicStreamId kMaxStreams = 5;
  if (VersionHasIetfQuicFrames(transport_version())) {
    QuicSessionPeer::SetMaxOpenIncomingBidirectionalStreams(&session_,
                                                            kMaxStreams);
  } else {
    QuicSessionPeer::SetMaxOpenIncomingStreams(&session_, kMaxStreams);
  }
  const QuicStreamId kFirstStreamId = GetNthClientInitiatedBidirectionalId(0);
  const QuicStreamId kFinalStreamId =
      GetNthClientInitiatedBidirectionalId(kMaxStreams);
  // Open (without FIN) and close kMaxStreams incoming streams, leaving
  // them unfinished from the peer's perspective.
  for (QuicStreamId i = kFirstStreamId; i < kFinalStreamId;
       i += QuicUtils::StreamIdDelta(connection_->transport_version())) {
    QuicStreamFrame data1(i, false, 0, absl::string_view("HT"));
    session_.OnStreamFrame(data1);
    CloseStream(i);
  }
  if (VersionHasIetfQuicFrames(transport_version())) {
    EXPECT_CALL(
        *connection_,
        CloseConnection(QUIC_INVALID_STREAM_ID,
                        "Stream id 20 would exceed stream count limit 5", _));
  } else {
    EXPECT_CALL(*connection_, SendControlFrame(_)).Times(1);
    EXPECT_CALL(*connection_,
                OnStreamReset(kFinalStreamId, QUIC_REFUSED_STREAM))
        .Times(1);
  }
  // One stream past the limit triggers the version-specific rejection.
  QuicStreamFrame data1(kFinalStreamId, false, 0, absl::string_view("HT"));
  session_.OnStreamFrame(data1);
}
// Verifies that draining an outgoing stream frees a slot: on non-IETF
// versions the session notifies OnCanCreateNewOutgoingStream.
TEST_P(QuicSessionTestServer, DrainingStreamsDoNotCountAsOpenedOutgoing) {
  CompleteHandshake();
  TestStream* stream = session_.CreateOutgoingBidirectionalStream();
  QuicStreamId stream_id = stream->id();
  QuicStreamFrame data1(stream_id, true, 0, absl::string_view("HT"));
  session_.OnStreamFrame(data1);
  if (!VersionHasIetfQuicFrames(transport_version())) {
    // Non-IETF: draining must signal that a new outgoing stream can open.
    EXPECT_CALL(session_, OnCanCreateNewOutgoingStream(false)).Times(1);
  }
  session_.StreamDraining(stream_id, false);
}
// Verifies that with pending streams disabled, out-of-order data on a new
// incoming unidirectional stream creates the stream immediately, and the
// later in-order frame does not create a second one.
TEST_P(QuicSessionTestServer, NoPendingStreams) {
  session_.set_uses_pending_streams(false);
  QuicStreamId stream_id = QuicUtils::GetFirstUnidirectionalStreamId(
      transport_version(), Perspective::IS_CLIENT);
  // Out-of-order frame (offset 10) still creates the stream directly.
  QuicStreamFrame data1(stream_id, true, 10, absl::string_view("HT"));
  session_.OnStreamFrame(data1);
  EXPECT_EQ(1, session_.num_incoming_streams_created());
  QuicStreamFrame data2(stream_id, false, 0, absl::string_view("HT"));
  session_.OnStreamFrame(data2);
  EXPECT_EQ(1, session_.num_incoming_streams_created());
}
// Verifies that with pending streams enabled (HTTP/3 only), out-of-order
// data buffers as a pending stream, and the arrival of the in-order data
// converts it into a real incoming stream.
TEST_P(QuicSessionTestServer, PendingStreams) {
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  session_.set_uses_pending_streams(true);
  QuicStreamId stream_id = QuicUtils::GetFirstUnidirectionalStreamId(
      transport_version(), Perspective::IS_CLIENT);
  // Out-of-order data (offset 10) is held in a pending stream.
  QuicStreamFrame data1(stream_id, true, 10, absl::string_view("HT"));
  session_.OnStreamFrame(data1);
  EXPECT_TRUE(QuicSessionPeer::GetPendingStream(&session_, stream_id));
  EXPECT_EQ(0, session_.num_incoming_streams_created());
  // The in-order frame (offset 0) promotes it to a real stream.
  QuicStreamFrame data2(stream_id, false, 0, absl::string_view("HT"));
  session_.OnStreamFrame(data2);
  EXPECT_FALSE(QuicSessionPeer::GetPendingStream(&session_, stream_id));
  EXPECT_EQ(1, session_.num_incoming_streams_created());
}
// Verifies that the test session buffers all incoming streams (uni- and
// bidirectional) as pending until ProcessAllPendingStreams is called,
// and that each promoted stream records its pending duration and the
// connection counts the total pending streams (HTTP/3 only).
TEST_P(QuicSessionTestServer, BufferAllIncomingStreams) {
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  session_.set_uses_pending_streams(true);
  QuicStreamId stream_id = QuicUtils::GetFirstUnidirectionalStreamId(
      transport_version(), Perspective::IS_CLIENT);
  QuicStreamFrame data1(stream_id, true, 10, absl::string_view("HT"));
  session_.OnStreamFrame(data1);
  EXPECT_TRUE(QuicSessionPeer::GetPendingStream(&session_, stream_id));
  EXPECT_EQ(0, session_.num_incoming_streams_created());
  // Even the in-order frame keeps the stream pending in this mode.
  QuicStreamFrame data2(stream_id, false, 0, absl::string_view("HT"));
  session_.OnStreamFrame(data2);
  EXPECT_TRUE(QuicSessionPeer::GetPendingStream(&session_, stream_id));
  EXPECT_EQ(0, session_.num_incoming_streams_created());
  QuicStreamId bidirectional_stream_id =
      QuicUtils::GetFirstBidirectionalStreamId(transport_version(),
                                               Perspective::IS_CLIENT);
  QuicStreamFrame data3(bidirectional_stream_id, false, 0,
                        absl::string_view("HT"));
  session_.OnStreamFrame(data3);
  EXPECT_TRUE(
      QuicSessionPeer::GetPendingStream(&session_, bidirectional_stream_id));
  EXPECT_EQ(0, session_.num_incoming_streams_created());
  // Advance time by 1 ms so pending_duration() is measurable below.
  connection_->AdvanceTime(QuicTime::Delta::FromMilliseconds(1));
  session_.ProcessAllPendingStreams();
  EXPECT_FALSE(QuicSessionPeer::GetPendingStream(&session_, stream_id));
  EXPECT_FALSE(
      QuicSessionPeer::GetPendingStream(&session_, bidirectional_stream_id));
  EXPECT_EQ(2, session_.num_incoming_streams_created());
  EXPECT_EQ(1, QuicSessionPeer::GetStream(&session_, stream_id)
                   ->pending_duration()
                   .ToMilliseconds());
  EXPECT_EQ(1, QuicSessionPeer::GetStream(&session_, bidirectional_stream_id)
                   ->pending_duration()
                   .ToMilliseconds());
  EXPECT_EQ(2, session_.connection()->GetStats().num_total_pending_streams);
}
// Verifies that RST_STREAM on a pending stream discards it without ever
// creating a real stream, for both unidirectional and bidirectional
// pending streams, and that data arriving after the reset stays ignored
// (HTTP/3 only).
TEST_P(QuicSessionTestServer, RstPendingStreams) {
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  session_.set_uses_pending_streams(true);
  QuicStreamId stream_id = QuicUtils::GetFirstUnidirectionalStreamId(
      transport_version(), Perspective::IS_CLIENT);
  QuicStreamFrame data1(stream_id, true, 10, absl::string_view("HT"));
  session_.OnStreamFrame(data1);
  EXPECT_TRUE(QuicSessionPeer::GetPendingStream(&session_, stream_id));
  EXPECT_EQ(0, session_.num_incoming_streams_created());
  EXPECT_EQ(0u, QuicSessionPeer::GetNumOpenDynamicStreams(&session_));
  // Reset while still pending: the pending stream is dropped.
  QuicRstStreamFrame rst1(kInvalidControlFrameId, stream_id,
                          QUIC_ERROR_PROCESSING_STREAM, 12);
  session_.OnRstStream(rst1);
  EXPECT_FALSE(QuicSessionPeer::GetPendingStream(&session_, stream_id));
  EXPECT_EQ(0, session_.num_incoming_streams_created());
  EXPECT_EQ(0u, QuicSessionPeer::GetNumOpenDynamicStreams(&session_));
  // Late data on the reset stream must not resurrect it.
  QuicStreamFrame data2(stream_id, false, 0, absl::string_view("HT"));
  session_.OnStreamFrame(data2);
  EXPECT_FALSE(QuicSessionPeer::GetPendingStream(&session_, stream_id));
  EXPECT_EQ(0, session_.num_incoming_streams_created());
  EXPECT_EQ(0u, QuicSessionPeer::GetNumOpenDynamicStreams(&session_));
  session_.ProcessAllPendingStreams();
  // Same scenario on a bidirectional pending stream.
  QuicStreamId bidirectional_stream_id =
      QuicUtils::GetFirstBidirectionalStreamId(transport_version(),
                                               Perspective::IS_CLIENT);
  QuicStreamFrame data3(bidirectional_stream_id, false, 0,
                        absl::string_view("HT"));
  session_.OnStreamFrame(data3);
  EXPECT_TRUE(
      QuicSessionPeer::GetPendingStream(&session_, bidirectional_stream_id));
  EXPECT_EQ(0, session_.num_incoming_streams_created());
  QuicRstStreamFrame rst2(kInvalidControlFrameId, bidirectional_stream_id,
                          QUIC_ERROR_PROCESSING_STREAM, 12);
  session_.OnRstStream(rst2);
  EXPECT_FALSE(
      QuicSessionPeer::GetPendingStream(&session_, bidirectional_stream_id));
  EXPECT_EQ(0, session_.num_incoming_streams_created());
  EXPECT_EQ(0u, QuicSessionPeer::GetNumOpenDynamicStreams(&session_));
}
// Verifies that an empty FIN-only frame on an incoming read-unidirectional
// stream is fully handled at the pending layer: no pending stream is kept
// and no real stream is ever created (HTTP/3 only).
TEST_P(QuicSessionTestServer, OnFinPendingStreamsReadUnidirectional) {
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  session_.set_uses_pending_streams(true);
  QuicStreamId stream_id = QuicUtils::GetFirstUnidirectionalStreamId(
      transport_version(), Perspective::IS_CLIENT);
  // Empty frame with FIN at offset 0: the stream begins and ends at once.
  QuicStreamFrame data(stream_id, true, 0, "");
  session_.OnStreamFrame(data);
  EXPECT_FALSE(QuicSessionPeer::GetPendingStream(&session_, stream_id));
  EXPECT_EQ(0, session_.num_incoming_streams_created());
  EXPECT_EQ(0u, QuicSessionPeer::GetNumOpenDynamicStreams(&session_));
  EXPECT_EQ(nullptr, QuicSessionPeer::GetStream(&session_, stream_id));
}
// Verifies that a FIN received while a bidirectional stream is pending is
// preserved: after ProcessAllPendingStreams promotes it, the real stream
// reports fin_received() (HTTP/3 only).
TEST_P(QuicSessionTestServer, OnFinPendingStreamsBidirectional) {
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  session_.set_uses_pending_streams(true);
  QuicStreamId bidirectional_stream_id =
      QuicUtils::GetFirstBidirectionalStreamId(transport_version(),
                                               Perspective::IS_CLIENT);
  QuicStreamFrame data2(bidirectional_stream_id, true, 0,
                        absl::string_view("HT"));
  session_.OnStreamFrame(data2);
  EXPECT_TRUE(
      QuicSessionPeer::GetPendingStream(&session_, bidirectional_stream_id));
  EXPECT_EQ(0, session_.num_incoming_streams_created());
  session_.ProcessAllPendingStreams();
  EXPECT_FALSE(
      QuicSessionPeer::GetPendingStream(&session_, bidirectional_stream_id));
  EXPECT_EQ(1, session_.num_incoming_streams_created());
  // The FIN seen while pending must carry over to the promoted stream.
  QuicStream* bidirectional_stream =
      QuicSessionPeer::GetStream(&session_, bidirectional_stream_id);
  EXPECT_TRUE(bidirectional_stream->fin_received());
}
// Verifies that a WINDOW_UPDATE for a pending read-unidirectional stream
// closes the connection — the peer must not update the send window of a
// stream it cannot write to (HTTP/3 only).
TEST_P(QuicSessionTestServer, UnidirectionalPendingStreamOnWindowUpdate) {
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  session_.set_uses_pending_streams(true);
  QuicStreamId stream_id = QuicUtils::GetFirstUnidirectionalStreamId(
      transport_version(), Perspective::IS_CLIENT);
  QuicStreamFrame data1(stream_id, true, 10, absl::string_view("HT"));
  session_.OnStreamFrame(data1);
  EXPECT_TRUE(QuicSessionPeer::GetPendingStream(&session_, stream_id));
  EXPECT_EQ(0, session_.num_incoming_streams_created());
  QuicWindowUpdateFrame window_update_frame(kInvalidControlFrameId, stream_id,
                                            0);
  EXPECT_CALL(
      *connection_,
      CloseConnection(
          QUIC_WINDOW_UPDATE_RECEIVED_ON_READ_UNIDIRECTIONAL_STREAM,
          "WindowUpdateFrame received on READ_UNIDIRECTIONAL stream.", _));
  session_.OnWindowUpdateFrame(window_update_frame);
}
// Verifies that a WINDOW_UPDATE received while a bidirectional stream is
// pending is retained: once promoted, the stream's send window reflects
// the updated (doubled) value (HTTP/3 only).
TEST_P(QuicSessionTestServer, BidirectionalPendingStreamOnWindowUpdate) {
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  session_.set_uses_pending_streams(true);
  QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId(
      transport_version(), Perspective::IS_CLIENT);
  QuicStreamFrame data(stream_id, true, 10, absl::string_view("HT"));
  session_.OnStreamFrame(data);
  // WINDOW_UPDATE arrives while the stream is still pending.
  QuicWindowUpdateFrame window_update_frame(kInvalidControlFrameId, stream_id,
                                            kDefaultFlowControlSendWindow * 2);
  session_.OnWindowUpdateFrame(window_update_frame);
  EXPECT_TRUE(QuicSessionPeer::GetPendingStream(&session_, stream_id));
  EXPECT_EQ(0, session_.num_incoming_streams_created());
  session_.ProcessAllPendingStreams();
  EXPECT_FALSE(QuicSessionPeer::GetPendingStream(&session_, stream_id));
  EXPECT_EQ(1, session_.num_incoming_streams_created());
  // The promoted stream must carry the updated send window.
  QuicStream* bidirectional_stream =
      QuicSessionPeer::GetStream(&session_, stream_id);
  QuicByteCount send_window =
      QuicStreamPeer::SendWindowSize(bidirectional_stream);
  EXPECT_EQ(send_window, kDefaultFlowControlSendWindow * 2);
}
// Verifies that STOP_SENDING on a pending read-unidirectional stream
// closes the connection — the peer cannot stop-send a stream that is
// read-only from our side (HTTP/3 only).
TEST_P(QuicSessionTestServer, UnidirectionalPendingStreamOnStopSending) {
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  session_.set_uses_pending_streams(true);
  QuicStreamId stream_id = QuicUtils::GetFirstUnidirectionalStreamId(
      transport_version(), Perspective::IS_CLIENT);
  QuicStreamFrame data1(stream_id, true, 10, absl::string_view("HT"));
  session_.OnStreamFrame(data1);
  EXPECT_TRUE(QuicSessionPeer::GetPendingStream(&session_, stream_id));
  EXPECT_EQ(0, session_.num_incoming_streams_created());
  QuicStopSendingFrame stop_sending_frame(kInvalidControlFrameId, stream_id,
                                          QUIC_STREAM_CANCELLED);
  EXPECT_CALL(
      *connection_,
      CloseConnection(QUIC_INVALID_STREAM_ID,
                      "Received STOP_SENDING for a read-only stream", _));
  session_.OnStopSendingFrame(stop_sending_frame);
}
// Verifies that STOP_SENDING received while a bidirectional stream is
// pending is retained: once promoted, the stream's write side is closed
// (and a reset is sent for it) (HTTP/3 only).
TEST_P(QuicSessionTestServer, BidirectionalPendingStreamOnStopSending) {
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  session_.set_uses_pending_streams(true);
  QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId(
      transport_version(), Perspective::IS_CLIENT);
  QuicStreamFrame data(stream_id, true, 0, absl::string_view("HT"));
  session_.OnStreamFrame(data);
  // STOP_SENDING arrives while the stream is still pending.
  QuicStopSendingFrame stop_sending_frame(kInvalidControlFrameId, stream_id,
                                          QUIC_STREAM_CANCELLED);
  session_.OnStopSendingFrame(stop_sending_frame);
  EXPECT_TRUE(QuicSessionPeer::GetPendingStream(&session_, stream_id));
  EXPECT_EQ(0, session_.num_incoming_streams_created());
  // Promotion applies the buffered STOP_SENDING, resetting our write side.
  EXPECT_CALL(*connection_, OnStreamReset(stream_id, _));
  session_.ProcessAllPendingStreams();
  EXPECT_FALSE(QuicSessionPeer::GetPendingStream(&session_, stream_id));
  EXPECT_EQ(1, session_.num_incoming_streams_created());
  QuicStream* bidirectional_stream =
      QuicSessionPeer::GetStream(&session_, stream_id);
  EXPECT_TRUE(bidirectional_stream->write_side_closed());
}
// Verifies that draining streams do not count against the open-stream
// limit: opening and immediately draining more than 2x the limit of
// incoming streams never refuses a stream, and the open-stream count
// returns to zero after each drain.
TEST_P(QuicSessionTestServer, DrainingStreamsDoNotCountAsOpened) {
  CompleteHandshake();
  if (VersionHasIetfQuicFrames(transport_version())) {
    // IETF QUIC sends one control frame (stream-count related) but must
    // never refuse a stream here.
    EXPECT_CALL(*connection_, SendControlFrame(_)).Times(1);
  } else {
    EXPECT_CALL(*connection_, SendControlFrame(_)).Times(0);
  }
  EXPECT_CALL(*connection_, OnStreamReset(_, QUIC_REFUSED_STREAM)).Times(0);
  const QuicStreamId kMaxStreams = 5;
  if (VersionHasIetfQuicFrames(transport_version())) {
    QuicSessionPeer::SetMaxOpenIncomingBidirectionalStreams(&session_,
                                                            kMaxStreams);
  } else {
    QuicSessionPeer::SetMaxOpenIncomingStreams(&session_, kMaxStreams);
  }
  const QuicStreamId kFirstStreamId = GetNthClientInitiatedBidirectionalId(0);
  // Deliberately exceeds the limit (2 * kMaxStreams + 1 streams total).
  const QuicStreamId kFinalStreamId =
      GetNthClientInitiatedBidirectionalId(2 * kMaxStreams + 1);
  for (QuicStreamId i = kFirstStreamId; i < kFinalStreamId;
       i += QuicUtils::StreamIdDelta(connection_->transport_version())) {
    QuicStreamFrame data1(i, true, 0, absl::string_view("HT"));
    session_.OnStreamFrame(data1);
    EXPECT_EQ(1u, QuicSessionPeer::GetNumOpenDynamicStreams(&session_));
    session_.StreamDraining(i, false);
    EXPECT_EQ(0u, QuicSessionPeer::GetNumOpenDynamicStreams(&session_));
    // Fire the stream-count reset alarm between iterations if armed.
    QuicAlarm* alarm = QuicSessionPeer::GetStreamCountResetAlarm(&session_);
    if (alarm->IsSet()) {
      alarm_factory_.FireAlarm(alarm);
    }
  }
}
// Test fixture running the shared QuicSessionTestBase machinery from the
// client perspective (with the base's boolean option set to true —
// presumably supported-versions configuration; see QuicSessionTestBase).
class QuicSessionTestClient : public QuicSessionTestBase {
 protected:
  QuicSessionTestClient()
      : QuicSessionTestBase(Perspective::IS_CLIENT,
                            true) {}
};
// Instantiate the client-perspective suite across all supported QUIC
// versions, naming each instantiation after its version parameter.
INSTANTIATE_TEST_SUITE_P(Tests, QuicSessionTestClient,
                         ::testing::ValuesIn(AllSupportedVersions()),
                         ::testing::PrintToStringParamName());
// Verifies that opening the 3rd server-initiated bidirectional stream
// implicitly makes the two lower ids "available", that those can then be
// opened, and that a client-initiated id is not marked available.
TEST_P(QuicSessionTestClient, AvailableBidirectionalStreamsClient) {
  ASSERT_TRUE(session_.GetOrCreateStream(
                  GetNthServerInitiatedBidirectionalId(2)) != nullptr);
  // Skipped lower ids become available, not open.
  EXPECT_TRUE(QuicSessionPeer::IsStreamAvailable(
      &session_, GetNthServerInitiatedBidirectionalId(0)));
  EXPECT_TRUE(QuicSessionPeer::IsStreamAvailable(
      &session_, GetNthServerInitiatedBidirectionalId(1)));
  ASSERT_TRUE(session_.GetOrCreateStream(
                  GetNthServerInitiatedBidirectionalId(0)) != nullptr);
  ASSERT_TRUE(session_.GetOrCreateStream(
                  GetNthServerInitiatedBidirectionalId(1)) != nullptr);
  // Client-initiated ids are not in the server-initiated available set.
  EXPECT_FALSE(QuicSessionPeer::IsStreamAvailable(
      &session_, GetNthClientInitiatedBidirectionalId(1)));
}
// Verifies behavior of SendRetireConnectionId after the connection has
// closed (IETF QUIC only): with the guarding reloadable flag disabled it
// hits a QUIC_BUG; with the flag enabled the call is a safe no-op.
TEST_P(QuicSessionTestClient, DonotSendRetireCIDFrameWhenConnectionClosed) {
  if (!VersionHasIetfQuicFrames(transport_version())) {
    return;
  }
  connection_->ReallyCloseConnection(QUIC_NO_ERROR, "closing",
                                     ConnectionCloseBehavior::SILENT_CLOSE);
  EXPECT_FALSE(connection_->connected());
  if (!GetQuicReloadableFlag(
          quic_no_write_control_frame_upon_connection_close2)) {
    EXPECT_QUIC_BUG(session_.SendRetireConnectionId(20),
                    "Try to write control frame");
  } else {
    session_.SendRetireConnectionId(20);
  }
}
// Verifies that with the kMPQC (multi-port) client connection option
// negotiated, creating a new outgoing stream prompts the connection to
// probe the multi-port path (IETF QUIC only).
TEST_P(QuicSessionTestClient, NewStreamCreationResumesMultiPortProbing) {
  if (!VersionHasIetfQuicFrames(transport_version())) {
    return;
  }
  session_.config()->SetClientConnectionOptions({kMPQC});
  session_.Initialize();
  connection_->CreateConnectionIdManager();
  connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_->OnHandshakeComplete();
  session_.OnConfigNegotiated();
  EXPECT_CALL(*connection_, MaybeProbeMultiPortPath());
  session_.CreateOutgoingBidirectionalStream();
}
// Verifies that a negotiated session flow control window below the
// minimum closes the connection, with the error code depending on whether
// the version allows low flow-control limits (0-RTT resumption reduction
// vs. invalid window).
TEST_P(QuicSessionTestClient, InvalidSessionFlowControlWindowInHandshake) {
  const uint32_t kInvalidWindow = kMinimumFlowControlSendWindow - 1;
  QuicConfigPeer::SetReceivedInitialSessionFlowControlWindow(session_.config(),
                                                             kInvalidWindow);
  EXPECT_CALL(
      *connection_,
      CloseConnection(connection_->version().AllowsLowFlowControlLimits()
                          ? QUIC_ZERO_RTT_RESUMPTION_LIMIT_REDUCED
                          : QUIC_FLOW_CONTROL_INVALID_WINDOW,
                      _, _));
  connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  session_.OnConfigNegotiated();
}
// A received bidirectional stream limit below the default is treated as a
// reduced 0-RTT resumption limit and closes the connection.
TEST_P(QuicSessionTestClient, InvalidBidiStreamLimitInHandshake) {
  if (!VersionHasIetfQuicFrames(transport_version())) {
    return;
  }
  QuicConfigPeer::SetReceivedMaxBidirectionalStreams(
      session_.config(), kDefaultMaxStreamsPerConnection - 1);
  EXPECT_CALL(*connection_,
              CloseConnection(QUIC_ZERO_RTT_RESUMPTION_LIMIT_REDUCED, _, _));
  connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  session_.OnConfigNegotiated();
}
// Same as InvalidBidiStreamLimitInHandshake, for unidirectional streams.
TEST_P(QuicSessionTestClient, InvalidUniStreamLimitInHandshake) {
  if (!VersionHasIetfQuicFrames(transport_version())) {
    return;
  }
  QuicConfigPeer::SetReceivedMaxUnidirectionalStreams(
      session_.config(), kDefaultMaxStreamsPerConnection - 1);
  EXPECT_CALL(*connection_,
              CloseConnection(QUIC_ZERO_RTT_RESUMPTION_LIMIT_REDUCED, _, _));
  connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  session_.OnConfigNegotiated();
}
// A received per-stream flow-control window below the minimum closes the
// connection; the close path is exercised for real (ReallyCloseConnection)
// so a CONNECTION_CLOSE packet is actually sent.
TEST_P(QuicSessionTestClient, InvalidStreamFlowControlWindowInHandshake) {
  if (!VersionHasIetfQuicFrames(transport_version())) {
    return;
  }
  // Open streams so the new window would apply to live streams.
  session_.CreateOutgoingBidirectionalStream();
  session_.CreateOutgoingBidirectionalStream();
  QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesOutgoingBidirectional(
      session_.config(), kMinimumFlowControlSendWindow - 1);
  EXPECT_CALL(*connection_, CloseConnection(_, _, _))
      .WillOnce(
          Invoke(connection_, &MockQuicConnection::ReallyCloseConnection));
  EXPECT_CALL(*connection_, SendConnectionClosePacket(_, _, _));
  connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  session_.OnConfigNegotiated();
}
// A MAX_STREAMS frame raising the limit notifies the session exactly once;
// a later frame with a lower (stale) count is ignored.
TEST_P(QuicSessionTestClient, OnMaxStreamFrame) {
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  QuicMaxStreamsFrame frame;
  frame.unidirectional = false;
  frame.stream_count = 120;
  EXPECT_CALL(session_, OnCanCreateNewOutgoingStream(false)).Times(1);
  session_.OnMaxStreamsFrame(frame);
  QuicMaxStreamsFrame frame2;
  frame2.unidirectional = false;
  frame2.stream_count = 110;  // Lower than 120: must not re-notify.
  EXPECT_CALL(session_, OnCanCreateNewOutgoingStream(false)).Times(0);
  session_.OnMaxStreamsFrame(frame2);
}
// Unidirectional analogue of AvailableBidirectionalStreamsClient: creating
// server-initiated stream #2 marks #0/#1 available but not client ids.
TEST_P(QuicSessionTestClient, AvailableUnidirectionalStreamsClient) {
  ASSERT_TRUE(session_.GetOrCreateStream(
                  GetNthServerInitiatedUnidirectionalId(2)) != nullptr);
  EXPECT_TRUE(QuicSessionPeer::IsStreamAvailable(
      &session_, GetNthServerInitiatedUnidirectionalId(0)));
  EXPECT_TRUE(QuicSessionPeer::IsStreamAvailable(
      &session_, GetNthServerInitiatedUnidirectionalId(1)));
  ASSERT_TRUE(session_.GetOrCreateStream(
                  GetNthServerInitiatedUnidirectionalId(0)) != nullptr);
  ASSERT_TRUE(session_.GetOrCreateStream(
                  GetNthServerInitiatedUnidirectionalId(1)) != nullptr);
  EXPECT_FALSE(QuicSessionPeer::IsStreamAvailable(
      &session_, GetNthClientInitiatedUnidirectionalId(1)));
}
// A FIN arriving after the read side is closed is still recorded, so that
// resetting the stream afterwards fully closes it without leaving an entry
// in the locally-closed-streams highest-offset map.
TEST_P(QuicSessionTestClient, RecordFinAfterReadSideClosed) {
  CompleteHandshake();
  TestStream* stream = session_.CreateOutgoingBidirectionalStream();
  QuicStreamId stream_id = stream->id();
  QuicStreamPeer::CloseReadSide(stream);
  // Empty FIN-only frame received after the read side is gone.
  QuicStreamFrame frame(stream_id, true, 0, absl::string_view());
  session_.OnStreamFrame(frame);
  EXPECT_TRUE(stream->fin_received());
  EXPECT_CALL(*connection_, SendControlFrame(_));
  EXPECT_CALL(*connection_, OnStreamReset(stream->id(), _));
  stream->Reset(QUIC_STREAM_CANCELLED);
  EXPECT_TRUE(QuicStreamPeer::read_side_closed(stream));
  EXPECT_TRUE(connection_->connected());
  EXPECT_TRUE(QuicSessionPeer::IsStreamClosed(&session_, stream_id));
  EXPECT_FALSE(QuicSessionPeer::IsStreamCreated(&session_, stream_id));
  // The recorded FIN means no highest-offset bookkeeping is left behind.
  EXPECT_EQ(
      0u,
      QuicSessionPeer::GetLocallyClosedStreamsHighestOffset(&session_).size());
}
// The client receiving data for an unknown CLIENT-initiated bidirectional
// stream is a protocol violation: wrong direction in IETF QUIC, invalid
// stream id in gQUIC.
TEST_P(QuicSessionTestClient, IncomingStreamWithClientInitiatedStreamId) {
  const QuicErrorCode expected_error =
      VersionHasIetfQuicFrames(transport_version())
          ? QUIC_HTTP_STREAM_WRONG_DIRECTION
          : QUIC_INVALID_STREAM_ID;
  EXPECT_CALL(
      *connection_,
      CloseConnection(expected_error, "Data for nonexistent stream",
                      ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET));
  QuicStreamFrame frame(GetNthClientInitiatedBidirectionalId(1),
                        false, 0,
                        absl::string_view("foo"));
  session_.OnStreamFrame(frame);
}
// The kAFFE connection option sets min_ack_delay on the client config and
// enables receipt of ACK_FREQUENCY frames.
TEST_P(QuicSessionTestClient, MinAckDelaySetOnTheClientQuicConfig) {
  if (!session_.version().HasIetfQuicFrames()) {
    return;
  }
  session_.config()->SetClientConnectionOptions({kAFFE});
  session_.Initialize();
  ASSERT_EQ(session_.config()->GetMinAckDelayToSendMs(),
            kDefaultMinAckDelayTimeMs);
  ASSERT_TRUE(session_.connection()->can_receive_ack_frequency_frame());
}
// When the connection is within 1ms of its idle timeout, opening a new
// stream is refused and a connectivity probe is sent instead; once a
// packet is received (resetting the idle deadline), creation is allowed.
TEST_P(QuicSessionTestClient, FailedToCreateStreamIfTooCloseToIdleTimeout) {
  connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  EXPECT_TRUE(session_.CanOpenNextOutgoingBidirectionalStream());
  QuicTime deadline = QuicConnectionPeer::GetIdleNetworkDeadline(connection_);
  ASSERT_TRUE(deadline.IsInitialized());
  QuicTime::Delta timeout = deadline - helper_.GetClock()->ApproximateNow();
  // Advance to just before the idle deadline.
  connection_->AdvanceTime(timeout - QuicTime::Delta::FromMilliseconds(1));
  EXPECT_CALL(*connection_, SendConnectivityProbingPacket(_, _)).Times(1);
  EXPECT_FALSE(session_.CanOpenNextOutgoingBidirectionalStream());
  EXPECT_CALL(session_, OnCanCreateNewOutgoingStream(false));
  // Simulate receiving a packet, which refreshes the idle deadline.
  QuicConnectionPeer::GetIdleNetworkDetector(connection_)
      .OnPacketReceived(helper_.GetClock()->ApproximateNow());
  session_.OnPacketDecrypted(ENCRYPTION_FORWARD_SECURE);
  EXPECT_TRUE(session_.CanOpenNextOutgoingBidirectionalStream());
}
// Closing a stream that is still waiting for acks moves it to the closed
// list; MaybeCloseZombieStream on an already-closed stream is a no-op.
TEST_P(QuicSessionTestServer, ZombieStreams) {
  CompleteHandshake();
  TestStream* stream2 = session_.CreateOutgoingBidirectionalStream();
  QuicStreamPeer::SetStreamBytesWritten(3, stream2);
  EXPECT_TRUE(stream2->IsWaitingForAcks());
  CloseStream(stream2->id());
  ASSERT_EQ(1u, session_.closed_streams()->size());
  EXPECT_EQ(stream2->id(), session_.closed_streams()->front()->id());
  session_.MaybeCloseZombieStream(stream2->id());
  // No duplicate entry is created.
  EXPECT_EQ(1u, session_.closed_streams()->size());
  EXPECT_EQ(stream2->id(), session_.closed_streams()->front()->id());
}
// Receiving a RST_STREAM after we already sent one: in gQUIC the stream
// now fully closes and the session is notified it can create a new
// outgoing stream; in IETF QUIC no extra notification is expected here.
TEST_P(QuicSessionTestServer, RstStreamReceivedAfterRstStreamSent) {
  CompleteHandshake();
  TestStream* stream2 = session_.CreateOutgoingBidirectionalStream();
  QuicStreamPeer::SetStreamBytesWritten(3, stream2);
  EXPECT_TRUE(stream2->IsWaitingForAcks());
  EXPECT_CALL(*connection_, SendControlFrame(_));
  EXPECT_CALL(*connection_, OnStreamReset(stream2->id(), _));
  EXPECT_CALL(session_, OnCanCreateNewOutgoingStream(false)).Times(0);
  stream2->Reset(quic::QUIC_STREAM_CANCELLED);
  QuicRstStreamFrame rst1(kInvalidControlFrameId, stream2->id(),
                          QUIC_ERROR_PROCESSING_STREAM, 0);
  if (!VersionHasIetfQuicFrames(transport_version())) {
    EXPECT_CALL(session_, OnCanCreateNewOutgoingStream(false)).Times(1);
  }
  session_.OnRstStream(rst1);
}
// Streams with unacked data that get reset: a peer RST_STREAM (plus
// STOP_SENDING in IETF QUIC) closes the stream, and a locally reset stream
// with buffered data is also closed; both end up in closed_streams().
TEST_P(QuicSessionTestServer, TestZombieStreams) {
  CompleteHandshake();
  session_.set_writev_consumes_all_data(true);
  TestStream* stream2 = session_.CreateOutgoingBidirectionalStream();
  std::string body(100, '.');
  stream2->WriteOrBufferData(body, false, nullptr);
  EXPECT_TRUE(stream2->IsWaitingForAcks());
  EXPECT_EQ(1u, QuicStreamPeer::SendBuffer(stream2).size());
  QuicRstStreamFrame rst_frame(kInvalidControlFrameId, stream2->id(),
                               QUIC_STREAM_CANCELLED, 1234);
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillOnce(Invoke(&ClearControlFrame));
  if (VersionHasIetfQuicFrames(transport_version())) {
    // IETF QUIC echoes the app error; gQUIC acknowledges the reset.
    EXPECT_CALL(*connection_,
                OnStreamReset(stream2->id(), QUIC_STREAM_CANCELLED));
  } else {
    EXPECT_CALL(*connection_,
                OnStreamReset(stream2->id(), QUIC_RST_ACKNOWLEDGEMENT));
  }
  stream2->OnStreamReset(rst_frame);
  if (VersionHasIetfQuicFrames(transport_version())) {
    // In IETF QUIC a stream is fully closed by RST_STREAM + STOP_SENDING.
    QuicStopSendingFrame frame(kInvalidControlFrameId, stream2->id(),
                               QUIC_STREAM_CANCELLED);
    EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(0);
    session_.OnStopSendingFrame(frame);
  }
  ASSERT_EQ(1u, session_.closed_streams()->size());
  EXPECT_EQ(stream2->id(), session_.closed_streams()->front()->id());
  TestStream* stream4 = session_.CreateOutgoingBidirectionalStream();
  if (VersionHasIetfQuicFrames(transport_version())) {
    // RESET_STREAM and STOP_SENDING are both sent in IETF QUIC.
    EXPECT_CALL(*connection_, SendControlFrame(_))
        .Times(2)
        .WillRepeatedly(Invoke(&ClearControlFrame));
  } else {
    EXPECT_CALL(*connection_, SendControlFrame(_)).Times(1);
  }
  EXPECT_CALL(*connection_,
              OnStreamReset(stream4->id(), QUIC_STREAM_CANCELLED));
  stream4->WriteOrBufferData(body, false, nullptr);
  stream4->Reset(QUIC_STREAM_CANCELLED);
  EXPECT_EQ(2u, session_.closed_streams()->size());
}
// Lost frames are retransmitted before any new writes, in the order the
// losses were reported (crypto data first among them), and only while the
// send algorithm allows sending.  InSequence makes call order strict.
TEST_P(QuicSessionTestServer, OnStreamFrameLost) {
  CompleteHandshake();
  InSequence s;
  // Install a strict mock send algorithm to gate each write attempt.
  MockSendAlgorithm* send_algorithm = new StrictMock<MockSendAlgorithm>;
  QuicConnectionPeer::SetSendAlgorithm(session_.connection(), send_algorithm);
  TestCryptoStream* crypto_stream = session_.GetMutableCryptoStream();
  TestStream* stream2 = session_.CreateOutgoingBidirectionalStream();
  TestStream* stream4 = session_.CreateOutgoingBidirectionalStream();
  QuicStreamFrame frame1;
  if (!QuicVersionUsesCryptoFrames(connection_->transport_version())) {
    // Pre-crypto-frame versions carry handshake data on a stream.
    frame1 = QuicStreamFrame(
        QuicUtils::GetCryptoStreamId(connection_->transport_version()), false,
        0, 1300);
  }
  QuicStreamFrame frame2(stream2->id(), false, 0, 9);
  QuicStreamFrame frame3(stream4->id(), false, 0, 9);
  EXPECT_CALL(*stream4, HasPendingRetransmission()).WillOnce(Return(true));
  if (!QuicVersionUsesCryptoFrames(connection_->transport_version())) {
    EXPECT_CALL(*crypto_stream, HasPendingRetransmission())
        .WillOnce(Return(true));
  }
  EXPECT_CALL(*stream2, HasPendingRetransmission()).WillOnce(Return(true));
  // Report losses: stream4 first, then crypto, then stream2.
  session_.OnFrameLost(QuicFrame(frame3));
  if (!QuicVersionUsesCryptoFrames(connection_->transport_version())) {
    session_.OnFrameLost(QuicFrame(frame1));
  } else {
    QuicCryptoFrame crypto_frame(ENCRYPTION_INITIAL, 0, 1300);
    session_.OnFrameLost(QuicFrame(&crypto_frame));
  }
  session_.OnFrameLost(QuicFrame(frame2));
  EXPECT_TRUE(session_.WillingAndAbleToWrite());
  session_.MarkConnectionLevelWriteBlocked(stream2->id());
  session_.MarkConnectionLevelWriteBlocked(stream4->id());
  // Crypto retransmission is not gated on the send algorithm.
  EXPECT_CALL(*send_algorithm, CanSend(_)).Times(0);
  if (!QuicVersionUsesCryptoFrames(connection_->transport_version())) {
    EXPECT_CALL(*crypto_stream, OnCanWrite());
    EXPECT_CALL(*crypto_stream, HasPendingRetransmission())
        .WillOnce(Return(false));
  }
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillOnce(Return(true));
  EXPECT_CALL(*stream4, OnCanWrite());
  EXPECT_CALL(*stream4, HasPendingRetransmission()).WillOnce(Return(false));
  // Congestion-blocked: stream2's retransmission has to wait.
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(false));
  session_.OnCanWrite();
  EXPECT_TRUE(session_.WillingAndAbleToWrite());
  // Second OnCanWrite: retransmit stream2, then serve the write-blocked
  // streams in order.
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillOnce(Return(true));
  EXPECT_CALL(*stream2, OnCanWrite());
  EXPECT_CALL(*stream2, HasPendingRetransmission()).WillOnce(Return(false));
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillOnce(Return(true));
  EXPECT_CALL(*stream2, OnCanWrite());
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillOnce(Return(true));
  EXPECT_CALL(*stream4, OnCanWrite());
  EXPECT_CALL(*send_algorithm, OnApplicationLimited(_));
  session_.OnCanWrite();
  EXPECT_FALSE(session_.WillingAndAbleToWrite());
}
// Lost data belonging to a stream that has since been reset (stream4) is
// not retransmitted; the remaining streams still retransmit and then get
// their regular OnCanWrite turn.
TEST_P(QuicSessionTestServer, DonotRetransmitDataOfClosedStreams) {
  CompleteHandshake();
  InSequence s;
  TestStream* stream2 = session_.CreateOutgoingBidirectionalStream();
  TestStream* stream4 = session_.CreateOutgoingBidirectionalStream();
  TestStream* stream6 = session_.CreateOutgoingBidirectionalStream();
  QuicStreamFrame frame1(stream2->id(), false, 0, 9);
  QuicStreamFrame frame2(stream4->id(), false, 0, 9);
  QuicStreamFrame frame3(stream6->id(), false, 0, 9);
  EXPECT_CALL(*stream6, HasPendingRetransmission()).WillOnce(Return(true));
  EXPECT_CALL(*stream4, HasPendingRetransmission()).WillOnce(Return(true));
  EXPECT_CALL(*stream2, HasPendingRetransmission()).WillOnce(Return(true));
  session_.OnFrameLost(QuicFrame(frame3));
  session_.OnFrameLost(QuicFrame(frame2));
  session_.OnFrameLost(QuicFrame(frame1));
  session_.MarkConnectionLevelWriteBlocked(stream2->id());
  session_.MarkConnectionLevelWriteBlocked(stream4->id());
  session_.MarkConnectionLevelWriteBlocked(stream6->id());
  // Reset stream4 before OnCanWrite: its lost data must be dropped.
  EXPECT_CALL(*connection_, SendControlFrame(_));
  EXPECT_CALL(*connection_, OnStreamReset(stream4->id(), _));
  stream4->Reset(QUIC_STREAM_CANCELLED);
  EXPECT_CALL(*stream6, OnCanWrite());
  EXPECT_CALL(*stream6, HasPendingRetransmission()).WillOnce(Return(false));
  EXPECT_CALL(*stream2, OnCanWrite());
  EXPECT_CALL(*stream2, HasPendingRetransmission()).WillOnce(Return(false));
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillRepeatedly(Invoke(&ClearControlFrame));
  EXPECT_CALL(*stream2, OnCanWrite());
  EXPECT_CALL(*stream6, OnCanWrite());
  session_.OnCanWrite();
}
// RetransmitFrames replays a mixed batch (stream data + window update) in
// order, re-sending the control frame via SendControlFrame and the stream
// data via each stream's RetransmitStreamData.
TEST_P(QuicSessionTestServer, RetransmitFrames) {
  CompleteHandshake();
  MockSendAlgorithm* send_algorithm = new StrictMock<MockSendAlgorithm>;
  QuicConnectionPeer::SetSendAlgorithm(session_.connection(), send_algorithm);
  InSequence s;
  TestStream* stream2 = session_.CreateOutgoingBidirectionalStream();
  TestStream* stream4 = session_.CreateOutgoingBidirectionalStream();
  TestStream* stream6 = session_.CreateOutgoingBidirectionalStream();
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillOnce(Invoke(&ClearControlFrame));
  session_.SendWindowUpdate(stream2->id(), 9);
  QuicStreamFrame frame1(stream2->id(), false, 0, 9);
  QuicStreamFrame frame2(stream4->id(), false, 0, 9);
  QuicStreamFrame frame3(stream6->id(), false, 0, 9);
  QuicWindowUpdateFrame window_update(1, stream2->id(), 9);
  QuicFrames frames;
  frames.push_back(QuicFrame(frame1));
  frames.push_back(QuicFrame(window_update));
  frames.push_back(QuicFrame(frame2));
  frames.push_back(QuicFrame(frame3));
  EXPECT_FALSE(session_.WillingAndAbleToWrite());
  // Expect retransmission in batch order: stream2 data, window update,
  // stream4 data, stream6 data.
  EXPECT_CALL(*stream2, RetransmitStreamData(_, _, _, _))
      .WillOnce(Return(true));
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillOnce(Invoke(&ClearControlFrame));
  EXPECT_CALL(*stream4, RetransmitStreamData(_, _, _, _))
      .WillOnce(Return(true));
  EXPECT_CALL(*stream6, RetransmitStreamData(_, _, _, _))
      .WillOnce(Return(true));
  EXPECT_CALL(*send_algorithm, OnApplicationLimited(_));
  session_.RetransmitFrames(frames, PTO_RETRANSMISSION);
}
// A stream that resets itself from within its own OnCanWrite (while its
// lost data was being retransmitted) must be handled cleanly — the reset
// is sent and the connection stays consistent.
TEST_P(QuicSessionTestServer, RetransmitLostDataCausesConnectionClose) {
  CompleteHandshake();
  TestStream* stream = session_.CreateOutgoingBidirectionalStream();
  QuicStreamFrame frame(stream->id(), false, 0, 9);
  EXPECT_CALL(*stream, HasPendingRetransmission())
      .Times(2)
      .WillOnce(Return(true))
      .WillOnce(Return(false));
  session_.OnFrameLost(QuicFrame(frame));
  // The stream resets itself mid-retransmission.
  EXPECT_CALL(*stream, OnCanWrite()).WillOnce(Invoke([this, stream]() {
    session_.ResetStream(stream->id(), QUIC_STREAM_CANCELLED);
  }));
  if (VersionHasIetfQuicFrames(transport_version())) {
    // RESET_STREAM + STOP_SENDING in IETF QUIC.
    EXPECT_CALL(*connection_, SendControlFrame(_))
        .Times(2)
        .WillRepeatedly(Invoke(&session_, &TestSession::SaveFrame));
  } else {
    EXPECT_CALL(*connection_, SendControlFrame(_))
        .WillOnce(Invoke(&session_, &TestSession::SaveFrame));
  }
  EXPECT_CALL(*connection_, OnStreamReset(stream->id(), _));
  session_.OnCanWrite();
}
// SendMessage: fails before 1-RTT keys exist; afterwards message ids are
// assigned only on success (a TOO_LARGE result does not consume id 2);
// lost/acked messages are no longer outstanding.
TEST_P(QuicSessionTestServer, SendMessage) {
  EXPECT_FALSE(session_.OneRttKeysAvailable());
  EXPECT_EQ(MessageResult(MESSAGE_STATUS_ENCRYPTION_NOT_ESTABLISHED, 0),
            session_.SendMessage(MemSliceFromString("")));
  CompleteHandshake();
  EXPECT_TRUE(session_.OneRttKeysAvailable());
  EXPECT_CALL(*connection_, SendMessage(1, _, false))
      .WillOnce(Return(MESSAGE_STATUS_SUCCESS));
  EXPECT_EQ(MessageResult(MESSAGE_STATUS_SUCCESS, 1),
            session_.SendMessage(MemSliceFromString("")));
  // Failure does not advance the message id.
  EXPECT_CALL(*connection_, SendMessage(2, _, false))
      .WillOnce(Return(MESSAGE_STATUS_TOO_LARGE));
  EXPECT_EQ(MessageResult(MESSAGE_STATUS_TOO_LARGE, 0),
            session_.SendMessage(MemSliceFromString("")));
  EXPECT_CALL(*connection_, SendMessage(2, _, false))
      .WillOnce(Return(MESSAGE_STATUS_SUCCESS));
  EXPECT_EQ(MessageResult(MESSAGE_STATUS_SUCCESS, 2),
            session_.SendMessage(MemSliceFromString("")));
  QuicMessageFrame frame(1);
  QuicMessageFrame frame2(2);
  EXPECT_FALSE(session_.IsFrameOutstanding(QuicFrame(&frame)));
  EXPECT_FALSE(session_.IsFrameOutstanding(QuicFrame(&frame2)));
  session_.OnMessageLost(2);
  EXPECT_FALSE(session_.IsFrameOutstanding(QuicFrame(&frame2)));
  session_.OnMessageAcked(1, QuicTime::Zero());
  EXPECT_FALSE(session_.IsFrameOutstanding(QuicFrame(&frame)));
}
// Locally resetting a zombie stream (read side closed, FIN written, still
// waiting for acks) closes it and cancels any pending retransmissions —
// OnCanWrite must not call back into the reset stream.
TEST_P(QuicSessionTestServer, LocallyResetZombieStreams) {
  CompleteHandshake();
  session_.set_writev_consumes_all_data(true);
  TestStream* stream2 = session_.CreateOutgoingBidirectionalStream();
  std::string body(100, '.');
  QuicStreamPeer::CloseReadSide(stream2);
  stream2->WriteOrBufferData(body, true, nullptr);
  EXPECT_TRUE(stream2->IsWaitingForAcks());
  auto& stream_map = QuicSessionPeer::stream_map(&session_);
  ASSERT_TRUE(stream_map.contains(stream2->id()));
  auto* stream = stream_map.find(stream2->id())->second.get();
  EXPECT_TRUE(stream->IsZombie());
  QuicStreamFrame frame(stream2->id(), true, 0, 100);
  EXPECT_CALL(*stream2, HasPendingRetransmission())
      .WillRepeatedly(Return(true));
  session_.OnFrameLost(QuicFrame(frame));
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillRepeatedly(Invoke(&ClearControlFrame));
  EXPECT_CALL(*connection_, OnStreamReset(stream2->id(), _));
  stream2->Reset(QUIC_STREAM_CANCELLED);
  EXPECT_TRUE(session_.IsClosedStream(stream2->id()));
  // The reset stream no longer participates in write scheduling.
  EXPECT_CALL(*stream2, OnCanWrite()).Times(0);
  session_.OnCanWrite();
}
// Closing a stream arms the clean-up alarm; firing the alarm drains the
// closed-streams list.
TEST_P(QuicSessionTestServer, CleanUpClosedStreamsAlarm) {
  CompleteHandshake();
  EXPECT_FALSE(
      QuicSessionPeer::GetCleanUpClosedStreamsAlarm(&session_)->IsSet());
  session_.set_writev_consumes_all_data(true);
  TestStream* stream2 = session_.CreateOutgoingBidirectionalStream();
  EXPECT_FALSE(stream2->IsWaitingForAcks());
  CloseStream(stream2->id());
  EXPECT_EQ(1u, session_.closed_streams()->size());
  EXPECT_TRUE(
      QuicSessionPeer::GetCleanUpClosedStreamsAlarm(&session_)->IsSet());
  alarm_factory_.FireAlarm(
      QuicSessionPeer::GetCleanUpClosedStreamsAlarm(&session_));
  EXPECT_TRUE(session_.closed_streams()->empty());
}
// A write-unidirectional stream whose FIN has been sent but not yet acked
// becomes a zombie (kept alive only for retransmission/acks).
TEST_P(QuicSessionTestServer, WriteUnidirectionalStream) {
  session_.set_writev_consumes_all_data(true);
  TestStream* stream4 = new TestStream(GetNthServerInitiatedUnidirectionalId(1),
                                       &session_, WRITE_UNIDIRECTIONAL);
  session_.ActivateStream(absl::WrapUnique(stream4));
  std::string body(100, '.');
  stream4->WriteOrBufferData(body, false, nullptr);
  stream4->WriteOrBufferData(body, true, nullptr);
  auto& stream_map = QuicSessionPeer::stream_map(&session_);
  ASSERT_TRUE(stream_map.contains(stream4->id()));
  auto* stream = stream_map.find(stream4->id())->second.get();
  EXPECT_TRUE(stream->IsZombie());
}
// Incoming data on a local write-only unidirectional stream is a protocol
// violation and closes the connection.
TEST_P(QuicSessionTestServer, ReceivedDataOnWriteUnidirectionalStream) {
  TestStream* stream4 = new TestStream(GetNthServerInitiatedUnidirectionalId(1),
                                       &session_, WRITE_UNIDIRECTIONAL);
  session_.ActivateStream(absl::WrapUnique(stream4));
  EXPECT_CALL(
      *connection_,
      CloseConnection(QUIC_DATA_RECEIVED_ON_WRITE_UNIDIRECTIONAL_STREAM, _, _))
      .Times(1);
  QuicStreamFrame stream_frame(GetNthServerInitiatedUnidirectionalId(1), false,
                               0, 2);
  session_.OnStreamFrame(stream_frame);
}
// A read-unidirectional stream that stopped reading closes as soon as the
// FIN arrives; data-only frames do not close it.
TEST_P(QuicSessionTestServer, ReadUnidirectionalStream) {
  TestStream* stream4 = new TestStream(GetNthClientInitiatedUnidirectionalId(1),
                                       &session_, READ_UNIDIRECTIONAL);
  session_.ActivateStream(absl::WrapUnique(stream4));
  EXPECT_FALSE(stream4->IsWaitingForAcks());
  stream4->StopReading();
  std::string data(100, '.');
  QuicStreamFrame stream_frame(GetNthClientInitiatedUnidirectionalId(1), false,
                               0, data);
  stream4->OnStreamFrame(stream_frame);
  EXPECT_TRUE(session_.closed_streams()->empty());
  QuicStreamFrame stream_frame2(GetNthClientInitiatedUnidirectionalId(1), true,
                                100, data);
  stream4->OnStreamFrame(stream_frame2);
  EXPECT_EQ(1u, session_.closed_streams()->size());
}
// Writing (WriteOrBufferData) on a read-only unidirectional stream closes
// the connection.
TEST_P(QuicSessionTestServer, WriteOrBufferDataOnReadUnidirectionalStream) {
  TestStream* stream4 = new TestStream(GetNthClientInitiatedUnidirectionalId(1),
                                       &session_, READ_UNIDIRECTIONAL);
  session_.ActivateStream(absl::WrapUnique(stream4));
  EXPECT_CALL(*connection_,
              CloseConnection(
                  QUIC_TRY_TO_WRITE_DATA_ON_READ_UNIDIRECTIONAL_STREAM, _, _))
      .Times(1);
  std::string body(100, '.');
  stream4->WriteOrBufferData(body, false, nullptr);
}
// Writing via mem-slice storage (iovec path) on a read-only unidirectional
// stream also closes the connection.
TEST_P(QuicSessionTestServer, WritevDataOnReadUnidirectionalStream) {
  TestStream* stream4 = new TestStream(GetNthClientInitiatedUnidirectionalId(1),
                                       &session_, READ_UNIDIRECTIONAL);
  session_.ActivateStream(absl::WrapUnique(stream4));
  EXPECT_CALL(*connection_,
              CloseConnection(
                  QUIC_TRY_TO_WRITE_DATA_ON_READ_UNIDIRECTIONAL_STREAM, _, _))
      .Times(1);
  std::string body(100, '.');
  struct iovec iov = {const_cast<char*>(body.data()), body.length()};
  quiche::QuicheMemSliceStorage storage(
      &iov, 1, session_.connection()->helper()->GetStreamSendBufferAllocator(),
      1024);
  stream4->WriteMemSlices(storage.ToSpan(), false);
}
// Writing raw mem slices on a read-only unidirectional stream closes the
// connection as well.
TEST_P(QuicSessionTestServer, WriteMemSlicesOnReadUnidirectionalStream) {
  TestStream* stream4 = new TestStream(GetNthClientInitiatedUnidirectionalId(1),
                                       &session_, READ_UNIDIRECTIONAL);
  session_.ActivateStream(absl::WrapUnique(stream4));
  EXPECT_CALL(*connection_,
              CloseConnection(
                  QUIC_TRY_TO_WRITE_DATA_ON_READ_UNIDIRECTIONAL_STREAM, _, _))
      .Times(1);
  std::string data(1024, 'a');
  std::vector<quiche::QuicheMemSlice> buffers;
  buffers.push_back(MemSliceFromString(data));
  buffers.push_back(MemSliceFromString(data));
  stream4->WriteMemSlices(absl::MakeSpan(buffers), false);
}
// Incoming stream ids strictly below the advertised incoming stream count
// limits are accepted without closing the connection.
TEST_P(QuicSessionTestServer, NewStreamIdBelowLimit) {
  if (!VersionHasIetfQuicFrames(transport_version())) {
    return;  // Only IETF versions use MAX_STREAMS-based id limits.
  }
  QuicStreamId bidirectional_stream_id = StreamCountToId(
      QuicSessionPeer::ietf_streamid_manager(&session_)
              ->advertised_max_incoming_bidirectional_streams() -
          1,
      Perspective::IS_CLIENT,
      true);
  QuicStreamFrame bidirectional_stream_frame(bidirectional_stream_id, false, 0,
                                             "Random String");
  EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(0);
  session_.OnStreamFrame(bidirectional_stream_frame);
  QuicStreamId unidirectional_stream_id = StreamCountToId(
      QuicSessionPeer::ietf_streamid_manager(&session_)
              ->advertised_max_incoming_unidirectional_streams() -
          1,
      Perspective::IS_CLIENT,
      false);
  QuicStreamFrame unidirectional_stream_frame(unidirectional_stream_id, false,
                                              0, "Random String");
  EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(0);
  session_.OnStreamFrame(unidirectional_stream_frame);
}
// Incoming stream ids exactly at the advertised limits are still accepted
// (the limit is a count, so the Nth stream is within it).
TEST_P(QuicSessionTestServer, NewStreamIdAtLimit) {
  if (!VersionHasIetfQuicFrames(transport_version())) {
    return;
  }
  QuicStreamId bidirectional_stream_id =
      StreamCountToId(QuicSessionPeer::ietf_streamid_manager(&session_)
                          ->advertised_max_incoming_bidirectional_streams(),
                      Perspective::IS_CLIENT, true);
  QuicStreamFrame bidirectional_stream_frame(bidirectional_stream_id, false, 0,
                                             "Random String");
  EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(0);
  session_.OnStreamFrame(bidirectional_stream_frame);
  QuicStreamId unidirectional_stream_id =
      StreamCountToId(QuicSessionPeer::ietf_streamid_manager(&session_)
                          ->advertised_max_incoming_unidirectional_streams(),
                      Perspective::IS_CLIENT, false);
  QuicStreamFrame unidirectional_stream_frame(unidirectional_stream_id, false,
                                              0, "Random String");
  EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(0);
  session_.OnStreamFrame(unidirectional_stream_frame);
}
// Incoming stream ids one past the advertised limits close the connection
// with QUIC_INVALID_STREAM_ID; the detail strings pin the expected id and
// limit values for this fixture's configuration.
TEST_P(QuicSessionTestServer, NewStreamIdAboveLimit) {
  if (!VersionHasIetfQuicFrames(transport_version())) {
    return;
  }
  QuicStreamId bidirectional_stream_id = StreamCountToId(
      QuicSessionPeer::ietf_streamid_manager(&session_)
              ->advertised_max_incoming_bidirectional_streams() +
          1,
      Perspective::IS_CLIENT, true);
  QuicStreamFrame bidirectional_stream_frame(bidirectional_stream_id, false, 0,
                                             "Random String");
  EXPECT_CALL(
      *connection_,
      CloseConnection(QUIC_INVALID_STREAM_ID,
                      "Stream id 400 would exceed stream count limit 100", _));
  session_.OnStreamFrame(bidirectional_stream_frame);
  QuicStreamId unidirectional_stream_id = StreamCountToId(
      QuicSessionPeer::ietf_streamid_manager(&session_)
              ->advertised_max_incoming_unidirectional_streams() +
          1,
      Perspective::IS_CLIENT, false);
  QuicStreamFrame unidirectional_stream_frame(unidirectional_stream_id, false,
                                              0, "Random String");
  EXPECT_CALL(
      *connection_,
      CloseConnection(QUIC_INVALID_STREAM_ID,
                      "Stream id 402 would exceed stream count limit 100", _));
  session_.OnStreamFrame(unidirectional_stream_frame);
}
// STOP_SENDING for an invalid stream id (-1) closes the connection.
TEST_P(QuicSessionTestServer, OnStopSendingInvalidStreamId) {
  if (!VersionHasIetfQuicFrames(transport_version())) {
    return;  // STOP_SENDING only exists in IETF QUIC.
  }
  QuicStopSendingFrame frame(1, -1, QUIC_STREAM_CANCELLED);
  EXPECT_CALL(
      *connection_,
      CloseConnection(QUIC_INVALID_STREAM_ID,
                      "Received STOP_SENDING for an invalid stream", _));
  session_.OnStopSendingFrame(frame);
}
// STOP_SENDING aimed at a stream we only read from (peer's send side)
// is invalid and closes the connection.
TEST_P(QuicSessionTestServer, OnStopSendingReadUnidirectional) {
  if (!VersionHasIetfQuicFrames(transport_version())) {
    return;
  }
  QuicStopSendingFrame frame(1, GetNthClientInitiatedUnidirectionalId(1),
                             QUIC_STREAM_CANCELLED);
  EXPECT_CALL(
      *connection_,
      CloseConnection(QUIC_INVALID_STREAM_ID,
                      "Received STOP_SENDING for a read-only stream", _));
  session_.OnStopSendingFrame(frame);
}
// STOP_SENDING for a static stream closes the connection.
TEST_P(QuicSessionTestServer, OnStopSendingStaticStreams) {
  if (!VersionHasIetfQuicFrames(transport_version())) {
    return;
  }
  QuicStreamId stream_id = 0;
  // Register a fake static stream at id 0 so the frame targets it.
  std::unique_ptr<TestStream> fake_static_stream = std::make_unique<TestStream>(
      stream_id, &session_, true, BIDIRECTIONAL);
  QuicSessionPeer::ActivateStream(&session_, std::move(fake_static_stream));
  QuicStopSendingFrame frame(1, stream_id, QUIC_STREAM_CANCELLED);
  EXPECT_CALL(*connection_,
              CloseConnection(QUIC_INVALID_STREAM_ID,
                              "Received STOP_SENDING for a static stream", _));
  session_.OnStopSendingFrame(frame);
}
// STOP_SENDING for a stream whose write side is already closed (FIN sent)
// is tolerated — no connection close.
TEST_P(QuicSessionTestServer, OnStopSendingForWriteClosedStream) {
  if (!VersionHasIetfQuicFrames(transport_version())) {
    return;
  }
  TestStream* stream = session_.CreateOutgoingBidirectionalStream();
  QuicStreamId stream_id = stream->id();
  QuicStreamPeer::SetFinSent(stream);
  stream->CloseWriteSide();
  EXPECT_TRUE(stream->write_side_closed());
  QuicStopSendingFrame frame(1, stream_id, QUIC_STREAM_CANCELLED);
  EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(0);
  session_.OnStopSendingFrame(frame);
}
// STOP_SENDING for a zombie stream: with the fix flag on, the frame is
// delivered to the stream, which resets and fully closes; with it off the
// frame is ignored and the stream stays a zombie.
TEST_P(QuicSessionTestServer, OnStopSendingForZombieStreams) {
  if (!VersionHasIetfQuicFrames(transport_version())) {
    return;
  }
  CompleteHandshake();
  session_.set_writev_consumes_all_data(true);
  TestStream* stream = session_.CreateOutgoingBidirectionalStream();
  std::string body(100, '.');
  QuicStreamPeer::CloseReadSide(stream);
  stream->WriteOrBufferData(body, true, nullptr);
  EXPECT_TRUE(stream->IsWaitingForAcks());
  EXPECT_TRUE(stream->IsZombie());
  ASSERT_EQ(0u, session_.closed_streams()->size());
  QuicStopSendingFrame frame(1, stream->id(), QUIC_STREAM_CANCELLED);
  EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(0);
  if (GetQuicReloadableFlag(quic_deliver_stop_sending_to_zombie_streams)) {
    // Delivered STOP_SENDING makes the zombie stream send RESET_STREAM.
    EXPECT_CALL(*connection_, SendControlFrame(_)).Times(1);
    EXPECT_CALL(*connection_, OnStreamReset(_, _)).Times(1);
  } else {
    EXPECT_CALL(*connection_, SendControlFrame(_)).Times(0);
    EXPECT_CALL(*connection_, OnStreamReset(_, _)).Times(0);
  }
  session_.OnStopSendingFrame(frame);
  if (GetQuicReloadableFlag(quic_deliver_stop_sending_to_zombie_streams)) {
    EXPECT_FALSE(stream->IsZombie());
    EXPECT_EQ(1u, session_.closed_streams()->size());
  } else {
    EXPECT_TRUE(stream->IsZombie());
    EXPECT_EQ(0u, session_.closed_streams()->size());
  }
}
// STOP_SENDING for an already-closed stream is ignored gracefully.
TEST_P(QuicSessionTestServer, OnStopSendingClosedStream) {
  if (!VersionHasIetfQuicFrames(transport_version())) {
    return;
  }
  CompleteHandshake();
  TestStream* stream = session_.CreateOutgoingBidirectionalStream();
  QuicStreamId stream_id = stream->id();
  CloseStream(stream_id);
  QuicStopSendingFrame frame(1, stream_id, QUIC_STREAM_CANCELLED);
  EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(0);
  session_.OnStopSendingFrame(frame);
}
// STOP_SENDING naming a far-future locally-initiated stream id that was
// never created closes the connection (wrong direction for peer frames).
TEST_P(QuicSessionTestServer, OnStopSendingInputNonExistentLocalStream) {
  if (!VersionHasIetfQuicFrames(transport_version())) {
    return;
  }
  QuicStopSendingFrame frame(1, GetNthServerInitiatedBidirectionalId(123456),
                             QUIC_STREAM_CANCELLED);
  EXPECT_CALL(*connection_, CloseConnection(QUIC_HTTP_STREAM_WRONG_DIRECTION,
                                            "Data for nonexistent stream", _))
      .Times(1);
  session_.OnStopSendingFrame(frame);
}
// STOP_SENDING for a not-yet-created (but creatable) peer stream creates
// the stream, resets its write side, and leaves its read side open.
TEST_P(QuicSessionTestServer, OnStopSendingNewStream) {
  CompleteHandshake();
  if (!VersionHasIetfQuicFrames(transport_version())) {
    return;
  }
  QuicStopSendingFrame frame(1, GetNthClientInitiatedBidirectionalId(1),
                             QUIC_STREAM_CANCELLED);
  EXPECT_CALL(*connection_, SendControlFrame(_)).Times(1);
  EXPECT_CALL(*connection_, OnStreamReset(_, _)).Times(1);
  session_.OnStopSendingFrame(frame);
  QuicStream* stream =
      session_.GetOrCreateStream(GetNthClientInitiatedBidirectionalId(1));
  EXPECT_TRUE(stream);
  EXPECT_TRUE(stream->write_side_closed());
}
// STOP_SENDING for an open outgoing stream triggers a RESET_STREAM in
// response and closes only the write side; the read side stays open.
TEST_P(QuicSessionTestServer, OnStopSendingInputValidStream) {
  CompleteHandshake();
  if (!VersionHasIetfQuicFrames(transport_version())) {
    return;
  }
  TestStream* stream = session_.CreateOutgoingBidirectionalStream();
  EXPECT_FALSE(stream->write_side_closed());
  EXPECT_FALSE(QuicStreamPeer::read_side_closed(stream));
  QuicStreamId stream_id = stream->id();
  QuicStopSendingFrame frame(1, stream_id, QUIC_STREAM_CANCELLED);
  EXPECT_CALL(*connection_, SendControlFrame(_));
  EXPECT_CALL(*connection_, OnStreamReset(stream_id, QUIC_STREAM_CANCELLED));
  EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(0);
  session_.OnStopSendingFrame(frame);
  EXPECT_FALSE(QuicStreamPeer::read_side_closed(stream));
  EXPECT_TRUE(stream->write_side_closed());
}
// Crypto data that could only be partially written stays buffered; a later
// OnCanWrite flushes the INITIAL remainder and the 0-RTT data (at whatever
// level the crypto stream maps that packet-number space to).
TEST_P(QuicSessionTestServer, WriteBufferedCryptoFrames) {
  if (!QuicVersionUsesCryptoFrames(connection_->transport_version())) {
    return;
  }
  std::string data(1350, 'a');
  TestCryptoStream* crypto_stream = session_.GetMutableCryptoStream();
  // Only 1000 of 1350 bytes are accepted; 350 remain buffered.
  EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_INITIAL, 1350, 0))
      .WillOnce(Return(1000));
  crypto_stream->WriteCryptoData(ENCRYPTION_INITIAL, data);
  EXPECT_TRUE(session_.HasPendingHandshake());
  EXPECT_TRUE(session_.WillingAndAbleToWrite());
  // While INITIAL data is buffered, 0-RTT data is buffered too (not sent).
  EXPECT_CALL(*connection_, SendCryptoData(_, _, _)).Times(0);
  connection_->SetEncrypter(
      ENCRYPTION_ZERO_RTT,
      std::make_unique<NullEncrypter>(connection_->perspective()));
  crypto_stream->WriteCryptoData(ENCRYPTION_ZERO_RTT, data);
  EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_INITIAL, 350, 1000))
      .WillOnce(Return(350));
  EXPECT_CALL(
      *connection_,
      SendCryptoData(crypto_stream->GetEncryptionLevelToSendCryptoDataOfSpace(
                         QuicUtils::GetPacketNumberSpace(ENCRYPTION_ZERO_RTT)),
                     1350, 0))
      .WillOnce(Return(1350));
  session_.OnCanWrite();
  EXPECT_FALSE(session_.HasPendingHandshake());
  EXPECT_FALSE(session_.WillingAndAbleToWrite());
}
// Stream data arriving beyond the final offset established by a received
// FIN closes the connection.
TEST_P(QuicSessionTestServer, StreamFrameReceivedAfterFin) {
  TestStream* stream = session_.CreateOutgoingBidirectionalStream();
  QuicStreamFrame frame(stream->id(), true, 0, ",");
  session_.OnStreamFrame(frame);
  QuicStreamFrame frame1(stream->id(), false, 1, ",");
  EXPECT_CALL(*connection_,
              CloseConnection(QUIC_STREAM_DATA_BEYOND_CLOSE_OFFSET, _, _));
  session_.OnStreamFrame(frame1);
}
// ResetStream sends the right number of control frames per IETF stream
// type: one for read-only (STOP_SENDING), one for write-only
// (RESET_STREAM), and two for bidirectional streams.
TEST_P(QuicSessionTestServer, ResetForIETFStreamTypes) {
  CompleteHandshake();
  if (!VersionHasIetfQuicFrames(transport_version())) {
    return;
  }
  QuicStreamId read_only = GetNthClientInitiatedUnidirectionalId(0);
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .Times(1)
      .WillOnce(Invoke(&ClearControlFrame));
  EXPECT_CALL(*connection_, OnStreamReset(read_only, _));
  session_.ResetStream(read_only, QUIC_STREAM_CANCELLED);
  QuicStreamId write_only = GetNthServerInitiatedUnidirectionalId(0);
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .Times(1)
      .WillOnce(Invoke(&ClearControlFrame));
  EXPECT_CALL(*connection_, OnStreamReset(write_only, _));
  session_.ResetStream(write_only, QUIC_STREAM_CANCELLED);
  QuicStreamId bidirectional = GetNthClientInitiatedBidirectionalId(0);
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .Times(2)
      .WillRepeatedly(Invoke(&ClearControlFrame));
  EXPECT_CALL(*connection_, OnStreamReset(bidirectional, _));
  session_.ResetStream(bidirectional, QUIC_STREAM_CANCELLED);
}
// In TLS 1.3 handshakes, installing a HANDSHAKE decryption key is rejected
// while no corresponding encryption key exists yet.
TEST_P(QuicSessionTestServer, DecryptionKeyAvailableBeforeEncryptionKey) {
  if (connection_->version().handshake_protocol != PROTOCOL_TLS1_3) {
    return;
  }
  ASSERT_FALSE(connection_->framer().HasEncrypterOfEncryptionLevel(
      ENCRYPTION_HANDSHAKE));
  EXPECT_FALSE(session_.OnNewDecryptionKeyAvailable(
      ENCRYPTION_HANDSHAKE, nullptr,
      false, false));
}
// Server-side mirror of the client test: data for an unknown
// SERVER-initiated bidirectional stream is a protocol violation.
TEST_P(QuicSessionTestServer, IncomingStreamWithServerInitiatedStreamId) {
  const QuicErrorCode expected_error =
      VersionHasIetfQuicFrames(transport_version())
          ? QUIC_HTTP_STREAM_WRONG_DIRECTION
          : QUIC_INVALID_STREAM_ID;
  EXPECT_CALL(
      *connection_,
      CloseConnection(expected_error, "Data for nonexistent stream",
                      ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET));
  QuicStreamFrame frame(GetNthServerInitiatedBidirectionalId(1),
                        false, 0,
                        absl::string_view("foo"));
  session_.OnStreamFrame(frame);
}
TEST_P(QuicSessionTestServer, BlockedFrameCausesWriteError) {
CompleteHandshake();
MockPacketWriter* writer = static_cast<MockPacketWriter*>(
QuicConnectionPeer::GetWriter(session_.connection()));
EXPECT_CALL(*writer, WritePacket(_, _, _, _, _, _))
.WillOnce(Return(WriteResult(WRITE_STATUS_OK, 0)));
const uint64_t kWindow = 36;
QuicFlowControllerPeer::SetSendWindowOffset(session_.flow_controller(),
kWindow);
auto stream =
session_.GetOrCreateStream(GetNthClientInitiatedBidirectionalId(0));
const uint64_t kOverflow = 15;
std::string body(kWindow + kOverflow, 'a');
EXPECT_CALL(*connection_, SendControlFrame(_))
.WillOnce(testing::InvokeWithoutArgs([this]() {
connection_->ReallyCloseConnection(
QUIC_PACKET_WRITE_ERROR, "write error",
ConnectionCloseBehavior::SILENT_CLOSE);
return false;
}));
stream->WriteOrBufferData(body, false, nullptr);
}
TEST_P(QuicSessionTestServer, BufferedCryptoFrameCausesWriteError) {
if (!VersionHasIetfQuicFrames(transport_version())) {
return;
}
std::string data(1350, 'a');
TestCryptoStream* crypto_stream = session_.GetMutableCryptoStream();
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_FORWARD_SECURE, 1350, 0))
.WillOnce(Return(1000));
crypto_stream->WriteCryptoData(ENCRYPTION_FORWARD_SECURE, data);
EXPECT_TRUE(session_.HasPendingHandshake());
EXPECT_TRUE(session_.WillingAndAbleToWrite());
EXPECT_CALL(*connection_,
SendCryptoData(ENCRYPTION_FORWARD_SECURE, 350, 1000))
.WillOnce(Return(0));
EXPECT_CALL(*connection_, SendControlFrame(_)).WillOnce(Return(false));
CryptoHandshakeMessage msg;
session_.GetMutableCryptoStream()->OnHandshakeMessage(msg);
EXPECT_CALL(*connection_,
SendCryptoData(ENCRYPTION_FORWARD_SECURE, 350, 1000))
.WillOnce(testing::InvokeWithoutArgs([this]() {
connection_->ReallyCloseConnection(
QUIC_PACKET_WRITE_ERROR, "write error",
ConnectionCloseBehavior::SILENT_CLOSE);
return 350;
}));
if (!GetQuicReloadableFlag(
quic_no_write_control_frame_upon_connection_close)) {
EXPECT_CALL(*connection_, SendControlFrame(_)).WillOnce(Return(false));
EXPECT_QUIC_BUG(session_.OnCanWrite(), "Try to write control frame");
} else {
session_.OnCanWrite();
}
}
TEST_P(QuicSessionTestServer, DonotPtoStreamDataBeforeHandshakeConfirmed) {
if (!session_.version().UsesTls()) {
return;
}
EXPECT_NE(HANDSHAKE_CONFIRMED, session_.GetHandshakeState());
TestCryptoStream* crypto_stream = session_.GetMutableCryptoStream();
EXPECT_FALSE(crypto_stream->HasBufferedCryptoFrames());
std::string data(1350, 'a');
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_INITIAL, 1350, 0))
.WillOnce(Return(1000));
crypto_stream->WriteCryptoData(ENCRYPTION_INITIAL, data);
ASSERT_TRUE(crypto_stream->HasBufferedCryptoFrames());
TestStream* stream = session_.CreateOutgoingBidirectionalStream();
session_.MarkConnectionLevelWriteBlocked(stream->id());
EXPECT_CALL(*connection_, SendCryptoData(ENCRYPTION_INITIAL, _, _))
.WillOnce(Return(350));
EXPECT_CALL(*stream, OnCanWrite()).Times(0);
QuicConnectionPeer::SetInProbeTimeOut(connection_, true);
session_.OnCanWrite();
EXPECT_FALSE(crypto_stream->HasBufferedCryptoFrames());
}
TEST_P(QuicSessionTestServer, SetStatelessResetTokenToSend) {
if (!session_.version().HasIetfQuicFrames()) {
return;
}
EXPECT_TRUE(session_.config()->HasStatelessResetTokenToSend());
}
TEST_P(QuicSessionTestServer,
SetServerPreferredAddressAccordingToAddressFamily) {
if (!session_.version().HasIetfQuicFrames()) {
return;
}
EXPECT_EQ(quiche::IpAddressFamily::IP_V4,
connection_->peer_address().host().address_family());
QuicConnectionPeer::SetEffectivePeerAddress(connection_,
connection_->peer_address());
QuicTagVector copt;
copt.push_back(kSPAD);
QuicConfigPeer::SetReceivedConnectionOptions(session_.config(), copt);
QuicSocketAddress preferred_address(QuicIpAddress::Loopback4(), 12345);
session_.config()->SetIPv4AlternateServerAddressToSend(preferred_address);
session_.config()->SetIPv6AlternateServerAddressToSend(
QuicSocketAddress(QuicIpAddress::Loopback6(), 12345));
connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
session_.OnConfigNegotiated();
EXPECT_EQ(QuicSocketAddress(QuicIpAddress::Loopback4(), 12345),
session_.config()
->GetPreferredAddressToSend(quiche::IpAddressFamily::IP_V4)
.value());
EXPECT_FALSE(session_.config()
->GetPreferredAddressToSend(quiche::IpAddressFamily::IP_V6)
.has_value());
EXPECT_EQ(preferred_address,
connection_->expected_server_preferred_address());
}
TEST_P(QuicSessionTestServer,
SetDNatServerPreferredAddressAccordingToAddressFamily) {
if (!session_.version().HasIetfQuicFrames()) {
return;
}
EXPECT_EQ(quiche::IpAddressFamily::IP_V4,
connection_->peer_address().host().address_family());
QuicConnectionPeer::SetEffectivePeerAddress(connection_,
connection_->peer_address());
QuicTagVector copt;
copt.push_back(kSPAD);
QuicConfigPeer::SetReceivedConnectionOptions(session_.config(), copt);
QuicSocketAddress sent_preferred_address(QuicIpAddress::Loopback4(), 12345);
QuicSocketAddress expected_preferred_address(QuicIpAddress::Loopback4(),
12346);
session_.config()->SetIPv4AlternateServerAddressForDNat(
sent_preferred_address, expected_preferred_address);
session_.config()->SetIPv6AlternateServerAddressForDNat(
QuicSocketAddress(QuicIpAddress::Loopback6(), 12345),
QuicSocketAddress(QuicIpAddress::Loopback6(), 12346));
connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
session_.OnConfigNegotiated();
EXPECT_EQ(QuicSocketAddress(QuicIpAddress::Loopback4(), 12345),
session_.config()
->GetPreferredAddressToSend(quiche::IpAddressFamily::IP_V4)
.value());
EXPECT_FALSE(session_.config()
->GetPreferredAddressToSend(quiche::IpAddressFamily::IP_V6)
.has_value());
EXPECT_EQ(expected_preferred_address,
connection_->expected_server_preferred_address());
}
TEST_P(QuicSessionTestServer, NoServerPreferredAddressIfAddressFamilyMismatch) {
if (!session_.version().HasIetfQuicFrames()) {
return;
}
EXPECT_EQ(quiche::IpAddressFamily::IP_V4,
connection_->peer_address().host().address_family());
QuicConnectionPeer::SetEffectivePeerAddress(connection_,
connection_->peer_address());
QuicTagVector copt;
copt.push_back(kSPAD);
QuicConfigPeer::SetReceivedConnectionOptions(session_.config(), copt);
session_.config()->SetIPv6AlternateServerAddressToSend(
QuicSocketAddress(QuicIpAddress::Loopback6(), 12345));
connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
session_.OnConfigNegotiated();
EXPECT_FALSE(session_.config()
->GetPreferredAddressToSend(quiche::IpAddressFamily::IP_V4)
.has_value());
EXPECT_FALSE(session_.config()
->GetPreferredAddressToSend(quiche::IpAddressFamily::IP_V6)
.has_value());
EXPECT_FALSE(
connection_->expected_server_preferred_address().IsInitialized());
}
TEST_P(QuicSessionTestServer, OpenStreamLimitPerEventLoop) {
if (!VersionHasIetfQuicFrames(transport_version())) {
return;
}
session_.set_uses_pending_streams(true);
CompleteHandshake();
QuicStreamId unidirectional_stream_id =
QuicUtils::GetFirstUnidirectionalStreamId(transport_version(),
Perspective::IS_CLIENT);
QuicStreamFrame data1(unidirectional_stream_id, false, 10,
absl::string_view("HT"));
session_.OnStreamFrame(data1);
EXPECT_TRUE(
QuicSessionPeer::GetPendingStream(&session_, unidirectional_stream_id));
EXPECT_EQ(0, session_.num_incoming_streams_created());
size_t i = 0u;
for (; i < 10u; ++i) {
QuicStreamId bidi_stream_id = GetNthClientInitiatedBidirectionalId(i);
QuicStreamFrame data(bidi_stream_id, false, 0, "aaaa");
session_.OnStreamFrame(data);
if (i > 4u) {
EXPECT_TRUE(QuicSessionPeer::GetPendingStream(&session_, bidi_stream_id));
}
}
EXPECT_EQ(5u, session_.num_incoming_streams_created());
EXPECT_EQ(GetNthClientInitiatedBidirectionalId(i - 1),
QuicSessionPeer::GetLargestPeerCreatedStreamId(&session_, false));
EXPECT_TRUE(session_.GetActiveStream(GetNthClientInitiatedBidirectionalId(4))
->pending_duration()
.IsZero());
QuicStreamFrame data2(unidirectional_stream_id, false, 0,
absl::string_view("HT"));
session_.OnStreamFrame(data2);
EXPECT_TRUE(
QuicSessionPeer::GetPendingStream(&session_, unidirectional_stream_id));
helper_.GetClock()->AdvanceTime(QuicTime::Delta::FromMicroseconds(100));
QuicAlarm* alarm = QuicSessionPeer::GetStreamCountResetAlarm(&session_);
EXPECT_TRUE(alarm->IsSet());
alarm_factory_.FireAlarm(alarm);
EXPECT_EQ(10u, session_.num_incoming_streams_created());
EXPECT_NE(nullptr, session_.GetActiveStream(unidirectional_stream_id));
EXPECT_EQ(100, session_.GetActiveStream(unidirectional_stream_id)
->pending_duration()
.ToMicroseconds());
EXPECT_EQ(
100,
session_.GetActiveStream(GetNthClientInitiatedBidirectionalId(i - 2))
->pending_duration()
.ToMicroseconds());
EXPECT_EQ(nullptr, session_.GetActiveStream(
GetNthClientInitiatedBidirectionalId(i - 1)));
}
class QuicSessionTestClientUnconfigured : public QuicSessionTestBase {
protected:
QuicSessionTestClientUnconfigured()
: QuicSessionTestBase(Perspective::IS_CLIENT,
false) {}
};
INSTANTIATE_TEST_SUITE_P(Tests, QuicSessionTestClientUnconfigured,
::testing::ValuesIn(AllSupportedVersions()),
::testing::PrintToStringParamName());
TEST_P(QuicSessionTestClientUnconfigured, StreamInitiallyBlockedThenUnblocked) {
if (!connection_->version().AllowsLowFlowControlLimits()) {
return;
}
QuicSessionPeer::SetMaxOpenOutgoingBidirectionalStreams(&session_, 10);
TestStream* stream2 = session_.CreateOutgoingBidirectionalStream();
EXPECT_TRUE(stream2->IsFlowControlBlocked());
EXPECT_TRUE(session_.IsConnectionFlowControlBlocked());
EXPECT_TRUE(session_.IsStreamFlowControlBlocked());
QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesOutgoingBidirectional(
session_.config(), kMinimumFlowControlSendWindow);
QuicConfigPeer::SetReceivedInitialSessionFlowControlWindow(
session_.config(), kMinimumFlowControlSendWindow);
session_.OnConfigNegotiated();
EXPECT_FALSE(stream2->IsFlowControlBlocked());
EXPECT_FALSE(session_.IsConnectionFlowControlBlocked());
EXPECT_FALSE(session_.IsStreamFlowControlBlocked());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_session.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_session_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
750c58e8-c353-449d-8065-8f1bc4edfbe2 | cpp | google/tensorstore | indirect_data_writer | tensorstore/kvstore/ocdbt/io/indirect_data_writer.cc | tensorstore/kvstore/ocdbt/io/indirect_data_writer_test.cc | #include "tensorstore/kvstore/ocdbt/io/indirect_data_writer.h"
#include <stddef.h>
#include <cassert>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/synchronization/mutex.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/metrics/histogram.h"
#include "tensorstore/internal/metrics/metadata.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/ocdbt/format/data_file_id.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_ocdbt {
namespace {
auto& indirect_data_writer_histogram =
internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
"/tensorstore/kvstore/ocdbt/indirect_data_write_size",
internal_metrics::MetricMetadata(
"Histogram of OCDBT buffered write sizes.",
internal_metrics::Units::kBytes));
ABSL_CONST_INIT internal_log::VerboseFlag ocdbt_logging("ocdbt");
}
class IndirectDataWriter
: public internal::AtomicReferenceCount<IndirectDataWriter> {
public:
explicit IndirectDataWriter(kvstore::KvStore kvstore, std::string prefix,
size_t target_size)
: kvstore_(std::move(kvstore)),
prefix_(std::move(prefix)),
target_size_(target_size) {}
kvstore::KvStore kvstore_;
std::string prefix_;
size_t target_size_;
absl::Mutex mutex_;
size_t in_flight_ = 0;
bool flush_requested_ = false;
absl::Cord buffer_;
Promise<void> promise_;
DataFileId data_file_id_;
};
void intrusive_ptr_increment(IndirectDataWriter* p) {
intrusive_ptr_increment(
static_cast<internal::AtomicReferenceCount<IndirectDataWriter>*>(p));
}
void intrusive_ptr_decrement(IndirectDataWriter* p) {
intrusive_ptr_decrement(
static_cast<internal::AtomicReferenceCount<IndirectDataWriter>*>(p));
}
namespace {
void MaybeFlush(IndirectDataWriter& self, UniqueWriterLock<absl::Mutex> lock) {
bool buffer_at_target =
self.target_size_ > 0 && self.buffer_.size() >= self.target_size_;
ABSL_LOG_IF(INFO, ocdbt_logging)
<< "MaybeFlush: flush_requested=" << self.flush_requested_
<< ", in_flight=" << self.in_flight_
<< ", buffer_at_target=" << buffer_at_target;
if (buffer_at_target) {
} else if (!self.flush_requested_ || self.in_flight_ > 0) {
return;
}
self.in_flight_++;
self.flush_requested_ = false;
Promise<void> promise = std::exchange(self.promise_, {});
absl::Cord buffer = std::exchange(self.buffer_, {});
DataFileId data_file_id = self.data_file_id_;
lock.unlock();
indirect_data_writer_histogram.Observe(buffer.size());
ABSL_LOG_IF(INFO, ocdbt_logging)
<< "Flushing " << buffer.size() << " bytes to " << data_file_id;
auto write_future =
kvstore::Write(self.kvstore_, data_file_id.FullPath(), std::move(buffer));
write_future.Force();
write_future.ExecuteWhenReady(
[promise = std::move(promise), data_file_id = std::move(data_file_id),
self = internal::IntrusivePtr<IndirectDataWriter>(&self)](
ReadyFuture<TimestampedStorageGeneration> future) {
auto& r = future.result();
ABSL_LOG_IF(INFO, ocdbt_logging)
<< "Done flushing data to " << data_file_id << ": " << r.status();
if (!r.ok()) {
promise.SetResult(r.status());
} else if (StorageGeneration::IsUnknown(r->generation)) {
promise.SetResult(absl::UnavailableError("Non-unique file id"));
} else {
promise.SetResult(absl::OkStatus());
}
UniqueWriterLock lock{self->mutex_};
assert(self->in_flight_ > 0);
self->in_flight_--;
MaybeFlush(*self, std::move(lock));
});
}
}
Future<const void> Write(IndirectDataWriter& self, absl::Cord data,
IndirectDataReference& ref) {
ABSL_LOG_IF(INFO, ocdbt_logging)
<< "Write indirect data: size=" << data.size();
if (data.empty()) {
ref.file_id = DataFileId{};
ref.offset = 0;
ref.length = 0;
return absl::OkStatus();
}
UniqueWriterLock lock{self.mutex_};
Future<const void> future;
if (self.promise_.null() || (future = self.promise_.future()).null()) {
self.data_file_id_ = GenerateDataFileId(self.prefix_);
auto p = PromiseFuturePair<void>::Make();
self.promise_ = std::move(p.promise);
future = std::move(p.future);
self.promise_.ExecuteWhenForced(
[self = internal::IntrusivePtr<IndirectDataWriter>(&self)](
Promise<void> promise) {
ABSL_LOG_IF(INFO, ocdbt_logging) << "Force called";
UniqueWriterLock lock{self->mutex_};
if (!HaveSameSharedState(promise, self->promise_)) return;
self->flush_requested_ = true;
MaybeFlush(*self, std::move(lock));
});
}
ref.file_id = self.data_file_id_;
ref.offset = self.buffer_.size();
ref.length = data.size();
self.buffer_.Append(std::move(data));
if (self.target_size_ > 0 && self.buffer_.size() >= self.target_size_) {
MaybeFlush(self, std::move(lock));
}
return future;
}
IndirectDataWriterPtr MakeIndirectDataWriter(kvstore::KvStore kvstore,
std::string prefix,
size_t target_size) {
return internal::MakeIntrusivePtr<IndirectDataWriter>(
std::move(kvstore), std::move(prefix), target_size);
}
}
} | #include "tensorstore/kvstore/ocdbt/io/indirect_data_writer.h"
#include <algorithm>
#include <cstring>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "tensorstore/internal/flat_cord_builder.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/memory/memory_key_value_store.h"
#include "tensorstore/kvstore/mock_kvstore.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::Future;
using ::tensorstore::internal::FlatCordBuilder;
using ::tensorstore::internal::MockKeyValueStore;
using ::tensorstore::internal_ocdbt::IndirectDataReference;
using ::tensorstore::internal_ocdbt::MakeIndirectDataWriter;
using ::tensorstore::internal_ocdbt::Write;
namespace {
absl::Cord GetCord(size_t size) {
FlatCordBuilder cord_builder(size);
memset(cord_builder.data(), 0x37, cord_builder.size());
return std::move(cord_builder).Build();
}
template <typename T>
std::vector<std::string> ListEntriesToFiles(T& entries) {
std::vector<std::string> files;
files.reserve(entries.size());
for (auto& e : entries) {
files.push_back(std::move(e.key));
}
std::sort(files.begin(), files.end());
return files;
}
TEST(IndirectDataWriter, UnlimitedSize) {
auto data = GetCord(260);
auto memory_store = tensorstore::GetMemoryKeyValueStore();
auto mock_key_value_store = MockKeyValueStore::Make();
auto writer = MakeIndirectDataWriter(
tensorstore::kvstore::KvStore(mock_key_value_store), "d/", 0);
std::vector<Future<const void>> futures;
std::vector<std::string> refs;
for (int i = 0; i < 1000; ++i) {
IndirectDataReference ref;
auto f = Write(*writer, data, ref);
if (refs.empty() || refs.back() != ref.file_id.FullPath()) {
refs.push_back(ref.file_id.FullPath());
}
f.Force();
futures.push_back(std::move(f));
}
std::sort(refs.begin(), refs.end());
EXPECT_THAT(refs, ::testing::SizeIs(::testing::Eq(2)));
while (!mock_key_value_store->write_requests.empty()) {
EXPECT_THAT(mock_key_value_store->write_requests.size(), ::testing::Eq(1));
auto r = mock_key_value_store->write_requests.pop();
r(memory_store);
}
for (auto& f : futures) {
TENSORSTORE_ASSERT_OK(f.status());
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto entries,
tensorstore::kvstore::ListFuture(memory_store.get()).result());
auto files = ListEntriesToFiles(entries);
EXPECT_THAT(files, ::testing::SizeIs(2));
EXPECT_THAT(files, ::testing::ElementsAreArray(refs));
}
TEST(IndirectDataWriter, LimitedSize) {
constexpr size_t kTargetSize = 1024;
auto data = GetCord(260);
auto memory_store = tensorstore::GetMemoryKeyValueStore();
auto mock_key_value_store = MockKeyValueStore::Make();
auto writer = MakeIndirectDataWriter(
tensorstore::kvstore::KvStore(mock_key_value_store), "d/", kTargetSize);
std::vector<Future<const void>> futures;
std::vector<std::string> refs;
for (int i = 0; i < 1000; ++i) {
IndirectDataReference ref;
auto f = Write(*writer, data, ref);
EXPECT_THAT(ref.offset, testing::Le(kTargetSize));
if (refs.empty() || refs.back() != ref.file_id.FullPath()) {
refs.push_back(ref.file_id.FullPath());
}
f.Force();
futures.push_back(std::move(f));
}
std::sort(refs.begin(), refs.end());
EXPECT_THAT(refs, ::testing::SizeIs(::testing::Ge(250)));
EXPECT_THAT(mock_key_value_store->write_requests.size(), ::testing::Gt(1));
while (!mock_key_value_store->write_requests.empty()) {
auto r = mock_key_value_store->write_requests.pop();
r(memory_store);
}
for (auto& f : futures) {
TENSORSTORE_ASSERT_OK(f.status());
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto entries,
tensorstore::kvstore::ListFuture(memory_store.get()).result());
auto files = ListEntriesToFiles(entries);
EXPECT_THAT(files, ::testing::SizeIs(refs.size()));
EXPECT_THAT(files, ::testing::ElementsAreArray(refs));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/io/indirect_data_writer.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/io/indirect_data_writer_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
aad1395a-30ae-4ff6-8d6a-8503c99579ca | cpp | abseil/abseil-cpp | discrete_distribution | absl/random/discrete_distribution.cc | absl/random/discrete_distribution_test.cc | #include "absl/random/discrete_distribution.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
std::vector<std::pair<double, size_t>> InitDiscreteDistribution(
std::vector<double>* probabilities) {
assert(probabilities);
assert(!probabilities->empty());
double sum = std::accumulate(std::begin(*probabilities),
std::end(*probabilities), 0.0);
if (std::fabs(sum - 1.0) > 1e-6) {
for (double& item : *probabilities) {
item = item / sum;
}
}
const size_t n = probabilities->size();
std::vector<std::pair<double, size_t>> q;
q.reserve(n);
std::vector<size_t> over;
std::vector<size_t> under;
size_t idx = 0;
for (const double item : *probabilities) {
assert(item >= 0);
const double v = item * n;
q.emplace_back(v, 0);
if (v < 1.0) {
under.push_back(idx++);
} else {
over.push_back(idx++);
}
}
while (!over.empty() && !under.empty()) {
auto lo = under.back();
under.pop_back();
auto hi = over.back();
over.pop_back();
q[lo].second = hi;
const double r = q[hi].first - (1.0 - q[lo].first);
q[hi].first = r;
if (r < 1.0) {
under.push_back(hi);
} else {
over.push_back(hi);
}
}
for (auto i : over) {
q[i] = {1.0, i};
}
for (auto i : under) {
q[i] = {1.0, i};
}
return q;
}
}
ABSL_NAMESPACE_END
} | #include "absl/random/discrete_distribution.h"
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <numeric>
#include <random>
#include <sstream>
#include <string>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/log/log.h"
#include "absl/random/internal/chi_square.h"
#include "absl/random/internal/distribution_test_util.h"
#include "absl/random/internal/pcg_engine.h"
#include "absl/random/internal/sequence_urbg.h"
#include "absl/random/random.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/strip.h"
namespace {
template <typename IntType>
class DiscreteDistributionTypeTest : public ::testing::Test {};
using IntTypes = ::testing::Types<int8_t, uint8_t, int16_t, uint16_t, int32_t,
uint32_t, int64_t, uint64_t>;
TYPED_TEST_SUITE(DiscreteDistributionTypeTest, IntTypes);
TYPED_TEST(DiscreteDistributionTypeTest, ParamSerializeTest) {
using param_type =
typename absl::discrete_distribution<TypeParam>::param_type;
absl::discrete_distribution<TypeParam> empty;
EXPECT_THAT(empty.probabilities(), testing::ElementsAre(1.0));
absl::discrete_distribution<TypeParam> before({1.0, 2.0, 1.0});
double s = 0;
for (const auto& x : before.probabilities()) {
s += x;
}
EXPECT_EQ(s, 1.0);
EXPECT_THAT(before.probabilities(), testing::ElementsAre(0.25, 0.5, 0.25));
{
std::vector<double> data({1.0, 2.0, 1.0});
absl::discrete_distribution<TypeParam> via_param{
param_type(std::begin(data), std::end(data))};
EXPECT_EQ(via_param, before);
}
std::stringstream ss;
ss << before;
absl::discrete_distribution<TypeParam> after;
EXPECT_NE(before, after);
ss >> after;
EXPECT_EQ(before, after);
}
TYPED_TEST(DiscreteDistributionTypeTest, Constructor) {
auto fn = [](double x) { return x; };
{
absl::discrete_distribution<int> unary(0, 1.0, 9.0, fn);
EXPECT_THAT(unary.probabilities(), testing::ElementsAre(1.0));
}
{
absl::discrete_distribution<int> unary(2, 1.0, 9.0, fn);
EXPECT_THAT(unary.probabilities(), testing::ElementsAre(0.3, 0.7));
}
}
TEST(DiscreteDistributionTest, InitDiscreteDistribution) {
using testing::_;
using testing::Pair;
{
std::vector<double> p({1.0, 2.0, 3.0});
std::vector<std::pair<double, size_t>> q =
absl::random_internal::InitDiscreteDistribution(&p);
EXPECT_THAT(p, testing::ElementsAre(1 / 6.0, 2 / 6.0, 3 / 6.0));
EXPECT_THAT(q, testing::ElementsAre(Pair(0.5, 2),
Pair(1.0, _),
Pair(1.0, _)));
}
{
std::vector<double> p({1.0, 2.0, 3.0, 5.0, 2.0});
std::vector<std::pair<double, size_t>> q =
absl::random_internal::InitDiscreteDistribution(&p);
EXPECT_THAT(p, testing::ElementsAre(1 / 13.0, 2 / 13.0, 3 / 13.0, 5 / 13.0,
2 / 13.0));
constexpr double b0 = 1.0 / 13.0 / 0.2;
constexpr double b1 = 2.0 / 13.0 / 0.2;
constexpr double b3 = (5.0 / 13.0 / 0.2) - ((1 - b0) + (1 - b1) + (1 - b1));
EXPECT_THAT(q, testing::ElementsAre(Pair(b0, 3),
Pair(b1, 3),
Pair(1.0, _),
Pair(b3, 2),
Pair(b1, 3)));
}
}
TEST(DiscreteDistributionTest, ChiSquaredTest50) {
using absl::random_internal::kChiSquared;
constexpr size_t kTrials = 10000;
constexpr int kBuckets = 50;
const int kThreshold =
absl::random_internal::ChiSquareValue(kBuckets, 0.99999);
std::vector<double> weights(kBuckets, 0);
std::iota(std::begin(weights), std::end(weights), 1);
absl::discrete_distribution<int> dist(std::begin(weights), std::end(weights));
absl::random_internal::pcg64_2018_engine rng(0x2B7E151628AED2A6);
std::vector<int32_t> counts(kBuckets, 0);
for (size_t i = 0; i < kTrials; i++) {
auto x = dist(rng);
counts[x]++;
}
double sum = 0;
for (double x : weights) {
sum += x;
}
for (double& x : weights) {
x = kTrials * (x / sum);
}
double chi_square =
absl::random_internal::ChiSquare(std::begin(counts), std::end(counts),
std::begin(weights), std::end(weights));
if (chi_square > kThreshold) {
double p_value =
absl::random_internal::ChiSquarePValue(chi_square, kBuckets);
std::string msg;
for (size_t i = 0; i < counts.size(); i++) {
absl::StrAppend(&msg, i, ": ", counts[i], " vs ", weights[i], "\n");
}
absl::StrAppend(&msg, kChiSquared, " p-value ", p_value, "\n");
absl::StrAppend(&msg, "High ", kChiSquared, " value: ", chi_square, " > ",
kThreshold);
LOG(INFO) << msg;
FAIL() << msg;
}
}
TEST(DiscreteDistributionTest, StabilityTest) {
absl::random_internal::sequence_urbg urbg(
{0x0003eb76f6f7f755ull, 0xFFCEA50FDB2F953Bull, 0xC332DDEFBE6C5AA5ull,
0x6558218568AB9702ull, 0x2AEF7DAD5B6E2F84ull, 0x1521B62829076170ull,
0xECDD4775619F1510ull, 0x13CCA830EB61BD96ull, 0x0334FE1EAA0363CFull,
0xB5735C904C70A239ull, 0xD59E9E0BCBAADE14ull, 0xEECC86BC60622CA7ull});
std::vector<int> output(6);
{
absl::discrete_distribution<int32_t> dist({1.0, 2.0, 3.0, 5.0, 2.0});
EXPECT_EQ(0, dist.min());
EXPECT_EQ(4, dist.max());
for (auto& v : output) {
v = dist(urbg);
}
EXPECT_EQ(12, urbg.invocations());
}
EXPECT_THAT(output, testing::ElementsAre(3, 3, 1, 3, 3, 3));
{
urbg.reset();
absl::discrete_distribution<int64_t> dist({1.0, 2.0, 3.0, 5.0, 2.0});
EXPECT_EQ(0, dist.min());
EXPECT_EQ(4, dist.max());
for (auto& v : output) {
v = dist(urbg);
}
EXPECT_EQ(12, urbg.invocations());
}
EXPECT_THAT(output, testing::ElementsAre(3, 3, 0, 3, 0, 4));
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/discrete_distribution.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/discrete_distribution_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
831874c0-a4e8-420e-a31a-f79ad8866b8a | cpp | tensorflow/tensorflow | stringprintf | third_party/xla/third_party/tsl/tsl/platform/stringprintf.cc | third_party/xla/third_party/tsl/tsl/platform/stringprintf_test.cc | #include "tsl/platform/stringprintf.h"
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
namespace tsl {
namespace strings {
void Appendv(string* dst, const char* format, va_list ap) {
static const int kSpaceLength = 1024;
char space[kSpaceLength];
va_list backup_ap;
va_copy(backup_ap, ap);
int result = vsnprintf(space, kSpaceLength, format, backup_ap);
va_end(backup_ap);
if (result < kSpaceLength) {
if (result >= 0) {
dst->append(space, result);
return;
}
#ifdef _MSC_VER
va_copy(backup_ap, ap);
result = vsnprintf(nullptr, 0, format, backup_ap);
va_end(backup_ap);
#endif
if (result < 0) {
return;
}
}
int length = result + 1;
char* buf = new char[length];
va_copy(backup_ap, ap);
result = vsnprintf(buf, length, format, backup_ap);
va_end(backup_ap);
if (result >= 0 && result < length) {
dst->append(buf, result);
}
delete[] buf;
}
string Printf(const char* format, ...) {
va_list ap;
va_start(ap, format);
string result;
Appendv(&result, format, ap);
va_end(ap);
return result;
}
void Appendf(string* dst, const char* format, ...) {
va_list ap;
va_start(ap, format);
Appendv(dst, format, ap);
va_end(ap);
}
}
} | #include "tsl/platform/stringprintf.h"
#include <string>
#include "tsl/platform/test.h"
namespace tsl {
namespace strings {
namespace {
TEST(PrintfTest, Empty) {
EXPECT_EQ("", Printf("%s", string().c_str()));
EXPECT_EQ("", Printf("%s", ""));
}
TEST(PrintfTest, Misc) {
#if !defined(_MSC_VER)
EXPECT_EQ("123hello w", Printf("%3$d%2$s %1$c", 'w', "hello", 123));
#endif
}
TEST(AppendfTest, Empty) {
string value("Hello");
const char* empty = "";
Appendf(&value, "%s", empty);
EXPECT_EQ("Hello", value);
}
TEST(AppendfTest, EmptyString) {
string value("Hello");
Appendf(&value, "%s", "");
EXPECT_EQ("Hello", value);
}
TEST(AppendfTest, String) {
string value("Hello");
Appendf(&value, " %s", "World");
EXPECT_EQ("Hello World", value);
}
TEST(AppendfTest, Int) {
string value("Hello");
Appendf(&value, " %d", 123);
EXPECT_EQ("Hello 123", value);
}
TEST(PrintfTest, Multibyte) {
char* old_locale = setlocale(LC_CTYPE, nullptr);
setlocale(LC_CTYPE, "en_US.utf8");
const char kInvalidCodePoint[] = "\375\067s";
string value = Printf("%.*s", 3, kInvalidCodePoint);
EXPECT_TRUE(value.empty() || value == kInvalidCodePoint);
int n = 2048;
char* buf = new char[n + 1];
memset(buf, ' ', n - 3);
memcpy(buf + n - 3, kInvalidCodePoint, 4);
value = Printf("%.*s", n, buf);
EXPECT_TRUE(value.empty() || value == buf);
delete[] buf;
setlocale(LC_CTYPE, old_locale);
}
TEST(PrintfTest, NoMultibyte) {
char* old_locale = setlocale(LC_CTYPE, nullptr);
setlocale(LC_CTYPE, "POSIX");
string value = Printf("%.*s", 3, "\375\067s");
setlocale(LC_CTYPE, old_locale);
EXPECT_EQ("\375\067s", value);
}
TEST(PrintfTest, DontOverwriteErrno) {
errno = ECHILD;
string value = Printf("Hello, %s!", "World");
EXPECT_EQ(ECHILD, errno);
}
TEST(PrintfTest, LargeBuf) {
int n = 2048;
char* buf = new char[n + 1];
memset(buf, ' ', n);
buf[n] = 0;
string value = Printf("%s", buf);
EXPECT_EQ(buf, value);
delete[] buf;
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/stringprintf.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/stringprintf_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b0a8a1a8-d85a-4b67-a2ca-d22ba5e3021d | cpp | tensorflow/tensorflow | representative_dataset | tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/representative_dataset.cc | tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/representative_dataset_test.cc | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/representative_dataset.h"
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
namespace stablehlo::quantization {
using ::tensorflow::quantization::RepresentativeDatasetFile;
absl::StatusOr<absl::flat_hash_map<std::string, RepresentativeDatasetFile>>
CreateRepresentativeDatasetFileMap(absl::Span<const RepresentativeDatasetConfig>
representative_dataset_configs) {
absl::flat_hash_map<std::string, RepresentativeDatasetFile>
repr_dataset_file_map{};
for (const RepresentativeDatasetConfig& dataset_config :
representative_dataset_configs) {
RepresentativeDatasetFile repr_dataset_file;
repr_dataset_file.set_tfrecord_file_path(dataset_config.tf_record().path());
const std::string signature_key = dataset_config.has_signature_key()
? dataset_config.signature_key()
: "serving_default";
if (repr_dataset_file_map.contains(signature_key)) {
return absl::InvalidArgumentError(
absl::StrCat("RepresentativeDatasetConfig should not contain "
"duplicate signature key: ",
signature_key));
}
repr_dataset_file_map[signature_key] = std::move(repr_dataset_file);
}
return repr_dataset_file_map;
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/representative_dataset.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tsl/platform/status_matchers.h"
namespace mlir::quant::stablehlo {
namespace {
using ::stablehlo::quantization::RepresentativeDatasetConfig;
using ::tensorflow::quantization::RepresentativeDatasetFile;
using ::testing::Contains;
using ::testing::HasSubstr;
using ::testing::Key;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
TEST(CreateRepresentativeDatasetFileMapTest,
ConfigWithoutExplicitSignatureKeyMappedToServingDefault) {
std::vector<RepresentativeDatasetConfig> representative_dataset_configs;
RepresentativeDatasetConfig config{};
*(config.mutable_tf_record()->mutable_path()) = "test_path";
representative_dataset_configs.push_back(config);
const absl::StatusOr<
absl::flat_hash_map<std::string, RepresentativeDatasetFile>>
representative_dataset_file_map =
CreateRepresentativeDatasetFileMap(representative_dataset_configs);
ASSERT_THAT(representative_dataset_file_map, IsOk());
ASSERT_THAT(*representative_dataset_file_map, SizeIs(1));
EXPECT_THAT(*representative_dataset_file_map,
Contains(Key("serving_default")));
EXPECT_THAT(representative_dataset_file_map->at("serving_default")
.tfrecord_file_path(),
StrEq("test_path"));
}
TEST(CreateRepresentativeDatasetFileMapTest, ConfigWithExplicitSignatureKey) {
std::vector<RepresentativeDatasetConfig> representative_dataset_configs;
RepresentativeDatasetConfig config{};
config.set_signature_key("test_signature_key");
*(config.mutable_tf_record()->mutable_path()) = "test_path";
representative_dataset_configs.push_back(config);
const absl::StatusOr<
absl::flat_hash_map<std::string, RepresentativeDatasetFile>>
representative_dataset_file_map =
CreateRepresentativeDatasetFileMap(representative_dataset_configs);
ASSERT_THAT(representative_dataset_file_map, IsOk());
ASSERT_THAT(*representative_dataset_file_map, SizeIs(1));
EXPECT_THAT(*representative_dataset_file_map,
Contains(Key(StrEq("test_signature_key"))));
EXPECT_THAT(representative_dataset_file_map->at("test_signature_key")
.tfrecord_file_path(),
StrEq("test_path"));
}
TEST(CreateRepresentativeDatasetFileMapTest,
ConfigWithDuplicateSignatureKeyReturnsInvalidArgumentError) {
std::vector<RepresentativeDatasetConfig> representative_dataset_configs;
RepresentativeDatasetConfig config_1{};
config_1.set_signature_key("serving_default");
*(config_1.mutable_tf_record()->mutable_path()) = "test_path_1";
representative_dataset_configs.push_back(config_1);
RepresentativeDatasetConfig config_2{};
*(config_2.mutable_tf_record()->mutable_path()) = "test_path_2";
representative_dataset_configs.push_back(config_2);
const absl::StatusOr<
absl::flat_hash_map<std::string, RepresentativeDatasetFile>>
representative_dataset_file_map =
CreateRepresentativeDatasetFileMap(representative_dataset_configs);
EXPECT_THAT(representative_dataset_file_map,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("duplicate signature key: serving_default")));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/representative_dataset.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/representative_dataset_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
13107c29-57b7-48ab-a8cf-a9ea30beefd5 | cpp | google/cel-cpp | unknown_type | common/types/unknown_type.h | common/types/unknown_type_test.cc | #ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPES_UNKNOWN_TYPE_H_
#define THIRD_PARTY_CEL_CPP_COMMON_TYPES_UNKNOWN_TYPE_H_
#include <ostream>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "common/type_kind.h"
namespace cel {
class Type;
class TypeParameters;
class UnknownType final {
public:
static constexpr TypeKind kKind = TypeKind::kUnknown;
static constexpr absl::string_view kName = "*unknown*";
UnknownType() = default;
UnknownType(const UnknownType&) = default;
UnknownType(UnknownType&&) = default;
UnknownType& operator=(const UnknownType&) = default;
UnknownType& operator=(UnknownType&&) = default;
static TypeKind kind() { return kKind; }
static absl::string_view name() { return kName; }
static TypeParameters GetParameters();
static std::string DebugString() { return std::string(name()); }
constexpr void swap(UnknownType&) noexcept {}
};
inline constexpr void swap(UnknownType& lhs, UnknownType& rhs) noexcept {
lhs.swap(rhs);
}
inline constexpr bool operator==(UnknownType, UnknownType) { return true; }
inline constexpr bool operator!=(UnknownType lhs, UnknownType rhs) {
return !operator==(lhs, rhs);
}
template <typename H>
H AbslHashValue(H state, UnknownType) {
return std::move(state);
}
inline std::ostream& operator<<(std::ostream& out, const UnknownType& type) {
return out << type.DebugString();
}
}
#endif | #include <sstream>
#include "absl/hash/hash.h"
#include "common/type.h"
#include "internal/testing.h"
namespace cel {
namespace {
TEST(UnknownType, Kind) {
EXPECT_EQ(UnknownType().kind(), UnknownType::kKind);
EXPECT_EQ(Type(UnknownType()).kind(), UnknownType::kKind);
}
TEST(UnknownType, Name) {
EXPECT_EQ(UnknownType().name(), UnknownType::kName);
EXPECT_EQ(Type(UnknownType()).name(), UnknownType::kName);
}
TEST(UnknownType, DebugString) {
{
std::ostringstream out;
out << UnknownType();
EXPECT_EQ(out.str(), UnknownType::kName);
}
{
std::ostringstream out;
out << Type(UnknownType());
EXPECT_EQ(out.str(), UnknownType::kName);
}
}
TEST(UnknownType, Hash) {
EXPECT_EQ(absl::HashOf(UnknownType()), absl::HashOf(UnknownType()));
}
TEST(UnknownType, Equal) {
EXPECT_EQ(UnknownType(), UnknownType());
EXPECT_EQ(Type(UnknownType()), UnknownType());
EXPECT_EQ(UnknownType(), Type(UnknownType()));
EXPECT_EQ(Type(UnknownType()), Type(UnknownType()));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/unknown_type.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/unknown_type_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
6e3458b4-6bda-4c3d-b527-832bd9c9dbfd | cpp | tensorflow/tensorflow | spmd_prepare | third_party/xla/xla/service/spmd/spmd_prepare.cc | third_party/xla/xla/service/spmd/spmd_prepare_test.cc | #include "xla/service/spmd/spmd_prepare.h"
#include <memory>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/pattern_matcher.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace spmd {
namespace {
absl::StatusOr<bool> ProcessScatter(HloInstruction* hlo,
const CallGraph& call_graph) {
if (hlo->opcode() != HloOpcode::kScatter) {
return false;
}
HloScatterInstruction* scatter = Cast<HloScatterInstruction>(hlo);
HloComputation* computation = hlo->parent();
if (scatter->scatter_operand_count() > 1) {
return false;
}
HloInstruction* operand = scatter->scatter_operands()[0];
HloInstruction* indices = scatter->scatter_indices();
HloInstruction* updates = scatter->scatter_updates()[0];
if (operand->opcode() != HloOpcode::kAdd ||
indices->opcode() != HloOpcode::kConcatenate ||
indices->operand_count() != 2 ||
updates->opcode() != HloOpcode::kConcatenate ||
updates->operand_count() != 2 ||
!Match(scatter->to_apply()->root_instruction(),
match::AddAnyOrder(match::Parameter(0), match::Parameter(1)))) {
return false;
}
const auto& dnums = scatter->scatter_dimension_numbers();
auto get_parallel_dims_for_scatter = [&dnums, &call_graph](
const HloInstruction* operand,
const HloInstruction* indices,
const HloInstruction* updates) {
std::vector<int64_t> slice_sizes = hlo_sharding_util::GetScatterSliceSize(
operand->shape(), updates->shape(), dnums);
int64_t index_vector_dim = dnums.index_vector_dim();
const auto& index_map = dnums.scatter_dims_to_operand_dims();
return hlo_sharding_util::GetGatherScatterBatchParallelDims(
operand, indices, slice_sizes, index_vector_dim, index_map, call_graph);
};
if (get_parallel_dims_for_scatter(operand, indices, updates).has_value()) {
return false;
}
HloInstruction* lhs_indices = indices->mutable_operand(0);
HloInstruction* rhs_indices = indices->mutable_operand(1);
HloInstruction* lhs_updates = updates->mutable_operand(0);
HloInstruction* rhs_updates = updates->mutable_operand(1);
std::optional<hlo_sharding_util::GatherScatterParallelDims> lhs_parallel_dims;
std::optional<hlo_sharding_util::GatherScatterParallelDims> rhs_parallel_dims;
lhs_parallel_dims =
get_parallel_dims_for_scatter(operand, lhs_indices, lhs_updates);
if (!lhs_parallel_dims.has_value()) {
return false;
}
rhs_parallel_dims =
get_parallel_dims_for_scatter(operand, rhs_indices, rhs_updates);
if (!rhs_parallel_dims.has_value()) {
return false;
}
if (lhs_parallel_dims->operand_parallel_dims !=
rhs_parallel_dims->operand_parallel_dims ||
lhs_parallel_dims->indices_parallel_dims !=
rhs_parallel_dims->indices_parallel_dims) {
return false;
}
if (lhs_parallel_dims->operand_parallel_dims.size() !=
lhs_parallel_dims->indices_parallel_dims.size()) {
return false;
}
HloInstruction* lhs_operand = operand->mutable_operand(0);
HloInstruction* rhs_operand = operand->mutable_operand(1);
bool any_sharded_parallel_dim = false;
if (!lhs_operand->has_sharding() || !rhs_operand->has_sharding() ||
!lhs_indices->has_sharding() || !rhs_indices->has_sharding()) {
return false;
}
for (int i = 0; i < lhs_parallel_dims->operand_parallel_dims.size(); ++i) {
if (lhs_operand->sharding().IsTiled() &&
lhs_operand->sharding().tile_assignment().dim(
lhs_parallel_dims->operand_parallel_dims[i]) != 1 &&
lhs_indices->sharding().tile_assignment().dim(
lhs_parallel_dims->indices_parallel_dims[i]) != 1) {
any_sharded_parallel_dim = true;
break;
}
}
if (!any_sharded_parallel_dim) {
return false;
}
HloInstruction* scatter0 =
computation->AddInstruction(HloInstruction::CreateScatter(
scatter->shape(), operand, lhs_indices, lhs_updates,
scatter->to_apply(), dnums, false, false));
scatter0->set_metadata(scatter->metadata());
scatter0->set_sharding(scatter->sharding());
HloInstruction* scatter1 =
computation->AddInstruction(HloInstruction::CreateScatter(
scatter->shape(), scatter0, rhs_indices, rhs_updates,
scatter->to_apply(), dnums, false, false));
scatter1->set_metadata(scatter->metadata());
scatter1->set_sharding(scatter->sharding());
TF_RETURN_IF_ERROR(scatter->ReplaceAllUsesWith(scatter1));
return true;
}
absl::StatusOr<bool> RunOnComputation(HloComputation* computation,
const CallGraph& call_graph) {
bool changed = false;
for (HloInstruction* hlo : computation->MakeInstructionPostOrder()) {
if (!hlo->has_sharding()) {
continue;
}
TF_ASSIGN_OR_RETURN(bool scatter_changed, ProcessScatter(hlo, call_graph));
if (scatter_changed) {
changed = true;
continue;
}
}
return changed;
}
}
absl::StatusOr<bool> SpmdPrepare::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
for (auto comp : module->computations(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool comp_changed, RunOnComputation(comp, *call_graph));
changed |= comp_changed;
}
return changed;
}
}
} | #include "xla/service/spmd/spmd_prepare.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace spmd {
namespace {
namespace op = xla::testing::opcode_matchers;
class SpmdPrepareTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module, int64_t distance_threshold = 100) {
TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(
hlo_module, GetModuleConfigForTest()));
HloPassPipeline pipeline("spmd-prepare");
pipeline.AddPass<SpmdPrepare>();
TF_RETURN_IF_ERROR(pipeline.Run(module.get()).status());
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
};
TEST_F(SpmdPrepareTest, ScatterParallelIndexSplit) {
absl::string_view hlo_string = R"(
HloModule module
region_157.5067 {
Arg_0.5068 = f32[] parameter(0)
Arg_1.5069 = f32[] parameter(1)
ROOT add.5070 = f32[] add(Arg_0.5068, Arg_1.5069)
}
ENTRY entry {
p0 = f32[16,1000,2000]{2,1,0} parameter(0), sharding={devices=[4,2,1]<=[8]}
p1 = f32[16,1000,2000]{2,1,0} parameter(1), sharding={devices=[4,2,1]<=[8]}
p2 = s32[16,1000,64,1]{3,2,1,0} parameter(2), sharding={devices=[4,2,1,1]<=[8]}
p3 = f32[16,1000,64]{2,1,0} parameter(3), sharding={devices=[4,2,1]<=[8]}
p4 = f32[16,1000,64]{2,1,0} parameter(4), sharding={devices=[4,2,1]<=[8]}
iota.0 = s32[16,1000,64,1]{3,2,1,0} iota(), iota_dimension=0, sharding={devices=[4,2,1,1]<=[8]}
iota.1 = s32[16,1000,64,1]{3,2,1,0} iota(), iota_dimension=1, sharding={devices=[4,2,1,1]<=[8]}
iota.2 = s32[16,1000,64,1]{3,2,1,0} iota(), iota_dimension=0, sharding={devices=[4,2,1,1]<=[8]}
iota.3 = s32[16,1000,64,1]{3,2,1,0} iota(), iota_dimension=1, sharding={devices=[4,2,1,1]<=[8]}
concatenate.0 = s32[16,1000,64,3]{3,2,1,0} concatenate(iota.0, iota.1, p2), dimensions={3}, sharding={devices=[4,2,1,1]<=[8]}
concatenate.1 = s32[16,1000,64,3]{3,2,1,0} concatenate(iota.2, iota.3, p2), dimensions={3}, sharding={devices=[4,2,1,1]<=[8]}
concatenate.130 = s32[32,1000,64,3]{3,2,1,0} concatenate(concatenate.0, concatenate.1), dimensions={0}, sharding={devices=[4,2,1,1]<=[8]}
concatenate.131 = f32[32,1000,64]{2,1,0} concatenate(p3, p4), dimensions={0}, sharding={devices=[4,2,1]<=[8]}
add.190 = f32[16,1000,2000]{2,1,0} add(p0, p1), sharding={devices=[4,2,1]<=[8]}
ROOT scatter.2 = f32[16,1000,2000]{2,1,0} scatter(add.190, concatenate.130, concatenate.131), update_window_dims={}, inserted_window_dims={0,1,2}, scatter_dims_to_operand_dims={0,1,2}, index_vector_dim=3, to_apply=region_157.5067, sharding={devices=[4,2,1]<=[8]}
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
HloInstruction* root = module->entry_computation()->root_instruction();
XLA_VLOG_LINES(1, module->ToString());
EXPECT_THAT(
root,
op::Scatter(
op::Scatter(op::Add(),
op::Concatenate(op::Iota(), op::Iota(), op::Parameter()),
op::Parameter()),
op::Concatenate(op::Iota(), op::Iota(), op::Parameter()),
op::Parameter()));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/spmd_prepare.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/spmd_prepare_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9cf5631b-5f5c-40c4-9b7c-57d2040f9a38 | cpp | google/googletest | gmock-actions | googlemock/include/gmock/gmock-actions.h | googlemock/test/gmock-actions_test.cc | #ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_
#define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_
#ifndef _WIN32_WCE
#include <errno.h>
#endif
#include <algorithm>
#include <exception>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include "gmock/internal/gmock-internal-utils.h"
#include "gmock/internal/gmock-port.h"
#include "gmock/internal/gmock-pp.h"
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4100)
namespace testing {
namespace internal {
template <typename T, bool kDefaultConstructible>
struct BuiltInDefaultValueGetter {
static T Get() { return T(); }
};
template <typename T>
struct BuiltInDefaultValueGetter<T, false> {
static T Get() {
Assert(false, __FILE__, __LINE__,
"Default action undefined for the function return type.");
#if defined(__GNUC__) || defined(__clang__)
__builtin_unreachable();
#elif defined(_MSC_VER)
__assume(0);
#else
return Invalid<T>();
#endif
}
};
template <typename T>
class BuiltInDefaultValue {
public:
static bool Exists() { return ::std::is_default_constructible<T>::value; }
static T Get() {
return BuiltInDefaultValueGetter<
T, ::std::is_default_constructible<T>::value>::Get();
}
};
template <typename T>
class BuiltInDefaultValue<const T> {
public:
static bool Exists() { return BuiltInDefaultValue<T>::Exists(); }
static T Get() { return BuiltInDefaultValue<T>::Get(); }
};
template <typename T>
class BuiltInDefaultValue<T*> {
public:
static bool Exists() { return true; }
static T* Get() { return nullptr; }
};
#define GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(type, value) \
template <> \
class BuiltInDefaultValue<type> { \
public: \
static bool Exists() { return true; } \
static type Get() { return value; } \
}
GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(void, );
GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(::std::string, "");
GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(bool, false);
GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned char, '\0');
GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed char, '\0');
GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(char, '\0');
#if GMOCK_WCHAR_T_IS_NATIVE_
GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(wchar_t, 0U);
#endif
GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned short, 0U);
GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed short, 0);
GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned int, 0U);
GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed int, 0);
GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned long, 0UL);
GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed long, 0L);
GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned long long, 0);
GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed long long, 0);
GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(float, 0);
GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(double, 0);
#undef GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_
template <typename P>
struct negation
: std::integral_constant<bool, bool(!P::value)> {};
template <typename...>
struct conjunction : std::true_type {};
template <typename P1>
struct conjunction<P1> : P1 {};
template <typename P1, typename... Ps>
struct conjunction<P1, Ps...>
: std::conditional<bool(P1::value), conjunction<Ps...>, P1>::type {};
template <typename...>
struct disjunction : std::false_type {};
template <typename P1>
struct disjunction<P1> : P1 {};
template <typename P1, typename... Ps>
struct disjunction<P1, Ps...>
: std::conditional<!bool(P1::value), disjunction<Ps...>, P1>::type {};
template <typename...>
using void_t = void;
template <typename From, typename To>
struct is_implicitly_convertible {
private:
template <typename T>
static void Accept(T);
template <typename T>
static T Make();
template <typename T, typename = decltype(Accept<To>(Make<T>()))>
static std::true_type TestImplicitConversion(int);
template <typename T>
static std::false_type TestImplicitConversion(...);
public:
using type = decltype(TestImplicitConversion<From>(0));
static constexpr bool value = type::value;
};
template <typename F, typename... Args>
using call_result_t = decltype(std::declval<F>()(std::declval<Args>()...));
template <typename Void, typename R, typename F, typename... Args>
struct is_callable_r_impl : std::false_type {};
template <typename R, typename F, typename... Args>
struct is_callable_r_impl<void_t<call_result_t<F, Args...>>, R, F, Args...>
: std::conditional<
std::is_void<R>::value,
std::true_type,
is_implicitly_convertible<call_result_t<F, Args...>, R>>::type {};
template <typename R, typename F, typename... Args>
using is_callable_r = is_callable_r_impl<void, R, F, Args...>;
template <typename T>
typename std::add_const<T>::type& as_const(T& t) {
return t;
}
}
template <typename F>
class OnceAction;
template <typename Result, typename... Args>
class OnceAction<Result(Args...)> final {
private:
template <typename Callable>
using IsDirectlyCompatible = internal::conjunction<
std::is_constructible<typename std::decay<Callable>::type, Callable>,
internal::is_callable_r<Result, typename std::decay<Callable>::type,
Args...>>;
template <typename Callable>
using IsCompatibleAfterIgnoringArguments = internal::conjunction<
std::is_constructible<typename std::decay<Callable>::type, Callable>,
internal::is_callable_r<Result, typename std::decay<Callable>::type>>;
public:
template <typename Callable,
typename std::enable_if<
internal::conjunction<
internal::negation<std::is_same<
OnceAction, typename std::decay<Callable>::type>>,
IsDirectlyCompatible<Callable>>
::value,
int>::type = 0>
OnceAction(Callable&& callable)
: function_(StdFunctionAdaptor<typename std::decay<Callable>::type>(
{}, std::forward<Callable>(callable))) {}
template <typename Callable,
typename std::enable_if<
internal::conjunction<
internal::negation<std::is_same<
OnceAction, typename std::decay<Callable>::type>>,
internal::negation<IsDirectlyCompatible<Callable>>,
IsCompatibleAfterIgnoringArguments<Callable>>::value,
int>::type = 0>
OnceAction(Callable&& callable)
: OnceAction(IgnoreIncomingArguments<typename std::decay<Callable>::type>{
std::forward<Callable>(callable)}) {}
OnceAction(const OnceAction&) = delete;
OnceAction& operator=(const OnceAction&) = delete;
OnceAction(OnceAction&&) = default;
Result Call(Args... args) && {
return function_(std::forward<Args>(args)...);
}
private:
template <typename Callable>
class StdFunctionAdaptor final {
public:
struct CallableTag final {};
template <typename F>
explicit StdFunctionAdaptor(CallableTag, F&& callable)
: callable_(std::make_shared<Callable>(std::forward<F>(callable))) {}
template <typename... ArgRefs>
internal::call_result_t<Callable, ArgRefs...> operator()(
ArgRefs&&... args) const {
return std::move(*callable_)(std::forward<ArgRefs>(args)...);
}
private:
std::shared_ptr<Callable> callable_;
};
template <typename Callable>
struct IgnoreIncomingArguments {
internal::call_result_t<Callable> operator()(Args&&...) {
return std::move(callable)();
}
Callable callable;
};
std::function<Result(Args...)> function_;
};
template <typename T>
class DefaultValue {
public:
static void Set(T x) {
delete producer_;
producer_ = new FixedValueProducer(x);
}
typedef T (*FactoryFunction)();
static void SetFactory(FactoryFunction factory) {
delete producer_;
producer_ = new FactoryValueProducer(factory);
}
static void Clear() {
delete producer_;
producer_ = nullptr;
}
static bool IsSet() { return producer_ != nullptr; }
static bool Exists() {
return IsSet() || internal::BuiltInDefaultValue<T>::Exists();
}
static T Get() {
return producer_ == nullptr ? internal::BuiltInDefaultValue<T>::Get()
: producer_->Produce();
}
private:
class ValueProducer {
public:
virtual ~ValueProducer() = default;
virtual T Produce() = 0;
};
class FixedValueProducer : public ValueProducer {
public:
explicit FixedValueProducer(T value) : value_(value) {}
T Produce() override { return value_; }
private:
const T value_;
FixedValueProducer(const FixedValueProducer&) = delete;
FixedValueProducer& operator=(const FixedValueProducer&) = delete;
};
class FactoryValueProducer : public ValueProducer {
public:
explicit FactoryValueProducer(FactoryFunction factory)
: factory_(factory) {}
T Produce() override { return factory_(); }
private:
const FactoryFunction factory_;
FactoryValueProducer(const FactoryValueProducer&) = delete;
FactoryValueProducer& operator=(const FactoryValueProducer&) = delete;
};
static ValueProducer* producer_;
};
template <typename T>
class DefaultValue<T&> {
public:
static void Set(T& x) {
address_ = &x;
}
static void Clear() { address_ = nullptr; }
static bool IsSet() { return address_ != nullptr; }
static bool Exists() {
return IsSet() || internal::BuiltInDefaultValue<T&>::Exists();
}
static T& Get() {
return address_ == nullptr ? internal::BuiltInDefaultValue<T&>::Get()
: *address_;
}
private:
static T* address_;
};
template <>
class DefaultValue<void> {
public:
static bool Exists() { return true; }
static void Get() {}
};
template <typename T>
typename DefaultValue<T>::ValueProducer* DefaultValue<T>::producer_ = nullptr;
template <typename T>
T* DefaultValue<T&>::address_ = nullptr;
template <typename F>
class ActionInterface {
public:
typedef typename internal::Function<F>::Result Result;
typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
ActionInterface() = default;
virtual ~ActionInterface() = default;
virtual Result Perform(const ArgumentTuple& args) = 0;
private:
ActionInterface(const ActionInterface&) = delete;
ActionInterface& operator=(const ActionInterface&) = delete;
};
template <typename F>
class Action;
template <typename R, typename... Args>
class Action<R(Args...)> {
private:
using F = R(Args...);
struct ActionAdapter {
::std::shared_ptr<ActionInterface<F>> impl_;
template <typename... InArgs>
typename internal::Function<F>::Result operator()(InArgs&&... args) {
return impl_->Perform(
::std::forward_as_tuple(::std::forward<InArgs>(args)...));
}
};
template <typename G>
using IsCompatibleFunctor = std::is_constructible<std::function<F>, G>;
public:
typedef typename internal::Function<F>::Result Result;
typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
Action() = default;
template <
typename G,
typename = typename std::enable_if<internal::disjunction<
IsCompatibleFunctor<G>, std::is_constructible<std::function<Result()>,
G>>::value>::type>
Action(G&& fun) {
Init(::std::forward<G>(fun), IsCompatibleFunctor<G>());
}
explicit Action(ActionInterface<F>* impl)
: fun_(ActionAdapter{::std::shared_ptr<ActionInterface<F>>(impl)}) {}
template <typename Func>
Action(const Action<Func>& action)
: fun_(action.fun_) {}
bool IsDoDefault() const { return fun_ == nullptr; }
Result Perform(ArgumentTuple args) const {
if (IsDoDefault()) {
internal::IllegalDoDefault(__FILE__, __LINE__);
}
return internal::Apply(fun_, ::std::move(args));
}
operator OnceAction<F>() const {
struct OA {
Action<F> action;
R operator()(Args... args) && {
return action.Perform(
std::forward_as_tuple(std::forward<Args>(args)...));
}
};
return OA{*this};
}
private:
template <typename G>
friend class Action;
template <typename G>
void Init(G&& g, ::std::true_type) {
fun_ = ::std::forward<G>(g);
}
template <typename G>
void Init(G&& g, ::std::false_type) {
fun_ = IgnoreArgs<typename ::std::decay<G>::type>{::std::forward<G>(g)};
}
template <typename FunctionImpl>
struct IgnoreArgs {
template <typename... InArgs>
Result operator()(const InArgs&...) const {
return function_impl();
}
FunctionImpl function_impl;
};
::std::function<F> fun_;
};
template <typename Impl>
class PolymorphicAction {
public:
explicit PolymorphicAction(const Impl& impl) : impl_(impl) {}
template <typename F>
operator Action<F>() const {
return Action<F>(new MonomorphicImpl<F>(impl_));
}
private:
template <typename F>
class MonomorphicImpl : public ActionInterface<F> {
public:
typedef typename internal::Function<F>::Result Result;
typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
explicit MonomorphicImpl(const Impl& impl) : impl_(impl) {}
Result Perform(const ArgumentTuple& args) override {
return impl_.template Perform<Result>(args);
}
private:
Impl impl_;
};
Impl impl_;
};
template <typename F>
Action<F> MakeAction(ActionInterface<F>* impl) {
return Action<F>(impl);
}
template <typename Impl>
inline PolymorphicAction<Impl> MakePolymorphicAction(const Impl& impl) {
return PolymorphicAction<Impl>(impl);
}
namespace internal {
template <typename T>
struct ByMoveWrapper {
explicit ByMoveWrapper(T value) : payload(std::move(value)) {}
T payload;
};
template <typename R>
class ReturnAction final {
public:
explicit ReturnAction(R value) : value_(std::move(value)) {}
template <typename U, typename... Args,
typename = typename std::enable_if<conjunction<
negation<std::is_same<void, U>>,
negation<std::is_reference<U>>,
std::is_convertible<R, U>,
std::is_move_constructible<U>>::value>::type>
operator OnceAction<U(Args...)>() && {
return Impl<U>(std::move(value_));
}
template <typename U, typename... Args,
typename = typename std::enable_if<conjunction<
negation<std::is_same<void, U>>,
negation<std::is_reference<U>>,
std::is_convertible<const R&, U>,
std::is_copy_constructible<U>>::value>::type>
operator Action<U(Args...)>() const {
return Impl<U>(value_);
}
private:
template <typename U>
class Impl final {
public:
explicit Impl(R&& input_value)
: state_(new State(std::move(input_value))) {}
explicit Impl(const R& input_value) : state_(new State(input_value)) {}
U operator()() && { return std::move(state_->value); }
U operator()() const& { return state_->value; }
private:
struct State {
explicit State(const R& input_value_in)
: input_value(input_value_in),
value(ImplicitCast_<U>(internal::as_const(input_value))) {}
explicit State(R&& input_value_in)
: input_value(std::move(input_value_in)),
value(ImplicitCast_<U>(std::move(input_value))) {}
R input_value;
U value;
};
const std::shared_ptr<State> state_;
};
R value_;
};
template <typename T>
class ReturnAction<ByMoveWrapper<T>> final {
public:
explicit ReturnAction(ByMoveWrapper<T> wrapper)
: state_(new State(std::move(wrapper.payload))) {}
T operator()() const {
GTEST_CHECK_(!state_->called)
<< "A ByMove() action must be performed at most once.";
state_->called = true;
return std::move(state_->value);
}
private:
struct State {
explicit State(T&& value_in) : value(std::move(value_in)) {}
T value;
bool called = false;
};
const std::shared_ptr<State> state_;
};
class ReturnNullAction {
public:
template <typename Result, typename ArgumentTuple>
static Result Perform(const ArgumentTuple&) {
return nullptr;
}
};
class ReturnVoidAction {
public:
template <typename Result, typename ArgumentTuple>
static void Perform(const ArgumentTuple&) {
static_assert(std::is_void<Result>::value, "Result should be void.");
}
};
template <typename T>
class ReturnRefAction {
public:
explicit ReturnRefAction(T& ref) : ref_(ref) {}
template <typename F>
operator Action<F>() const {
typedef typename Function<F>::Result Result;
static_assert(std::is_reference<Result>::value,
"use Return instead of ReturnRef to return a value");
return Action<F>(new Impl<F>(ref_));
}
private:
template <typename F>
class Impl : public ActionInterface<F> {
public:
typedef typename Function<F>::Result Result;
typedef typename Function<F>::ArgumentTuple ArgumentTuple;
explicit Impl(T& ref) : ref_(ref) {}
Result Perform(const ArgumentTuple&) override { return ref_; }
private:
T& ref_;
};
T& ref_;
};
// Implements the ReturnRefOfCopy(x) action: a copy of x is taken when the
// action is constructed, and each perform returns a reference to that copy
// (the copy lives as long as the action itself).
template <typename T>
class ReturnRefOfCopyAction {
 public:
  explicit ReturnRefOfCopyAction(const T& value) : value_(value) {}
  template <typename F>
  operator Action<F>() const {
    typedef typename Function<F>::Result Result;
    // Catches the user error of writing ReturnRefOfCopy(x) where Return(x)
    // should have been used (the mocked function must return a reference).
    static_assert(std::is_reference<Result>::value,
                  "use Return instead of ReturnRefOfCopy to return a value");
    return Action<F>(new Impl<F>(value_));
  }
 private:
  // Implements ReturnRefOfCopy(x) for a particular function type F.
  template <typename F>
  class Impl : public ActionInterface<F> {
   public:
    typedef typename Function<F>::Result Result;
    typedef typename Function<F>::ArgumentTuple ArgumentTuple;
    explicit Impl(const T& value) : value_(value) {}
    Result Perform(const ArgumentTuple&) override { return value_; }
   private:
    T value_;  // The per-Impl copy whose reference is handed out.
  };
  const T value_;
};
// Implements the ReturnRoundRobin(v) action: successive performs return
// successive elements of `values`, wrapping back to the front at the end.
template <typename T>
class ReturnRoundRobinAction {
 public:
  explicit ReturnRoundRobinAction(std::vector<T> values) {
    // An empty sequence would make Next() index out of bounds.
    GTEST_CHECK_(!values.empty())
        << "ReturnRoundRobin requires at least one element.";
    state_->values = std::move(values);
  }
  template <typename... Args>
  T operator()(Args&&...) const {
    return state_->Next();
  }
 private:
  struct State {
    // Returns the current element and advances (wrapping) the cursor.
    T Next() {
      T ret_val = values[i++];
      if (i == values.size()) i = 0;
      return ret_val;
    }
    std::vector<T> values;
    size_t i = 0;  // Index of the next element to return.
  };
  // Shared so that copies of the action advance a single common cursor.
  std::shared_ptr<State> state_ = std::make_shared<State>();
};
// Implements the polymorphic DoDefault() action.
class DoDefaultAction {
 public:
  // This template conversion operator allows DoDefault() to be used wherever
  // an Action<F> is expected; a default-constructed Action<F> performs the
  // default action for that function type.
  template <typename F>
  operator Action<F>() const {
    return Action<F>();
  }
};
// Implements the Assign(ptr, value) action: performing it stores the
// remembered value through the remembered pointer, ignoring the mock call's
// arguments and result type entirely.
template <typename T1, typename T2>
class AssignAction {
 public:
  AssignAction(T1* ptr, T2 value) : target_(ptr), payload_(value) {}

  template <typename Result, typename ArgumentTuple>
  void Perform(const ArgumentTuple& /* args */) const {
    *target_ = payload_;
  }

 private:
  T1* const target_;   // Destination of the assignment (not owned).
  const T2 payload_;   // Value written on every perform.
};
#ifndef GTEST_OS_WINDOWS_MOBILE
// Implements the SetErrnoAndReturn(errval, result) action: simulates the
// failure convention of system calls / libc functions by setting errno and
// then returning the stored result.
template <typename T>
class SetErrnoAndReturnAction {
 public:
  SetErrnoAndReturnAction(int errno_value, T result)
      : errno_(errno_value), result_(result) {}
  template <typename Result, typename ArgumentTuple>
  Result Perform(const ArgumentTuple& /* args */) const {
    errno = errno_;  // Thread-local per POSIX; affects the calling thread.
    return result_;
  }
 private:
  const int errno_;
  const T result_;
};
#endif
// Implements SetArgPointee<N>(value): dereferences the N-th (0-based) call
// argument, which must be pointer-like, and assigns the stored value to it.
template <size_t N, typename A, typename = void>
struct SetArgumentPointeeAction {
  A value;  // Copied into the pointee of argument N on every invocation.

  template <typename... Args>
  void operator()(const Args&... args) const {
    const auto arg_refs = ::std::tie(args...);
    *::std::get<N>(arg_refs) = value;
  }
};
// Implements the Invoke(object_ptr, &Class::Method) action: forwards the
// mock call's arguments to the given method on the given object.
template <class Class, typename MethodPtr>
struct InvokeMethodAction {
  Class* const obj_ptr;        // Not owned; must outlive the action.
  const MethodPtr method_ptr;
  template <typename... Args>
  auto operator()(Args&&... args) const
      -> decltype((obj_ptr->*method_ptr)(std::forward<Args>(args)...)) {
    return (obj_ptr->*method_ptr)(std::forward<Args>(args)...);
  }
};
// Implements the InvokeWithoutArgs(f) action: f is called with no arguments
// regardless of the mocked function's own argument list.
template <typename FunctionImpl>
struct InvokeWithoutArgsAction {
  FunctionImpl function_impl;
  template <typename... Args>
  auto operator()(const Args&...) -> decltype(function_impl()) {
    return function_impl();
  }
};
// Implements the InvokeWithoutArgs(object_ptr, &Class::Method) action.
template <class Class, typename MethodPtr>
struct InvokeMethodWithoutArgsAction {
  Class* const obj_ptr;        // Not owned; must outlive the action.
  const MethodPtr method_ptr;
  using ReturnType =
      decltype((std::declval<Class*>()->*std::declval<MethodPtr>())());
  template <typename... Args>
  ReturnType operator()(const Args&...) const {
    return (obj_ptr->*method_ptr)();
  }
};
// Implements the IgnoreResult(action) action: wraps another action and
// discards its result, so it can be used where a void-returning action is
// expected (e.g. inside DoAll()).
template <typename A>
class IgnoreResultAction {
 public:
  explicit IgnoreResultAction(const A& action) : action_(action) {}
  template <typename F>
  operator Action<F>() const {
    typedef typename internal::Function<F>::Result Result;
    // Asserts at compile time that F returns void.
    static_assert(std::is_void<Result>::value, "Result type should be void.");
    return Action<F>(new Impl<F>(action_));
  }
 private:
  template <typename F>
  class Impl : public ActionInterface<F> {
   public:
    typedef typename internal::Function<F>::Result Result;
    typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
    explicit Impl(const A& action) : action_(action) {}
    void Perform(const ArgumentTuple& args) override {
      // Performs the wrapped action and drops its result on the floor.
      action_.Perform(args);
    }
   private:
    // OriginalFunction is F with its return type replaced by IgnoredValue,
    // so the wrapped action can be stored with a concrete signature.
    typedef
        typename internal::Function<F>::MakeResultIgnoredValue OriginalFunction;
    const Action<OriginalFunction> action_;
  };
  const A action_;
};
// Implements WithArgs<I...>(action): adapts an inner action so that it sees
// only the selected arguments I... of the mocked call, in the given order.
template <typename InnerAction, size_t... I>
struct WithArgsAction {
  InnerAction inner_action;
  // The signature of the function as seen by the inner action, given an outer
  // action with the given result and argument types.
  template <typename R, typename... Args>
  using InnerSignature =
      R(typename std::tuple_element<I, std::tuple<Args...>>::type...);
  // Rather than a call operator, we define conversion operators to particular
  // action types. This is necessary for embedded actions like DoDefault(),
  // which rely on action conversion operators rather than providing a call
  // operator, because even with a particular set of arguments they don't have
  // a fixed return type.
  template <
      typename R, typename... Args,
      typename std::enable_if<
          std::is_convertible<InnerAction,
                              // NOTE: InnerSignature can't be used here; MSVC
                              // fails to expand the I pack in that context, so
                              // internal::TupleElement is spelled out instead.
                              OnceAction<R(internal::TupleElement<
                                           I, std::tuple<Args...>>...)>>::value,
          int>::type = 0>
  operator OnceAction<R(Args...)>() && {
    struct OA {
      OnceAction<InnerSignature<R, Args...>> inner_action;
      R operator()(Args&&... args) && {
        // Projects the full argument list down to the selected indices.
        return std::move(inner_action)
            .Call(std::get<I>(
                std::forward_as_tuple(std::forward<Args>(args)...))...);
      }
    };
    return OA{std::move(inner_action)};
  }
  template <
      typename R, typename... Args,
      typename std::enable_if<
          std::is_convertible<const InnerAction&,
                              Action<R(internal::TupleElement<
                                       I, std::tuple<Args...>>...)>>::value,
          int>::type = 0>
  operator Action<R(Args...)>() const {
    Action<InnerSignature<R, Args...>> converted(inner_action);
    return [converted](Args&&... args) -> R {
      return converted.Perform(std::forward_as_tuple(
          std::get<I>(std::forward_as_tuple(std::forward<Args>(args)...))...));
    };
  }
};
// Implements the DoAll(...) action; specializations below peel off one action
// at a time.
template <typename... Actions>
class DoAllAction;
// Base case: only the final action remains. Its return value becomes the
// return value of the whole DoAll(...) expression.
template <typename FinalAction>
class DoAllAction<FinalAction> {
 public:
  // Tag so the variadic constructor below isn't mistaken for a copy ctor.
  struct UserConstructorTag {};
  template <typename T>
  explicit DoAllAction(UserConstructorTag, T&& action)
      : final_action_(std::forward<T>(action)) {}
  // Conversion operators (rather than a call operator) are required so that
  // embedded actions such as DoDefault(), which have no fixed return type,
  // still work.  Preferred: the final action is directly once-convertible.
  template <typename R, typename... Args,
            typename std::enable_if<
                std::is_convertible<FinalAction, OnceAction<R(Args...)>>::value,
                int>::type = 0>
  operator OnceAction<R(Args...)>() && {
    return std::move(final_action_);
  }
  // Fallback: route through Action<R(Args...)> when the final action is
  // convertible to Action but not directly to OnceAction.
  template <
      typename R, typename... Args,
      typename std::enable_if<
          conjunction<
              negation<
                  std::is_convertible<FinalAction, OnceAction<R(Args...)>>>,
              std::is_convertible<FinalAction, Action<R(Args...)>>>::value,
          int>::type = 0>
  operator OnceAction<R(Args...)>() && {
    return Action<R(Args...)>(std::move(final_action_));
  }
  template <
      typename R, typename... Args,
      typename std::enable_if<
          std::is_convertible<const FinalAction&, Action<R(Args...)>>::value,
          int>::type = 0>
  operator Action<R(Args...)>() const {
    return final_action_;
  }
 private:
  FinalAction final_action_;
};
// Recursive case: an initial action (whose result is ignored) followed by the
// remaining actions, which are handled by the base stored as a private base
// class.
template <typename InitialAction, typename... OtherActions>
class DoAllAction<InitialAction, OtherActions...>
    : private DoAllAction<OtherActions...> {
 private:
  using Base = DoAllAction<OtherActions...>;
  // All but the last action are fed arguments by value for scalars and by
  // const reference otherwise, so they cannot consume or mutate what the
  // final action will see.
  template <typename T>
  using InitialActionArgType =
      typename std::conditional<std::is_scalar<T>::value, T, const T&>::type;
 public:
  struct UserConstructorTag {};
  template <typename T, typename... U>
  explicit DoAllAction(UserConstructorTag, T&& initial_action,
                       U&&... other_actions)
      : Base({}, std::forward<U>(other_actions)...),
        initial_action_(std::forward<T>(initial_action)) {}
  // Preferred: initial action is once-convertible with the adjusted
  // (void-returning) signature, and the tail converts to OnceAction.
  template <
      typename R, typename... Args,
      typename std::enable_if<
          conjunction<std::is_convertible<
                          InitialAction,
                          OnceAction<void(InitialActionArgType<Args>...)>>,
                      std::is_convertible<Base, OnceAction<R(Args...)>>>::value,
          int>::type = 0>
  operator OnceAction<R(Args...)>() && {
    struct OA {
      OnceAction<void(InitialActionArgType<Args>...)> initial_action;
      OnceAction<R(Args...)> remaining_actions;
      R operator()(Args... args) && {
        // Run the initial action for its side effects, then the rest; only
        // the tail's result is returned.
        std::move(initial_action)
            .Call(static_cast<InitialActionArgType<Args>>(args)...);
        return std::move(remaining_actions).Call(std::forward<Args>(args)...);
      }
    };
    return OA{
        std::move(initial_action_),
        std::move(static_cast<Base&>(*this)),
    };
  }
  // Fallback: initial action only converts to Action (not OnceAction); wrap
  // it and recurse through DoAll again.
  template <
      typename R, typename... Args,
      typename std::enable_if<
          conjunction<
              negation<std::is_convertible<
                  InitialAction,
                  OnceAction<void(InitialActionArgType<Args>...)>>>,
              std::is_convertible<InitialAction,
                                  Action<void(InitialActionArgType<Args>...)>>,
              std::is_convertible<Base, OnceAction<R(Args...)>>>::value,
          int>::type = 0>
  operator OnceAction<R(Args...)>() && {
    return DoAll(
        Action<void(InitialActionArgType<Args>...)>(std::move(initial_action_)),
        std::move(static_cast<Base&>(*this)));
  }
  template <
      typename R, typename... Args,
      typename std::enable_if<
          conjunction<
              std::is_convertible<const InitialAction&,
                                  Action<void(InitialActionArgType<Args>...)>>,
              std::is_convertible<const Base&, Action<R(Args...)>>>::value,
          int>::type = 0>
  operator Action<R(Args...)>() const {
    struct OA {
      Action<void(InitialActionArgType<Args>...)> initial_action;
      Action<R(Args...)> remaining_actions;
      R operator()(Args... args) const {
        initial_action.Perform(std::forward_as_tuple(
            static_cast<InitialActionArgType<Args>>(args)...));
        return remaining_actions.Perform(
            std::forward_as_tuple(std::forward<Args>(args)...));
      }
    };
    return OA{
        initial_action_,
        static_cast<const Base&>(*this),
    };
  }
 private:
  InitialAction initial_action_;
};
// Implements the ReturnNew<T>(params...) action: every perform heap-allocates
// a fresh T from the stored constructor arguments.  The caller of the mock
// assumes ownership of the returned pointer.
template <typename T, typename... Params>
struct ReturnNewAction {
  T* operator()() const {
    return internal::Apply(
        [](const Params&... unpacked_params) {
          return new T(unpacked_params...);
        },
        params);
  }
  std::tuple<Params...> params;  // Constructor arguments, reused each call.
};
// Implements ReturnArg<k>(): forwards the k-th (0-based) argument of the
// call back as the action's result, preserving its value category.  The
// enable_if disables the overload when the call has k or fewer arguments.
template <size_t k>
struct ReturnArgAction {
  template <typename... Args,
            typename = typename std::enable_if<(k < sizeof...(Args))>::type>
  decltype(auto) operator()(Args&&... args) const {
    return std::get<k>(std::forward_as_tuple(std::forward<Args>(args)...));
  }
};
// Implements SaveArg<k>(pointer): copies the k-th (0-based) call argument
// into *pointer each time the action runs.
template <size_t k, typename Ptr>
struct SaveArgAction {
  Ptr pointer;  // Destination that receives a copy of argument k.

  template <typename... Args>
  void operator()(const Args&... args) const {
    const auto arg_refs = std::tie(args...);
    *pointer = std::get<k>(arg_refs);
  }
};
// Implements SaveArgPointee<k>(pointer): dereferences the k-th (0-based)
// call argument (which must be pointer-like) and copies the pointee into
// *pointer each time the action runs.
template <size_t k, typename Ptr>
struct SaveArgPointeeAction {
  Ptr pointer;  // Destination that receives a copy of argument k's pointee.

  template <typename... Args>
  void operator()(const Args&... args) const {
    const auto arg_refs = std::tie(args...);
    *pointer = *std::get<k>(arg_refs);
  }
};
// Implements SetArgReferee<k>(value): assigns the stored value to the
// variable referenced by the k-th (0-based) argument of the mock function.
template <size_t k, typename T>
struct SetArgRefereeAction {
  T value;
  template <typename... Args>
  void operator()(Args&&... args) const {
    using argk_type =
        typename ::std::tuple_element<k, std::tuple<Args...>>::type;
    // The k-th argument must be an lvalue reference; assigning through a
    // value or rvalue would silently have no observable effect.
    static_assert(std::is_lvalue_reference<argk_type>::value,
                  "Argument must be a reference type.");
    std::get<k>(std::tie(args...)) = value;
  }
};
// Implements SetArrayArgument<k>(first, last): copies the source range
// [first, last) element-by-element through the k-th (0-based) argument,
// which must behave like an output iterator/pointer.  The source elements
// are not owned by the action.
template <size_t k, typename I1, typename I2>
struct SetArrayArgumentAction {
  I1 first;
  I2 last;
  template <typename... Args>
  void operator()(const Args&... args) const {
    auto value = std::get<k>(std::tie(args...));
    // (void) silences "unused result" style warnings for proxy iterators.
    for (auto it = first; it != last; ++it, (void)++value) {
      *value = *it;
    }
  }
};
// Implements DeleteArg<k>(): calls `delete` on the k-th (0-based) call
// argument, which therefore must be a raw pointer owning its pointee.
template <size_t k>
struct DeleteArgAction {
  template <typename... Args>
  void operator()(const Args&... args) const {
    const auto arg_refs = std::tie(args...);
    delete std::get<k>(arg_refs);
  }
};
// Implements ReturnPointee(pointer): ignores all call arguments and yields
// whatever *pointer currently refers to, as a reference — the pointer is
// dereferenced anew on every perform, so later writes are observed.
template <typename Ptr>
struct ReturnPointeeAction {
  Ptr pointer;  // Not owned; must outlive the action.

  template <typename... Args>
  decltype(auto) operator()(const Args&...) const {
    return *pointer;
  }
};
#if GTEST_HAS_EXCEPTIONS
// Implements the Throw(exception) action: each perform throws a copy of the
// stored exception object.
template <typename T>
struct ThrowAction {
  T exception;
  template <typename R, typename... Args>
  operator Action<R(Args...)>() const {
    // Copy into the lambda so repeated performs each throw a fresh copy.
    T copy = exception;
    return [copy](Args...) -> R { throw copy; };
  }
};
// Implements the Rethrow(exception_ptr) action: rethrows a previously
// captured std::exception_ptr on every perform.
struct RethrowAction {
  std::exception_ptr exception;
  template <typename R, typename... Args>
  operator Action<R(Args...)>() const {
    return [ex = exception](Args...) -> R { std::rethrow_exception(ex); };
  }
};
#endif
}
// Convenient alias for a mock-function parameter whose value the action or
// matcher deliberately ignores.
typedef internal::IgnoredValue Unused;
// Creates an action that performs action1, action2, ... sequentially on each
// invocation; the return values of all but the last action are ignored.
template <typename... Action>
internal::DoAllAction<typename std::decay<Action>::type...> DoAll(
    Action&&... action) {
  return internal::DoAllAction<typename std::decay<Action>::type...>(
      {}, std::forward<Action>(action)...);
}
// WithArg<k>(an_action) creates an action that passes only the k-th
// (0-based) argument of the mock function to an_action.
template <size_t k, typename InnerAction>
internal::WithArgsAction<typename std::decay<InnerAction>::type, k> WithArg(
    InnerAction&& action) {
  return {std::forward<InnerAction>(action)};
}
// WithArgs<N1, N2, ...>(an_action) passes the selected arguments, in that
// order, to the inner action.
template <size_t k, size_t... ks, typename InnerAction>
internal::WithArgsAction<typename std::decay<InnerAction>::type, k, ks...>
WithArgs(InnerAction&& action) {
  return {std::forward<InnerAction>(action)};
}
// WithoutArgs(an_action) performs the inner action with no argument at all.
template <typename InnerAction>
internal::WithArgsAction<typename std::decay<InnerAction>::type> WithoutArgs(
    InnerAction&& action) {
  return {std::forward<InnerAction>(action)};
}
// Creates an action that returns 'value'.  'value' is taken by value rather
// than const reference so that Return("string literal") does not try to use
// an array as an initializer.
template <typename R>
internal::ReturnAction<R> Return(R value) {
  return internal::ReturnAction<R>(std::move(value));
}
// Creates an action that returns NULL/nullptr.
inline PolymorphicAction<internal::ReturnNullAction> ReturnNull() {
  return MakePolymorphicAction(internal::ReturnNullAction());
}
// Creates an action that returns from a void function.
inline PolymorphicAction<internal::ReturnVoidAction> Return() {
  return MakePolymorphicAction(internal::ReturnVoidAction());
}
// Creates an action that returns the reference to a variable.
template <typename R>
inline internal::ReturnRefAction<R> ReturnRef(R& x) {
  return internal::ReturnRefAction<R>(x);
}
// Deleted overload: prevents binding ReturnRef to a temporary, which would
// dangle by the time the action performs.
template <typename R, R* = nullptr>
internal::ReturnRefAction<R> ReturnRef(R&&) = delete;
// Creates an action that returns a reference to a copy of the argument.  The
// copy is made when the action is constructed and lives as long as it does.
template <typename R>
inline internal::ReturnRefOfCopyAction<R> ReturnRefOfCopy(const R& x) {
  return internal::ReturnRefOfCopyAction<R>(x);
}
// Wraps a value so that the Return action moves it out (at most once)
// instead of copying it; see ReturnAction<ByMoveWrapper<T>>.
template <typename R>
internal::ByMoveWrapper<R> ByMove(R x) {
  return internal::ByMoveWrapper<R>(std::move(x));
}
// Creates an action that cycles through `vals`: each call returns the next
// element, restarting from the beginning after the last.
template <typename T>
internal::ReturnRoundRobinAction<T> ReturnRoundRobin(std::vector<T> vals) {
  return internal::ReturnRoundRobinAction<T>(std::move(vals));
}
// Overload of ReturnRoundRobin that accepts a braced initializer list.
template <typename T>
internal::ReturnRoundRobinAction<T> ReturnRoundRobin(
    std::initializer_list<T> vals) {
  return internal::ReturnRoundRobinAction<T>(std::vector<T>(vals));
}
// Creates an action that performs the default action for the mock function.
inline internal::DoDefaultAction DoDefault() {
  return internal::DoDefaultAction();
}
// Creates an action that sets the variable pointed to by the N-th (0-based)
// function argument to 'value'.
template <size_t N, typename T>
internal::SetArgumentPointeeAction<N, T> SetArgPointee(T value) {
  return {std::move(value)};
}
// Deprecated spelling of SetArgPointee, kept for backwards compatibility.
template <size_t N, typename T>
internal::SetArgumentPointeeAction<N, T> SetArgumentPointee(T value) {
  return {std::move(value)};
}
// Creates an action that assigns 'val' to the variable *ptr.
template <typename T1, typename T2>
PolymorphicAction<internal::AssignAction<T1, T2>> Assign(T1* ptr, T2 val) {
  return MakePolymorphicAction(internal::AssignAction<T1, T2>(ptr, val));
}
#ifndef GTEST_OS_WINDOWS_MOBILE
// Creates an action that sets errno and returns the given result, mimicking
// the failure convention of system calls and libc functions.
template <typename T>
PolymorphicAction<internal::SetErrnoAndReturnAction<T>> SetErrnoAndReturn(
    int errval, T result) {
  return MakePolymorphicAction(
      internal::SetErrnoAndReturnAction<T>(errval, result));
}
#endif
// Legacy overload: actions can now be constructed directly from callables,
// so this simply decays and forwards the callable through.
template <typename FunctionImpl>
typename std::decay<FunctionImpl>::type Invoke(FunctionImpl&& function_impl) {
  return std::forward<FunctionImpl>(function_impl);
}
// Creates an action that invokes the given method on the given object with
// the mock function's arguments.
template <class Class, typename MethodPtr>
internal::InvokeMethodAction<Class, MethodPtr> Invoke(Class* obj_ptr,
                                                      MethodPtr method_ptr) {
  return {obj_ptr, method_ptr};
}
// Creates an action that invokes 'function_impl' with no argument.
template <typename FunctionImpl>
internal::InvokeWithoutArgsAction<typename std::decay<FunctionImpl>::type>
InvokeWithoutArgs(FunctionImpl function_impl) {
  return {std::move(function_impl)};
}
// Creates an action that invokes the given method on the given object with
// no argument.
template <class Class, typename MethodPtr>
internal::InvokeMethodWithoutArgsAction<Class, MethodPtr> InvokeWithoutArgs(
    Class* obj_ptr, MethodPtr method_ptr) {
  return {obj_ptr, method_ptr};
}
// Creates an action that performs an_action and throws away its result,
// effectively changing the action's return type to void.  an_action must not
// already return void.
template <typename A>
inline internal::IgnoreResultAction<A> IgnoreResult(const A& an_action) {
  return internal::IgnoreResultAction<A>(an_action);
}
// Creates a reference wrapper for the given lvalue.  Redundant with std::ref
// / std::cref but kept for symmetry with ByMove(); the reference type can be
// specified explicitly, e.g. ByRef<const Base>(derived).
template <typename T>
inline ::std::reference_wrapper<T> ByRef(T& l_value) {
  return ::std::reference_wrapper<T>(l_value);
}
// ReturnNew<T>(a1, ..., ak) returns a pointer to a new T constructed on the
// heap from a1...ak; the caller assumes ownership of the result.
template <typename T, typename... Params>
internal::ReturnNewAction<T, typename std::decay<Params>::type...> ReturnNew(
    Params&&... params) {
  return {std::forward_as_tuple(std::forward<Params>(params)...)};
}
// ReturnArg<k>() returns the k-th (0-based) argument of the mock function.
template <size_t k>
internal::ReturnArgAction<k> ReturnArg() {
  return {};
}
// SaveArg<k>(pointer) saves the k-th (0-based) argument to *pointer.
template <size_t k, typename Ptr>
internal::SaveArgAction<k, Ptr> SaveArg(Ptr pointer) {
  return {pointer};
}
// SaveArgPointee<k>(pointer) saves the value pointed to by the k-th
// (0-based) argument to *pointer.
template <size_t k, typename Ptr>
internal::SaveArgPointeeAction<k, Ptr> SaveArgPointee(Ptr pointer) {
  return {pointer};
}
// SetArgReferee<k>(value) assigns 'value' to the variable referenced by the
// k-th (0-based) argument of the mock function.
template <size_t k, typename T>
internal::SetArgRefereeAction<k, typename std::decay<T>::type> SetArgReferee(
    T&& value) {
  return {std::forward<T>(value)};
}
// SetArrayArgument<k>(first, last) copies [first, last) through the k-th
// (0-based) argument (a pointer or output iterator); source elements are not
// owned by the action.
template <size_t k, typename I1, typename I2>
internal::SetArrayArgumentAction<k, I1, I2> SetArrayArgument(I1 first,
                                                             I2 last) {
  return {first, last};
}
// DeleteArg<k>() deletes the k-th (0-based) argument of the mock function.
template <size_t k>
internal::DeleteArgAction<k> DeleteArg() {
  return {};
}
// ReturnPointee(pointer) returns (a reference to) the value currently
// pointed to by 'pointer'.
template <typename Ptr>
internal::ReturnPointeeAction<Ptr> ReturnPointee(Ptr pointer) {
  return {pointer};
}
#if GTEST_HAS_EXCEPTIONS
// Throw(exception) throws a copy of the given value from a mock function of
// any type.  Disabled for std::exception_ptr, which Rethrow handles below.
template <typename T>
typename std::enable_if<
    !std::is_base_of<std::exception_ptr, typename std::decay<T>::type>::value,
    internal::ThrowAction<typename std::decay<T>::type>>::type
Throw(T&& exception) {
  return {std::forward<T>(exception)};
}
// Rethrow(exception_ptr) rethrows a previously captured exception.
inline internal::RethrowAction Rethrow(std::exception_ptr exception) {
  return {std::move(exception)};
}
#endif
namespace internal {
// Placeholder supplied for each of the 10 gmock_PerformImpl argument slots
// beyond those the mocked function actually provides.
struct ExcessiveArg {};
// Builds an implementation of an Action<F> from the ACTION*-generated
// gmock_Impl class.
template <typename F, typename Impl>
struct ActionImpl;
// Chooses the backing storage for an Impl: the Impl itself when it is
// default-constructible, otherwise a Holder around a shared_ptr to it.
template <typename Impl>
struct ImplBase {
  struct Holder {
    // Allows each copy of the Action to access the single shared Impl.
    explicit operator const Impl&() const { return *ptr; }
    std::shared_ptr<Impl> ptr;
  };
  using type = typename std::conditional<std::is_constructible<Impl>::value,
                                         Impl, Holder>::type;
};
template <typename R, typename... Args, typename Impl>
struct ActionImpl<R(Args...), Impl> : ImplBase<Impl>::type {
  using Base = typename ImplBase<Impl>::type;
  using function_type = R(Args...);
  using args_type = std::tuple<Args...>;
  ActionImpl() = default;  // Only usable when Base (the Impl) allows it.
  explicit ActionImpl(std::shared_ptr<Impl> impl) : Base{std::move(impl)} {}
  R operator()(Args&&... arg) const {
    // gmock_PerformImpl always takes exactly 10 individual argument slots;
    // real arguments fill the first kMaxArgs, the rest are ExcessiveArg.
    static constexpr size_t kMaxArgs =
        sizeof...(Args) <= 10 ? sizeof...(Args) : 10;
    return Apply(std::make_index_sequence<kMaxArgs>{},
                 std::make_index_sequence<10 - kMaxArgs>{},
                 args_type{std::forward<Args>(arg)...});
  }
  template <std::size_t... arg_id, std::size_t... excess_id>
  R Apply(std::index_sequence<arg_id...>, std::index_sequence<excess_id...>,
          const args_type& args) const {
    // ExcessiveArg is stateless, so a single static instance is shared by
    // every padded slot.
    static constexpr ExcessiveArg kExcessArg{};
    return static_cast<const Impl&>(*this)
        .template gmock_PerformImpl<
            /*function_type=*/function_type, /*return_type=*/R,
            /*args_type=*/args_type,
            /*argument_types=*/
            typename std::tuple_element<arg_id, args_type>::type...>(
            args, std::get<arg_id>(args)...,
            ((void)excess_id, kExcessArg)...);
  }
};
// Stores a default-constructed Impl as part of the Action<F>'s state.
template <typename F, typename Impl>
::testing::Action<F> MakeAction() {
  return ::testing::Action<F>(ActionImpl<F, Impl>());
}
// Stores just the pointer to the instance of the Impl class in the Action's
// state, sharing it between copies.
template <typename F, typename Impl>
::testing::Action<F> MakeAction(std::shared_ptr<Impl> impl) {
  return ::testing::Action<F>(ActionImpl<F, Impl>(std::move(impl)));
}
// Macro machinery for the ACTION* family.  gmock_PerformImpl always receives
// the full args tuple plus exactly 10 individual argument slots; the helpers
// below generate those parameter and template-parameter lists.
// NOTE: comments must stay on their own lines here -- a // comment inside a
// macro body would comment out the line-continuation backslash.
#define GMOCK_INTERNAL_ARG_UNUSED(i, data, el) \
  , GTEST_INTERNAL_ATTRIBUTE_MAYBE_UNUSED const arg##i##_type& arg##i
// Parameter list with every argument marked maybe-unused, for the
// macro-generated out-of-line definition of gmock_PerformImpl.
#define GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_ \
  GTEST_INTERNAL_ATTRIBUTE_MAYBE_UNUSED const args_type& args GMOCK_PP_REPEAT( \
      GMOCK_INTERNAL_ARG_UNUSED, , 10)
#define GMOCK_INTERNAL_ARG(i, data, el) , const arg##i##_type& arg##i
// Parameter list for the in-class declaration of gmock_PerformImpl.
#define GMOCK_ACTION_ARG_TYPES_AND_NAMES_ \
  const args_type& args GMOCK_PP_REPEAT(GMOCK_INTERNAL_ARG, , 10)
#define GMOCK_INTERNAL_TEMPLATE_ARG(i, data, el) , typename arg##i##_type
// Template-parameter list naming the 10 per-argument types.
#define GMOCK_ACTION_TEMPLATE_ARGS_NAMES_ \
  GMOCK_PP_TAIL(GMOCK_PP_REPEAT(GMOCK_INTERNAL_TEMPLATE_ARG, , 10))
#define GMOCK_INTERNAL_TYPENAME_PARAM(i, data, param) , typename param##_type
// Template-parameter list for the user-declared ACTION_P* parameters.
#define GMOCK_ACTION_TYPENAME_PARAMS_(params) \
  GMOCK_PP_TAIL(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_TYPENAME_PARAM, , params))
#define GMOCK_INTERNAL_TYPE_PARAM(i, data, param) , param##_type
// Template-argument list matching GMOCK_ACTION_TYPENAME_PARAMS_.
#define GMOCK_ACTION_TYPE_PARAMS_(params) \
  GMOCK_PP_TAIL(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_TYPE_PARAM, , params))
#define GMOCK_INTERNAL_TYPE_GVALUE_PARAM(i, data, param) \
  , param##_type gmock_p##i
// By-value constructor/function parameter list for the action parameters.
#define GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params) \
  GMOCK_PP_TAIL(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_TYPE_GVALUE_PARAM, , params))
#define GMOCK_INTERNAL_GVALUE_PARAM(i, data, param) \
  , std::forward<param##_type>(gmock_p##i)
// Forwarding argument list for the parameters above.
#define GMOCK_ACTION_GVALUE_PARAMS_(params) \
  GMOCK_PP_TAIL(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_GVALUE_PARAM, , params))
#define GMOCK_INTERNAL_INIT_PARAM(i, data, param) \
  , param(::std::forward<param##_type>(gmock_p##i))
// Member-initializer list moving the parameters into the gmock_Impl fields.
#define GMOCK_ACTION_INIT_PARAMS_(params) \
  GMOCK_PP_TAIL(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_INIT_PARAM, , params))
#define GMOCK_INTERNAL_FIELD_PARAM(i, data, param) param##_type param;
// Field declarations, one per action parameter.
#define GMOCK_ACTION_FIELD_PARAMS_(params) \
  GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_FIELD_PARAM, , params)
// Expands to the full class + factory + gmock_PerformImpl definition shared
// by ACTION_P .. ACTION_P10.  The user's action body follows the expansion.
#define GMOCK_INTERNAL_ACTION(name, full_name, params)                         \
  template <GMOCK_ACTION_TYPENAME_PARAMS_(params)>                             \
  class full_name {                                                            \
   public:                                                                     \
    explicit full_name(GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params))               \
        : impl_(std::make_shared<gmock_Impl>(                                  \
              GMOCK_ACTION_GVALUE_PARAMS_(params))) {}                         \
    full_name(const full_name&) = default;                                     \
    full_name(full_name&&) noexcept = default;                                 \
    template <typename F>                                                      \
    operator ::testing::Action<F>() const {                                    \
      return ::testing::internal::MakeAction<F>(impl_);                        \
    }                                                                          \
                                                                               \
   private:                                                                    \
    class gmock_Impl {                                                         \
     public:                                                                   \
      explicit gmock_Impl(GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params))            \
          : GMOCK_ACTION_INIT_PARAMS_(params) {}                               \
      template <typename function_type, typename return_type,                  \
                typename args_type, GMOCK_ACTION_TEMPLATE_ARGS_NAMES_>         \
      return_type gmock_PerformImpl(GMOCK_ACTION_ARG_TYPES_AND_NAMES_) const;  \
      GMOCK_ACTION_FIELD_PARAMS_(params)                                       \
    };                                                                         \
    std::shared_ptr<const gmock_Impl> impl_;                                   \
  };                                                                           \
  template <GMOCK_ACTION_TYPENAME_PARAMS_(params)>                             \
  inline full_name<GMOCK_ACTION_TYPE_PARAMS_(params)> name(                    \
      GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params)) GTEST_MUST_USE_RESULT_;        \
  template <GMOCK_ACTION_TYPENAME_PARAMS_(params)>                             \
  inline full_name<GMOCK_ACTION_TYPE_PARAMS_(params)> name(                    \
      GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params)) {                              \
    return full_name<GMOCK_ACTION_TYPE_PARAMS_(params)>(                       \
        GMOCK_ACTION_GVALUE_PARAMS_(params));                                  \
  }                                                                            \
  template <GMOCK_ACTION_TYPENAME_PARAMS_(params)>                             \
  template <typename function_type, typename return_type, typename args_type,  \
            GMOCK_ACTION_TEMPLATE_ARGS_NAMES_>                                 \
  return_type                                                                  \
  full_name<GMOCK_ACTION_TYPE_PARAMS_(params)>::gmock_Impl::gmock_PerformImpl( \
      GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
}  // namespace internal
// ACTION(name) defines a parameterless custom action; the statement block
// that follows the macro invocation becomes the body of gmock_PerformImpl.
#define ACTION(name)                                                          \
  class name##Action {                                                        \
   public:                                                                    \
    explicit name##Action() noexcept {}                                       \
    name##Action(const name##Action&) noexcept {}                             \
    template <typename F>                                                     \
    operator ::testing::Action<F>() const {                                   \
      return ::testing::internal::MakeAction<F, gmock_Impl>();                \
    }                                                                         \
                                                                              \
   private:                                                                   \
    class gmock_Impl {                                                        \
     public:                                                                  \
      template <typename function_type, typename return_type,                 \
                typename args_type, GMOCK_ACTION_TEMPLATE_ARGS_NAMES_>        \
      return_type gmock_PerformImpl(GMOCK_ACTION_ARG_TYPES_AND_NAMES_) const; \
    };                                                                        \
  };                                                                          \
  inline name##Action name() GTEST_MUST_USE_RESULT_;                          \
  inline name##Action name() { return name##Action(); }                       \
  template <typename function_type, typename return_type, typename args_type, \
            GMOCK_ACTION_TEMPLATE_ARGS_NAMES_>                                \
  return_type name##Action::gmock_Impl::gmock_PerformImpl(                    \
      GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
// ACTION_P .. ACTION_P10 define custom actions taking 1..10 parameters; they
// all delegate to GMOCK_INTERNAL_ACTION above.
#define ACTION_P(name, ...) \
  GMOCK_INTERNAL_ACTION(name, name##ActionP, (__VA_ARGS__))
#define ACTION_P2(name, ...) \
  GMOCK_INTERNAL_ACTION(name, name##ActionP2, (__VA_ARGS__))
#define ACTION_P3(name, ...) \
  GMOCK_INTERNAL_ACTION(name, name##ActionP3, (__VA_ARGS__))
#define ACTION_P4(name, ...) \
  GMOCK_INTERNAL_ACTION(name, name##ActionP4, (__VA_ARGS__))
#define ACTION_P5(name, ...) \
  GMOCK_INTERNAL_ACTION(name, name##ActionP5, (__VA_ARGS__))
#define ACTION_P6(name, ...) \
  GMOCK_INTERNAL_ACTION(name, name##ActionP6, (__VA_ARGS__))
#define ACTION_P7(name, ...) \
  GMOCK_INTERNAL_ACTION(name, name##ActionP7, (__VA_ARGS__))
#define ACTION_P8(name, ...) \
  GMOCK_INTERNAL_ACTION(name, name##ActionP8, (__VA_ARGS__))
#define ACTION_P9(name, ...) \
  GMOCK_INTERNAL_ACTION(name, name##ActionP9, (__VA_ARGS__))
#define ACTION_P10(name, ...) \
  GMOCK_INTERNAL_ACTION(name, name##ActionP10, (__VA_ARGS__))
}
GTEST_DISABLE_MSC_WARNINGS_POP_()
#endif | #include "gmock/gmock-actions.h"
#include <algorithm>
#include <functional>
#include <iterator>
#include <memory>
#include <sstream>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gmock/internal/gmock-port.h"
#include "gtest/gtest-spi.h"
#include "gtest/gtest.h"
#include "gtest/internal/gtest-port.h"
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4100 4503)
#if defined(_MSC_VER) && (_MSC_VER == 1900)
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4800)
#endif
namespace testing {
namespace {
using ::testing::internal::BuiltInDefaultValue;
// internal::negation<B> must derive from the opposite of B's truth value,
// including for integral_constant bases whose value is a non-bool integer.
TEST(TypeTraits, Negation) {
  static_assert(std::is_base_of<std::false_type,
                                internal::negation<std::true_type>>::value,
                "");
  static_assert(std::is_base_of<std::true_type,
                                internal::negation<std::false_type>>::value,
                "");
  static_assert(std::is_base_of<
                    std::true_type,
                    internal::negation<std::integral_constant<int, 0>>>::value,
                "");
  static_assert(std::is_base_of<
                    std::false_type,
                    internal::negation<std::integral_constant<int, 1>>>::value,
                "");
  static_assert(std::is_base_of<
                    std::false_type,
                    internal::negation<std::integral_constant<int, -1>>>::value,
                "");
}
// Truthy/falsy types that are not bool constants (their ::value is an int
// convertible to bool).  The non-type template parameter gives each use a
// distinct identity, so the tests below can check that conjunction and
// disjunction derive from the *correct* operand, not merely one with the
// right truth value.
template <int>
struct MyFalse : std::integral_constant<int, 0> {};
template <int>
struct MyTrue : std::integral_constant<int, -1> {};
// internal::conjunction must short-circuit: it derives from the first falsy
// operand (or the last operand when all are truthy), and operands after the
// first falsy one are never instantiated (hence the incomplete-ish Empty).
TEST(TypeTraits, Conjunction) {
  // Zero and one operand.
  static_assert(std::is_base_of<std::true_type, internal::conjunction<>>::value,
                "");
  static_assert(
      std::is_base_of<MyFalse<0>, internal::conjunction<MyFalse<0>>>::value,
      "");
  static_assert(
      std::is_base_of<MyTrue<0>, internal::conjunction<MyTrue<0>>>::value, "");
  // Multiple operands: derives from the first falsy one.
  static_assert(
      std::is_base_of<MyFalse<1>, internal::conjunction<MyTrue<0>, MyFalse<1>,
                                                        MyTrue<2>>>::value,
      "");
  static_assert(
      std::is_base_of<MyFalse<1>, internal::conjunction<MyTrue<0>, MyFalse<1>,
                                                        MyFalse<2>>>::value,
      "");
  struct Empty {};
  // Short-circuiting: Empty (which has no ::value) is never examined because
  // MyFalse<1> already decides the result.
  static_assert(
      std::is_base_of<MyFalse<1>, internal::conjunction<MyTrue<0>, MyFalse<1>,
                                                        Empty>>::value,
      "");
  // All truthy: derives from the last operand.
  static_assert(
      std::is_base_of<MyTrue<2>, internal::conjunction<MyTrue<0>, MyTrue<1>,
                                                       MyTrue<2>>>::value,
      "");
}
// internal::disjunction must short-circuit: it derives from the first truthy
// operand (or the last operand when all are falsy), without instantiating
// operands after the first truthy one.
TEST(TypeTraits, Disjunction) {
  // Zero and one operand.
  static_assert(
      std::is_base_of<std::false_type, internal::disjunction<>>::value, "");
  static_assert(
      std::is_base_of<MyFalse<0>, internal::disjunction<MyFalse<0>>>::value,
      "");
  static_assert(
      std::is_base_of<MyTrue<0>, internal::disjunction<MyTrue<0>>>::value, "");
  // Multiple operands: derives from the first truthy one.
  static_assert(
      std::is_base_of<MyTrue<1>, internal::disjunction<MyFalse<0>, MyTrue<1>,
                                                       MyFalse<2>>>::value,
      "");
  static_assert(
      std::is_base_of<MyTrue<1>, internal::disjunction<MyFalse<0>, MyTrue<1>,
                                                       MyTrue<2>>>::value,
      "");
  struct Empty {};
  // Short-circuiting: Empty (no ::value) is never examined because
  // MyTrue<1> already decides the result.
  static_assert(
      std::is_base_of<MyTrue<1>, internal::disjunction<MyFalse<0>, MyTrue<1>,
                                                       Empty>>::value,
      "");
  // All falsy: derives from the last operand.
  static_assert(
      std::is_base_of<MyFalse<2>, internal::disjunction<MyFalse<0>, MyFalse<1>,
                                                        MyFalse<2>>>::value,
      "");
}
// internal::is_callable_r must respect cv-qualification, value category,
// result convertibility (including conversions to void), and arity.
TEST(TypeTraits, IsInvocableRV) {
  struct C {
    int operator()() const { return 0; }
    void operator()(int) & {}
    std::string operator()(int) && { return ""; };
  };
  // The first overload is callable for all const and non-const rvalues and
  // lvalues. It can be used to obtain an int, cv void, or anything int is
  // convertible to.
  static_assert(internal::is_callable_r<int, C>::value, "");
  static_assert(internal::is_callable_r<int, C&>::value, "");
  static_assert(internal::is_callable_r<int, const C>::value, "");
  static_assert(internal::is_callable_r<int, const C&>::value, "");
  static_assert(internal::is_callable_r<void, C>::value, "");
  static_assert(internal::is_callable_r<const volatile void, C>::value, "");
  static_assert(internal::is_callable_r<char, C>::value, "");
  // The lvalue-ref-qualified overload of operator()(int) returns void, so
  // only void results are obtainable from a non-const lvalue.
  static_assert(internal::is_callable_r<void, C&, int>::value, "");
  static_assert(!internal::is_callable_r<int, C&, int>::value, "");
  static_assert(!internal::is_callable_r<std::string, C&, int>::value, "");
  static_assert(!internal::is_callable_r<void, const C&, int>::value, "");
  // The rvalue-ref-qualified overload returns std::string.
  static_assert(internal::is_callable_r<std::string, C, int>::value, "");
  static_assert(internal::is_callable_r<void, C, int>::value, "");
  static_assert(!internal::is_callable_r<int, C, int>::value, "");
  // Wrong argument types or arity are not callable at all.
  static_assert(!internal::is_callable_r<void, C, std::string>::value, "");
  static_assert(!internal::is_callable_r<void, C, int, int>::value, "");
#if defined(GTEST_INTERNAL_CPLUSPLUS_LANG) && \
    GTEST_INTERNAL_CPLUSPLUS_LANG >= 201703L
  {
    // In C++17 and above, where it's guaranteed that functions can return
    // non-moveable objects, everything should work fine for non-moveable
    // result types too.
    struct NonMoveable {
      NonMoveable() = default;
      NonMoveable(NonMoveable&&) = delete;
    };
    static_assert(!std::is_move_constructible_v<NonMoveable>);
    struct Callable {
      NonMoveable operator()() { return NonMoveable(); }
    };
    static_assert(internal::is_callable_r<NonMoveable, Callable>::value);
    static_assert(internal::is_callable_r<void, Callable>::value);
    static_assert(
        internal::is_callable_r<const volatile void, Callable>::value);
    static_assert(!internal::is_callable_r<int, Callable>::value);
    static_assert(!internal::is_callable_r<NonMoveable, Callable, int>::value);
  }
#endif
  // Nothing should choke when we try to call other arguments besides directly
  // callable objects, but they should not report true.
  static_assert(!internal::is_callable_r<void, int>::value, "");
  static_assert(!internal::is_callable_r<void, void (C::*)()>::value, "");
  static_assert(!internal::is_callable_r<void, void (C::*)(), C*>::value, "");
}
// Built-in default for any pointer type is the null pointer.
TEST(BuiltInDefaultValueTest, IsNullForPointerTypes) {
  EXPECT_TRUE(BuiltInDefaultValue<int*>::Get() == nullptr);
  EXPECT_TRUE(BuiltInDefaultValue<const char*>::Get() == nullptr);
  EXPECT_TRUE(BuiltInDefaultValue<void*>::Get() == nullptr);
}
TEST(BuiltInDefaultValueTest, ExistsForPointerTypes) {
  EXPECT_TRUE(BuiltInDefaultValue<int*>::Exists());
  EXPECT_TRUE(BuiltInDefaultValue<const char*>::Exists());
  EXPECT_TRUE(BuiltInDefaultValue<void*>::Exists());
}
// Built-in default for every numeric type is zero.
TEST(BuiltInDefaultValueTest, IsZeroForNumericTypes) {
  EXPECT_EQ(0U, BuiltInDefaultValue<unsigned char>::Get());
  EXPECT_EQ(0, BuiltInDefaultValue<signed char>::Get());
  EXPECT_EQ(0, BuiltInDefaultValue<char>::Get());
#if GMOCK_WCHAR_T_IS_NATIVE_
#if !defined(__WCHAR_UNSIGNED__)
  EXPECT_EQ(0, BuiltInDefaultValue<wchar_t>::Get());
#else
  // wchar_t is unsigned on some platforms (e.g. some ARM ABIs).
  EXPECT_EQ(0U, BuiltInDefaultValue<wchar_t>::Get());
#endif
#endif
  EXPECT_EQ(0U, BuiltInDefaultValue<unsigned short>::Get());
  EXPECT_EQ(0, BuiltInDefaultValue<signed short>::Get());
  EXPECT_EQ(0, BuiltInDefaultValue<short>::Get());
  EXPECT_EQ(0U, BuiltInDefaultValue<unsigned int>::Get());
  EXPECT_EQ(0, BuiltInDefaultValue<signed int>::Get());
  EXPECT_EQ(0, BuiltInDefaultValue<int>::Get());
  EXPECT_EQ(0U, BuiltInDefaultValue<unsigned long>::Get());
  EXPECT_EQ(0, BuiltInDefaultValue<signed long>::Get());
  EXPECT_EQ(0, BuiltInDefaultValue<long>::Get());
  EXPECT_EQ(0U, BuiltInDefaultValue<unsigned long long>::Get());
  EXPECT_EQ(0, BuiltInDefaultValue<signed long long>::Get());
  EXPECT_EQ(0, BuiltInDefaultValue<long long>::Get());
  EXPECT_EQ(0, BuiltInDefaultValue<float>::Get());
  EXPECT_EQ(0, BuiltInDefaultValue<double>::Get());
}
TEST(BuiltInDefaultValueTest, ExistsForNumericTypes) {
  EXPECT_TRUE(BuiltInDefaultValue<unsigned char>::Exists());
  EXPECT_TRUE(BuiltInDefaultValue<signed char>::Exists());
  EXPECT_TRUE(BuiltInDefaultValue<char>::Exists());
#if GMOCK_WCHAR_T_IS_NATIVE_
  EXPECT_TRUE(BuiltInDefaultValue<wchar_t>::Exists());
#endif
  EXPECT_TRUE(BuiltInDefaultValue<unsigned short>::Exists());
  EXPECT_TRUE(BuiltInDefaultValue<signed short>::Exists());
  EXPECT_TRUE(BuiltInDefaultValue<short>::Exists());
  EXPECT_TRUE(BuiltInDefaultValue<unsigned int>::Exists());
  EXPECT_TRUE(BuiltInDefaultValue<signed int>::Exists());
  EXPECT_TRUE(BuiltInDefaultValue<int>::Exists());
  EXPECT_TRUE(BuiltInDefaultValue<unsigned long>::Exists());
  EXPECT_TRUE(BuiltInDefaultValue<signed long>::Exists());
  EXPECT_TRUE(BuiltInDefaultValue<long>::Exists());
  EXPECT_TRUE(BuiltInDefaultValue<unsigned long long>::Exists());
  EXPECT_TRUE(BuiltInDefaultValue<signed long long>::Exists());
  EXPECT_TRUE(BuiltInDefaultValue<long long>::Exists());
  EXPECT_TRUE(BuiltInDefaultValue<float>::Exists());
  EXPECT_TRUE(BuiltInDefaultValue<double>::Exists());
}
// Built-in default for bool is false.
TEST(BuiltInDefaultValueTest, IsFalseForBool) {
  EXPECT_FALSE(BuiltInDefaultValue<bool>::Get());
}
TEST(BuiltInDefaultValueTest, BoolExists) {
  EXPECT_TRUE(BuiltInDefaultValue<bool>::Exists());
}
// Built-in default for std::string is the empty string.
TEST(BuiltInDefaultValueTest, IsEmptyStringForString) {
  EXPECT_EQ("", BuiltInDefaultValue<::std::string>::Get());
}
TEST(BuiltInDefaultValueTest, ExistsForString) {
  EXPECT_TRUE(BuiltInDefaultValue<::std::string>::Exists());
}
// Top-level const is ignored: const T gets the same default as T.
TEST(BuiltInDefaultValueTest, WorksForConstTypes) {
  EXPECT_EQ("", BuiltInDefaultValue<const std::string>::Get());
  EXPECT_EQ(0, BuiltInDefaultValue<const int>::Get());
  EXPECT_TRUE(BuiltInDefaultValue<char* const>::Get() == nullptr);
  EXPECT_FALSE(BuiltInDefaultValue<const bool>::Get());
}
// A type whose default constructor establishes a recognizable value (42),
// letting tests confirm that BuiltInDefaultValue default-constructs it.
class MyDefaultConstructible {
 public:
  MyDefaultConstructible() = default;

  int value() const { return value_; }

 private:
  int value_ = 42;
};
// A type constructible only from an explicit int, so BuiltInDefaultValue
// cannot synthesize an instance of it.
class MyNonDefaultConstructible {
 public:
  explicit MyNonDefaultConstructible(int initial) : stored_(initial) {}

  int value() const { return stored_; }

 private:
  int stored_;
};
// A default-constructible class type has a built-in default value...
TEST(BuiltInDefaultValueTest, ExistsForDefaultConstructibleType) {
  EXPECT_TRUE(BuiltInDefaultValue<MyDefaultConstructible>::Exists());
}
// ...and that default is produced by default construction.
TEST(BuiltInDefaultValueTest, IsDefaultConstructedForDefaultConstructibleType) {
  EXPECT_EQ(42, BuiltInDefaultValue<MyDefaultConstructible>::Get().value());
}
// Without a default constructor there is no built-in default.
TEST(BuiltInDefaultValueTest, DoesNotExistForNonDefaultConstructibleType) {
  EXPECT_FALSE(BuiltInDefaultValue<MyNonDefaultConstructible>::Exists());
}
// Asking for a default reference aborts (there is no sensible referent).
TEST(BuiltInDefaultValueDeathTest, IsUndefinedForReferences) {
  EXPECT_DEATH_IF_SUPPORTED({ BuiltInDefaultValue<int&>::Get(); }, "");
  EXPECT_DEATH_IF_SUPPORTED({ BuiltInDefaultValue<const char&>::Get(); }, "");
}
// Asking for a default of a non-default-constructible type also aborts.
TEST(BuiltInDefaultValueDeathTest, IsUndefinedForNonDefaultConstructibleType) {
  EXPECT_DEATH_IF_SUPPORTED(
      { BuiltInDefaultValue<MyNonDefaultConstructible>::Get(); }, "");
}
// No user-specified default exists until DefaultValue<T>::Set is called.
TEST(DefaultValueTest, IsInitiallyUnset) {
  EXPECT_FALSE(DefaultValue<int>::IsSet());
  EXPECT_FALSE(DefaultValue<MyDefaultConstructible>::IsSet());
  EXPECT_FALSE(DefaultValue<const MyNonDefaultConstructible>::IsSet());
}
// Set/Clear toggle the user default; Exists() also reflects built-in defaults.
TEST(DefaultValueTest, CanBeSetAndUnset) {
  EXPECT_TRUE(DefaultValue<int>::Exists());
  EXPECT_FALSE(DefaultValue<const MyNonDefaultConstructible>::Exists());
  DefaultValue<int>::Set(1);
  DefaultValue<const MyNonDefaultConstructible>::Set(
      MyNonDefaultConstructible(42));
  EXPECT_EQ(1, DefaultValue<int>::Get());
  EXPECT_EQ(42, DefaultValue<const MyNonDefaultConstructible>::Get().value());
  EXPECT_TRUE(DefaultValue<int>::Exists());
  EXPECT_TRUE(DefaultValue<const MyNonDefaultConstructible>::Exists());
  DefaultValue<int>::Clear();
  DefaultValue<const MyNonDefaultConstructible>::Clear();
  EXPECT_FALSE(DefaultValue<int>::IsSet());
  EXPECT_FALSE(DefaultValue<const MyNonDefaultConstructible>::IsSet());
  EXPECT_TRUE(DefaultValue<int>::Exists());
  EXPECT_FALSE(DefaultValue<const MyNonDefaultConstructible>::Exists());
}
// When unset, Get() falls back to the built-in default, or dies if none.
TEST(DefaultValueDeathTest, GetReturnsBuiltInDefaultValueWhenUnset) {
  EXPECT_FALSE(DefaultValue<int>::IsSet());
  EXPECT_TRUE(DefaultValue<int>::Exists());
  EXPECT_FALSE(DefaultValue<MyNonDefaultConstructible>::IsSet());
  EXPECT_FALSE(DefaultValue<MyNonDefaultConstructible>::Exists());
  EXPECT_EQ(0, DefaultValue<int>::Get());
  EXPECT_DEATH_IF_SUPPORTED(
      { DefaultValue<MyNonDefaultConstructible>::Get(); }, "");
}
// SetFactory lets move-only types have a (regenerated-per-call) default.
TEST(DefaultValueTest, GetWorksForMoveOnlyIfSet) {
  EXPECT_TRUE(DefaultValue<std::unique_ptr<int>>::Exists());
  EXPECT_TRUE(DefaultValue<std::unique_ptr<int>>::Get() == nullptr);
  DefaultValue<std::unique_ptr<int>>::SetFactory(
      [] { return std::make_unique<int>(42); });
  EXPECT_TRUE(DefaultValue<std::unique_ptr<int>>::Exists());
  std::unique_ptr<int> i = DefaultValue<std::unique_ptr<int>>::Get();
  EXPECT_EQ(42, *i);
}
// DefaultValue<void>::Get() must compile and be a no-op.
TEST(DefaultValueTest, GetWorksForVoid) { return DefaultValue<void>::Get(); }
// Reference defaults start unset...
TEST(DefaultValueOfReferenceTest, IsInitiallyUnset) {
  EXPECT_FALSE(DefaultValue<int&>::IsSet());
  EXPECT_FALSE(DefaultValue<MyDefaultConstructible&>::IsSet());
  EXPECT_FALSE(DefaultValue<MyNonDefaultConstructible&>::IsSet());
}
// ...and, unlike value types, have no built-in default at all.
TEST(DefaultValueOfReferenceTest, IsInitiallyNotExisting) {
  EXPECT_FALSE(DefaultValue<int&>::Exists());
  EXPECT_FALSE(DefaultValue<MyDefaultConstructible&>::Exists());
  EXPECT_FALSE(DefaultValue<MyNonDefaultConstructible&>::Exists());
}
// A reference default binds to the exact object passed to Set().
TEST(DefaultValueOfReferenceTest, CanBeSetAndUnset) {
  int n = 1;
  DefaultValue<const int&>::Set(n);
  MyNonDefaultConstructible x(42);
  DefaultValue<MyNonDefaultConstructible&>::Set(x);
  EXPECT_TRUE(DefaultValue<const int&>::Exists());
  EXPECT_TRUE(DefaultValue<MyNonDefaultConstructible&>::Exists());
  EXPECT_EQ(&n, &(DefaultValue<const int&>::Get()));
  EXPECT_EQ(&x, &(DefaultValue<MyNonDefaultConstructible&>::Get()));
  DefaultValue<const int&>::Clear();
  DefaultValue<MyNonDefaultConstructible&>::Clear();
  EXPECT_FALSE(DefaultValue<const int&>::Exists());
  EXPECT_FALSE(DefaultValue<MyNonDefaultConstructible&>::Exists());
  EXPECT_FALSE(DefaultValue<const int&>::IsSet());
  EXPECT_FALSE(DefaultValue<MyNonDefaultConstructible&>::IsSet());
}
// Getting an unset reference default dies (nothing to bind to).
TEST(DefaultValueOfReferenceDeathTest, GetReturnsBuiltInDefaultValueWhenUnset) {
  EXPECT_FALSE(DefaultValue<int&>::IsSet());
  EXPECT_FALSE(DefaultValue<MyNonDefaultConstructible&>::IsSet());
  EXPECT_DEATH_IF_SUPPORTED({ DefaultValue<int&>::Get(); }, "");
  EXPECT_DEATH_IF_SUPPORTED(
      { DefaultValue<MyNonDefaultConstructible>::Get(); }, "");
}
// Sample function type used by the ActionInterface/Action tests below.
typedef int MyGlobalFunction(bool, int);
// Hand-written ActionInterface implementation: returns the int argument when
// the bool argument is true, otherwise 0.
class MyActionImpl : public ActionInterface<MyGlobalFunction> {
 public:
  int Perform(const std::tuple<bool, int>& args) override {
    return std::get<0>(args) ? std::get<1>(args) : 0;
  }
};
// Compiling an ActionInterface subclass with only Perform() is sufficient.
TEST(ActionInterfaceTest, CanBeImplementedByDefiningPerform) {
  MyActionImpl my_action_impl;
  (void)my_action_impl;
}
// MakeAction wraps a raw ActionInterface* into a copyable Action.
TEST(ActionInterfaceTest, MakeAction) {
  Action<MyGlobalFunction> action = MakeAction(new MyActionImpl);
  EXPECT_EQ(5, action.Perform(std::make_tuple(true, 5)));
}
// Action takes ownership of the ActionInterface* it is constructed from.
TEST(ActionTest, CanBeConstructedFromActionInterface) {
  Action<MyGlobalFunction> action(new MyActionImpl);
}
// Action::Perform forwards to the wrapped implementation.
TEST(ActionTest, DelegatesWorkToActionInterface) {
  const Action<MyGlobalFunction> action(new MyActionImpl);
  EXPECT_EQ(5, action.Perform(std::make_tuple(true, 5)));
  EXPECT_EQ(0, action.Perform(std::make_tuple(false, 1)));
}
// Copies of an Action share behavior with the original.
TEST(ActionTest, IsCopyable) {
  Action<MyGlobalFunction> a1(new MyActionImpl);
  Action<MyGlobalFunction> a2(a1);
  EXPECT_EQ(5, a1.Perform(std::make_tuple(true, 5)));
  EXPECT_EQ(0, a1.Perform(std::make_tuple(false, 1)));
  EXPECT_EQ(5, a2.Perform(std::make_tuple(true, 5)));
  EXPECT_EQ(0, a2.Perform(std::make_tuple(false, 1)));
  a2 = a1;
  EXPECT_EQ(5, a1.Perform(std::make_tuple(true, 5)));
  EXPECT_EQ(0, a1.Perform(std::make_tuple(false, 1)));
  EXPECT_EQ(5, a2.Perform(std::make_tuple(true, 5)));
  EXPECT_EQ(0, a2.Perform(std::make_tuple(false, 1)));
}
// Predicate action used to test Action-type conversion below.
class IsNotZero : public ActionInterface<bool(int)> {
 public:
  bool Perform(const std::tuple<int>& arg) override {
    return std::get<0>(arg) != 0;
  }
};
// An Action<F1> converts to Action<F2> when argument/result types convert
// (here bool(int) -> int(char)).
TEST(ActionTest, CanBeConvertedToOtherActionType) {
  const Action<bool(int)> a1(new IsNotZero);
  const Action<int(char)> a2 = Action<int(char)>(a1);
  EXPECT_EQ(1, a2.Perform(std::make_tuple('a')));
  EXPECT_EQ(0, a2.Perform(std::make_tuple('\0')));
}
// Polymorphic action implementation: performing it yields the second argument
// of the mocked call, for any function type with at least two arguments.
class ReturnSecondArgumentAction {
 public:
  template <typename Result, typename ArgumentTuple>
  Result Perform(const ArgumentTuple& args) {
    // Tuple element #1 is the second call argument; it converts implicitly
    // to the expected Result type on return.
    const auto& second_arg = std::get<1>(args);
    return second_arg;
  }
};
// Polymorphic action implementation for nullary functions: always produces
// zero (converted to whatever Result type the mocked function returns).
class ReturnZeroFromNullaryFunctionAction {
 public:
  template <typename Result>
  Result Perform(const std::tuple<>&) const {
    const Result zero = 0;
    return zero;
  }
};
// Factory wrapping ReturnSecondArgumentAction into a polymorphic action.
PolymorphicAction<ReturnSecondArgumentAction> ReturnSecondArgument() {
  return MakePolymorphicAction(ReturnSecondArgumentAction());
}
// Factory wrapping ReturnZeroFromNullaryFunctionAction likewise.
PolymorphicAction<ReturnZeroFromNullaryFunctionAction>
ReturnZeroFromNullaryFunction() {
  return MakePolymorphicAction(ReturnZeroFromNullaryFunctionAction());
}
// A polymorphic action adapts to any compatible function type.
TEST(MakePolymorphicActionTest, ConstructsActionFromImpl) {
  Action<int(bool, int, double)> a1 = ReturnSecondArgument();
  EXPECT_EQ(5, a1.Perform(std::make_tuple(false, 5, 2.0)));
}
// Perform may be templated on the result type only (nullary case).
TEST(MakePolymorphicActionTest, WorksWhenPerformHasOneTemplateParameter) {
  Action<int()> a1 = ReturnZeroFromNullaryFunction();
  EXPECT_EQ(0, a1.Perform(std::make_tuple()));
  Action<void*()> a2 = ReturnZeroFromNullaryFunction();
  EXPECT_TRUE(a2.Perform(std::make_tuple()) == nullptr);
}
// Return() with no argument is the void action.
TEST(ReturnTest, WorksForVoid) {
  const Action<void(int)> ret = Return();
  return ret.Perform(std::make_tuple(1));
}
// Return(v) yields v each time it is performed.
TEST(ReturnTest, ReturnsGivenValue) {
  Action<int()> ret = Return(1);
  EXPECT_EQ(1, ret.Perform(std::make_tuple()));
  ret = Return(-5);
  EXPECT_EQ(-5, ret.Perform(std::make_tuple()));
}
// String literals work for both const char* and std::string results.
TEST(ReturnTest, AcceptsStringLiteral) {
  Action<const char*()> a1 = Return("Hello");
  EXPECT_STREQ("Hello", a1.Perform(std::make_tuple()));
  Action<std::string()> a2 = Return("world");
  EXPECT_EQ("world", a2.Perform(std::make_tuple()));
}
// Return keeps the stored value alive long enough for a result type that
// merely holds a reference/pointer into it.
TEST(ReturnTest, SupportsReferenceLikeReturnType) {
  struct Result {
    const std::vector<int>* v;
    Result(const std::vector<int>& vec) : v(&vec) {}
  };
  MockFunction<Result()> mock;
  EXPECT_CALL(mock, Call)
      .WillOnce(Return(std::vector<int>{17, 19, 23}))
      .WillRepeatedly(Return(std::vector<int>{29, 31, 37}));
  EXPECT_THAT(mock.AsStdFunction()(),
              Field(&Result::v, Pointee(ElementsAre(17, 19, 23))));
  EXPECT_THAT(mock.AsStdFunction()(),
              Field(&Result::v, Pointee(ElementsAre(29, 31, 37))));
}
// When both a conversion operator and a converting constructor exist, Return
// must follow normal C++ overload rules and use the conversion operator.
TEST(ReturnTest, PrefersConversionOperator) {
  struct In;
  struct Out {
    int x;
    explicit Out(const int val) : x(val) {}
    explicit Out(const In&) : x(0) {}
  };
  struct In {
    operator Out() const { return Out{19}; }
  };
  EXPECT_THAT([]() -> Out { return In(); }(), Field(&Out::x, 19));
  MockFunction<Out()> mock;
  EXPECT_CALL(mock, Call).WillOnce(Return(In()));
  EXPECT_THAT(mock.AsStdFunction()(), Field(&Out::x, 19));
}
// Return must convert via const lvalue reference, never from an rvalue, so
// the stored value can be reused across repeated calls.
TEST(ReturnTest, ConversionRequiresConstLvalueReference) {
  using R = int;
  using U = std::reference_wrapper<const int>;
  static_assert(std::is_convertible<const R&, U>::value, "");
  static_assert(!std::is_convertible<R, U>::value, "");
  MockFunction<U()> mock;
  EXPECT_CALL(mock, Call).WillOnce(Return(17)).WillRepeatedly(Return(19));
  EXPECT_EQ(17, mock.AsStdFunction()());
  EXPECT_EQ(19, mock.AsStdFunction()());
}
// Conversely, a result type constructible only from a mutable lvalue must
// not be accepted: Return exposes its stored value as const.
TEST(ReturnTest, ConversionRequiresMutableLvalueReference) {
  struct S {
    S(std::string&) {}
  };
  static_assert(std::is_convertible<std::string&, S>::value, "");
#ifndef _MSC_VER
  static_assert(!std::is_convertible<std::string&&, S>::value, "");
#endif
  static_assert(!std::is_convertible<const std::string&, S>::value, "");
  using RA = decltype(Return(std::string()));
  static_assert(!std::is_convertible<RA, Action<S()>>::value, "");
#ifndef _MSC_VER
  static_assert(!std::is_convertible<RA, OnceAction<S()>>::value, "");
#endif
}
// Move-only results are allowed with OnceAction (WillOnce) but must not
// convert to a repeatable Action.
TEST(ReturnTest, MoveOnlyResultType) {
  {
    MockFunction<std::unique_ptr<int>()> mock;
    EXPECT_CALL(mock, Call)
        .WillOnce(Return(std::unique_ptr<int>(new int(17))));
    EXPECT_THAT(mock.AsStdFunction()(), Pointee(17));
  }
  static_assert(!std::is_convertible<decltype(Return(std::unique_ptr<int>())),
                                     Action<std::unique_ptr<int>()>>::value,
                "");
}
// Simple hierarchy used to test covariance of Return/ReturnRef.
struct Base {
  bool operator==(const Base&) { return true; }
};
struct Derived : public Base {
  bool operator==(const Derived&) { return true; }
};
// Return(&derived) is usable where a Base* is expected.
TEST(ReturnTest, IsCovariant) {
  Base base;
  Derived derived;
  Action<Base*()> ret = Return(&base);
  EXPECT_EQ(&base, ret.Perform(std::make_tuple()));
  ret = Return(&derived);
  EXPECT_EQ(&derived, ret.Perform(std::make_tuple()));
}
// FromType carries a pointer to a flag that is flipped whenever a FromType
// is converted into a ToType, letting tests detect when conversion occurs.
class FromType {
 public:
  explicit FromType(bool* is_converted) : flag_(is_converted) {}

  // The flag that records conversions of this object.
  bool* converted() const { return flag_; }

 private:
  bool* const flag_;
};
// Converting construction from FromType records the event in the flag.
class ToType {
 public:
  ToType(const FromType& x) {
    bool* const conversion_flag = x.converted();
    *conversion_flag = true;
  }
};
// Return must convert its argument eagerly (when the action is created),
// not lazily at Perform time.
TEST(ReturnTest, ConvertsArgumentWhenConverted) {
  bool converted = false;
  FromType x(&converted);
  Action<ToType()> action(Return(x));
  EXPECT_TRUE(converted) << "Return must convert its argument in its own "
                         << "conversion operator.";
  converted = false;
  action.Perform(std::tuple<>());
  EXPECT_FALSE(converted) << "Action must NOT convert its argument "
                          << "when performed.";
}
// ReturnNull works for any raw-pointer-returning function.
TEST(ReturnNullTest, WorksInPointerReturningFunction) {
  const Action<int*()> a1 = ReturnNull();
  EXPECT_TRUE(a1.Perform(std::make_tuple()) == nullptr);
  const Action<const char*(bool)> a2 = ReturnNull();
  EXPECT_TRUE(a2.Perform(std::make_tuple(true)) == nullptr);
}
// ...and for smart-pointer results too.
TEST(ReturnNullTest, WorksInSmartPointerReturningFunction) {
  const Action<std::unique_ptr<const int>()> a1 = ReturnNull();
  EXPECT_TRUE(a1.Perform(std::make_tuple()) == nullptr);
  const Action<std::shared_ptr<int>(std::string)> a2 = ReturnNull();
  EXPECT_TRUE(a2.Perform(std::make_tuple("foo")) == nullptr);
}
// ReturnRef yields a reference to the exact object passed in.
TEST(ReturnRefTest, WorksForReference) {
  const int n = 0;
  const Action<const int&(bool)> ret = ReturnRef(n);
  EXPECT_EQ(&n, &ret.Perform(std::make_tuple(true)));
}
// A Derived& can be returned where a Base& is expected.
TEST(ReturnRefTest, IsCovariant) {
  Base base;
  Derived derived;
  Action<Base&()> a = ReturnRef(base);
  EXPECT_EQ(&base, &a.Perform(std::make_tuple()));
  a = ReturnRef(derived);
  EXPECT_EQ(&derived, &a.Perform(std::make_tuple()));
}
// SFINAE probe: the first overload is viable only when ReturnRef(arg)
// compiles; otherwise overload resolution falls back to the Unused one.
template <typename T, typename = decltype(ReturnRef(std::declval<T&&>()))>
bool CanCallReturnRef(T&&) {
  return true;
}
bool CanCallReturnRef(Unused) { return false; }
// ReturnRef accepts lvalues (const or not, scalar or not)...
TEST(ReturnRefTest, WorksForNonTemporary) {
  int scalar_value = 123;
  EXPECT_TRUE(CanCallReturnRef(scalar_value));
  std::string non_scalar_value("ABC");
  EXPECT_TRUE(CanCallReturnRef(non_scalar_value));
  const int const_scalar_value{321};
  EXPECT_TRUE(CanCallReturnRef(const_scalar_value));
  const std::string const_non_scalar_value("CBA");
  EXPECT_TRUE(CanCallReturnRef(const_non_scalar_value));
}
// ...but rejects temporaries, which would dangle.
TEST(ReturnRefTest, DoesNotWorkForTemporary) {
  auto scalar_value = []() -> int { return 123; };
  EXPECT_FALSE(CanCallReturnRef(scalar_value()));
  auto non_scalar_value = []() -> std::string { return "ABC"; };
  EXPECT_FALSE(CanCallReturnRef(non_scalar_value()));
  EXPECT_FALSE(CanCallReturnRef(static_cast<const int>(321)));
  auto const_non_scalar_value = []() -> const std::string { return "CBA"; };
  EXPECT_FALSE(CanCallReturnRef(const_non_scalar_value()));
}
// ReturnRefOfCopy returns a reference to an internal copy, not the original,
// so later mutation of the original is not observed.
TEST(ReturnRefOfCopyTest, WorksForReference) {
  int n = 42;
  const Action<const int&()> ret = ReturnRefOfCopy(n);
  EXPECT_NE(&n, &ret.Perform(std::make_tuple()));
  EXPECT_EQ(42, ret.Perform(std::make_tuple()));
  n = 43;
  EXPECT_NE(&n, &ret.Perform(std::make_tuple()));
  EXPECT_EQ(42, ret.Perform(std::make_tuple()));
}
TEST(ReturnRefOfCopyTest, IsCovariant) {
  Base base;
  Derived derived;
  Action<Base&()> a = ReturnRefOfCopy(base);
  EXPECT_NE(&base, &a.Perform(std::make_tuple()));
  a = ReturnRefOfCopy(derived);
  EXPECT_NE(&derived, &a.Perform(std::make_tuple()));
}
// ReturnRoundRobin cycles through its values and wraps around.
TEST(ReturnRoundRobinTest, WorksForInitList) {
  Action<int()> ret = ReturnRoundRobin({1, 2, 3});
  EXPECT_EQ(1, ret.Perform(std::make_tuple()));
  EXPECT_EQ(2, ret.Perform(std::make_tuple()));
  EXPECT_EQ(3, ret.Perform(std::make_tuple()));
  EXPECT_EQ(1, ret.Perform(std::make_tuple()));
  EXPECT_EQ(2, ret.Perform(std::make_tuple()));
  EXPECT_EQ(3, ret.Perform(std::make_tuple()));
}
// Same behavior when constructed from a std::vector of values.
TEST(ReturnRoundRobinTest, WorksForVector) {
  std::vector<double> v = {4.4, 5.5, 6.6};
  Action<double()> ret = ReturnRoundRobin(v);
  EXPECT_EQ(4.4, ret.Perform(std::make_tuple()));
  EXPECT_EQ(5.5, ret.Perform(std::make_tuple()));
  EXPECT_EQ(6.6, ret.Perform(std::make_tuple()));
  EXPECT_EQ(4.4, ret.Perform(std::make_tuple()));
  EXPECT_EQ(5.5, ret.Perform(std::make_tuple()));
  EXPECT_EQ(6.6, ret.Perform(std::make_tuple()));
}
// Mock with a mix of value-, class-, and move-only-returning methods, used
// by the DoDefault and move-only-value tests below.
class MockClass {
 public:
  MockClass() = default;
  MOCK_METHOD1(IntFunc, int(bool flag));
  MOCK_METHOD0(Foo, MyNonDefaultConstructible());
  MOCK_METHOD0(MakeUnique, std::unique_ptr<int>());
  MOCK_METHOD0(MakeUniqueBase, std::unique_ptr<Base>());
  MOCK_METHOD0(MakeVectorUnique, std::vector<std::unique_ptr<int>>());
  MOCK_METHOD1(TakeUnique, int(std::unique_ptr<int>));
  MOCK_METHOD2(TakeUnique,
               int(const std::unique_ptr<int>&, std::unique_ptr<int>));

 private:
  // Non-copyable, as mocks generally are.
  MockClass(const MockClass&) = delete;
  MockClass& operator=(const MockClass&) = delete;
};
// With no ON_CALL, DoDefault falls back to the built-in default (0 for int).
TEST(DoDefaultTest, ReturnsBuiltInDefaultValueByDefault) {
  MockClass mock;
  EXPECT_CALL(mock, IntFunc(_)).WillOnce(DoDefault());
  EXPECT_EQ(0, mock.IntFunc(true));
}
// DoDefault on a type with no default value throws or dies.
TEST(DoDefaultDeathTest, DiesForUnknowType) {
  MockClass mock;
  EXPECT_CALL(mock, Foo()).WillRepeatedly(DoDefault());
#if GTEST_HAS_EXCEPTIONS
  EXPECT_ANY_THROW(mock.Foo());
#else
  EXPECT_DEATH_IF_SUPPORTED({ mock.Foo(); }, "");
#endif
}
void VoidFunc(bool ) {}
// DoDefault may only appear alone, not inside a DoAll.
TEST(DoDefaultDeathTest, DiesIfUsedInCompositeAction) {
  MockClass mock;
  EXPECT_CALL(mock, IntFunc(_))
      .WillRepeatedly(DoAll(Invoke(VoidFunc), DoDefault()));
  EXPECT_DEATH_IF_SUPPORTED({ mock.IntFunc(true); }, "");
}
// A user-set per-type default takes precedence over the built-in one.
TEST(DoDefaultTest, ReturnsUserSpecifiedPerTypeDefaultValueWhenThereIsOne) {
  DefaultValue<int>::Set(1);
  MockClass mock;
  EXPECT_CALL(mock, IntFunc(_)).WillOnce(DoDefault());
  EXPECT_EQ(1, mock.IntFunc(false));
  DefaultValue<int>::Clear();
}
// An ON_CALL action takes precedence over both defaults.
TEST(DoDefaultTest, DoesWhatOnCallSpecifies) {
  MockClass mock;
  ON_CALL(mock, IntFunc(_)).WillByDefault(Return(2));
  EXPECT_CALL(mock, IntFunc(_)).WillOnce(DoDefault());
  EXPECT_EQ(2, mock.IntFunc(false));
}
// Using DoDefault inside ON_CALL itself is a (non-fatal) user error.
TEST(DoDefaultTest, CannotBeUsedInOnCall) {
  MockClass mock;
  EXPECT_NONFATAL_FAILURE(
      {
        ON_CALL(mock, IntFunc(_)).WillByDefault(DoDefault());
      },
      "DoDefault() cannot be used in ON_CALL()");
}
// SetArgPointee<N> writes through the N-th (0-based) pointer argument and
// leaves the other arguments untouched.
TEST(SetArgPointeeTest, SetsTheNthPointee) {
  typedef void MyFunction(bool, int*, char*);
  Action<MyFunction> a = SetArgPointee<1>(2);
  int n = 0;
  char ch = '\0';
  a.Perform(std::make_tuple(true, &n, &ch));
  EXPECT_EQ(2, n);
  EXPECT_EQ('\0', ch);
  a = SetArgPointee<2>('a');
  n = 0;
  ch = '\0';
  a.Perform(std::make_tuple(true, &n, &ch));
  EXPECT_EQ(0, n);
  EXPECT_EQ('a', ch);
}
// String literals work both for std::string* and const char** targets.
TEST(SetArgPointeeTest, AcceptsStringLiteral) {
  typedef void MyFunction(std::string*, const char**);
  Action<MyFunction> a = SetArgPointee<0>("hi");
  std::string str;
  const char* ptr = nullptr;
  a.Perform(std::make_tuple(&str, &ptr));
  EXPECT_EQ("hi", str);
  EXPECT_TRUE(ptr == nullptr);
  a = SetArgPointee<1>("world");
  str = "";
  a.Perform(std::make_tuple(&str, &ptr));
  EXPECT_EQ("", str);
  EXPECT_STREQ("world", ptr);
}
// Same for wide-string literals (std::wstring only when available).
TEST(SetArgPointeeTest, AcceptsWideStringLiteral) {
  typedef void MyFunction(const wchar_t**);
  Action<MyFunction> a = SetArgPointee<0>(L"world");
  const wchar_t* ptr = nullptr;
  a.Perform(std::make_tuple(&ptr));
  EXPECT_STREQ(L"world", ptr);
#if GTEST_HAS_STD_WSTRING
  typedef void MyStringFunction(std::wstring*);
  Action<MyStringFunction> a2 = SetArgPointee<0>(L"world");
  std::wstring str = L"";
  a2.Perform(std::make_tuple(&str));
  EXPECT_EQ(L"world", str);
#endif
}
// Plain char pointers are stored as pointers (no deep copy into const
// char** targets).
TEST(SetArgPointeeTest, AcceptsCharPointer) {
  typedef void MyFunction(bool, std::string*, const char**);
  const char* const hi = "hi";
  Action<MyFunction> a = SetArgPointee<1>(hi);
  std::string str;
  const char* ptr = nullptr;
  a.Perform(std::make_tuple(true, &str, &ptr));
  EXPECT_EQ("hi", str);
  EXPECT_TRUE(ptr == nullptr);
  char world_array[] = "world";
  char* const world = world_array;
  a = SetArgPointee<2>(world);
  str = "";
  a.Perform(std::make_tuple(true, &str, &ptr));
  EXPECT_EQ("", str);
  EXPECT_EQ(world, ptr);
}
TEST(SetArgPointeeTest, AcceptsWideCharPointer) {
  typedef void MyFunction(bool, const wchar_t**);
  const wchar_t* const hi = L"hi";
  Action<MyFunction> a = SetArgPointee<1>(hi);
  const wchar_t* ptr = nullptr;
  a.Perform(std::make_tuple(true, &ptr));
  EXPECT_EQ(hi, ptr);
#if GTEST_HAS_STD_WSTRING
  typedef void MyStringFunction(bool, std::wstring*);
  wchar_t world_array[] = L"world";
  wchar_t* const world = world_array;
  Action<MyStringFunction> a2 = SetArgPointee<1>(world);
  std::wstring str;
  a2.Perform(std::make_tuple(true, &str));
  EXPECT_EQ(world_array, str);
#endif
}
// The deprecated SetArgumentPointee alias behaves like SetArgPointee.
TEST(SetArgumentPointeeTest, SetsTheNthPointee) {
  typedef void MyFunction(bool, int*, char*);
  Action<MyFunction> a = SetArgumentPointee<1>(2);
  int n = 0;
  char ch = '\0';
  a.Perform(std::make_tuple(true, &n, &ch));
  EXPECT_EQ(2, n);
  EXPECT_EQ('\0', ch);
  a = SetArgumentPointee<2>('a');
  n = 0;
  ch = '\0';
  a.Perform(std::make_tuple(true, &n, &ch));
  EXPECT_EQ(0, n);
  EXPECT_EQ('a', ch);
}
// Nullary helpers used as Invoke/InvokeWithoutArgs targets.
int Nullary() { return 1; }
class NullaryFunctor {
 public:
  int operator()() { return 2; }
};
// Global flag set by void helpers so tests can observe that they ran.
bool g_done = false;
void VoidNullary() { g_done = true; }
class VoidNullaryFunctor {
 public:
  void operator()() { g_done = true; }
};
// Identity helpers: each returns its argument unchanged. They exist to pin
// the argument to an exact type when building test tuples.
short Short(short value) {
  return value;
}
char Char(char value) {
  return value;
}
const char* CharPtr(const char* value) {
  return value;
}
// Predicate helper: true exactly when the argument is negative.
bool Unary(int value) {
  return !(value >= 0);
}
// Pointer-arithmetic helper: returns a pointer n characters past `input`.
const char* Binary(const char* input, short n) {
  return &input[n];
}
// Two-argument void helper: ignores its arguments and records that it ran by
// setting the file-global g_done flag.
void VoidBinary(int, char) { g_done = true; }
// Sums its three arguments (char/short promote to int).
int Ternary(int a, char b, short c) {
  int total = a;
  total += b;
  total += c;
  return total;
}
int SumOf4(int a, int b, int c, int d) { return a + b + c + d; }
// Object with a const nullary member function; used as an
// InvokeWithoutArgs(&obj, &Foo::Nullary) target.
class Foo {
 public:
  Foo() = default;

  // Reports the fixed value stored at construction (123).
  int Nullary() const { return value_; }

 private:
  int value_ = 123;
};
// InvokeWithoutArgs discards the mock call's arguments and calls a nullary
// free function.
TEST(InvokeWithoutArgsTest, Function) {
  Action<int(int)> a = InvokeWithoutArgs(Nullary);
  EXPECT_EQ(1, a.Perform(std::make_tuple(2)));
  Action<int(int, double)> a2 = InvokeWithoutArgs(Nullary);
  EXPECT_EQ(1, a2.Perform(std::make_tuple(2, 3.5)));
  Action<void(int)> a3 = InvokeWithoutArgs(VoidNullary);
  g_done = false;
  a3.Perform(std::make_tuple(1));
  EXPECT_TRUE(g_done);
}
// ...or a nullary functor object.
TEST(InvokeWithoutArgsTest, Functor) {
  Action<int()> a = InvokeWithoutArgs(NullaryFunctor());
  EXPECT_EQ(2, a.Perform(std::make_tuple()));
  Action<int(int, double, char)> a2 =
      InvokeWithoutArgs(NullaryFunctor());
  EXPECT_EQ(2, a2.Perform(std::make_tuple(3, 3.5, 'a')));
  Action<void()> a3 = InvokeWithoutArgs(VoidNullaryFunctor());
  g_done = false;
  a3.Perform(std::make_tuple());
  EXPECT_TRUE(g_done);
}
// ...or a nullary member function on a given object.
TEST(InvokeWithoutArgsTest, Method) {
  Foo foo;
  Action<int(bool, char)> a =
      InvokeWithoutArgs(&foo, &Foo::Nullary);
  EXPECT_EQ(123, a.Perform(std::make_tuple(true, 'a')));
}
// IgnoreResult adapts a value-returning action to a void function type.
TEST(IgnoreResultTest, PolymorphicAction) {
  Action<void(int)> a = IgnoreResult(Return(5));
  a.Perform(std::make_tuple(1));
}
// Helper whose side effect (g_done) proves the inner action still ran.
int ReturnOne() {
  g_done = true;
  return 1;
}
TEST(IgnoreResultTest, MonomorphicAction) {
  g_done = false;
  Action<void()> a = IgnoreResult(Invoke(ReturnOne));
  a.Perform(std::make_tuple());
  EXPECT_TRUE(g_done);
}
// Helper returning a class type; sets g_done so tests can see it ran.
MyNonDefaultConstructible ReturnMyNonDefaultConstructible(double ) {
  g_done = true;
  return MyNonDefaultConstructible(42);
}
// IgnoreResult also works when the discarded result is a class type.
TEST(IgnoreResultTest, ActionReturningClass) {
  g_done = false;
  Action<void(int)> a =
      IgnoreResult(Invoke(ReturnMyNonDefaultConstructible));
  a.Perform(std::make_tuple(2));
  EXPECT_TRUE(g_done);
}
// Assign(&x, v) stores v into x when the action is performed.
TEST(AssignTest, Int) {
  int x = 0;
  Action<void(int)> a = Assign(&x, 5);
  a.Perform(std::make_tuple(0));
  EXPECT_EQ(5, x);
}
TEST(AssignTest, String) {
  ::std::string x;
  Action<void(void)> a = Assign(&x, "Hello, world");
  a.Perform(std::make_tuple());
  EXPECT_EQ("Hello, world", x);
}
// The assigned value may convert to the target type (int -> double here).
TEST(AssignTest, CompatibleTypes) {
  double x = 0;
  Action<void(int)> a = Assign(&x, 5);
  a.Perform(std::make_tuple(0));
  EXPECT_DOUBLE_EQ(5, x);
}
// DoAll must invoke rvalue-ref-qualified call operators on its sub-actions.
TEST(DoAll, SupportsRefQualifiedActions) {
  struct InitialAction {
    void operator()(const int arg) && { EXPECT_EQ(17, arg); }
  };
  struct FinalAction {
    int operator()() && { return 19; }
  };
  MockFunction<int(int)> mock;
  EXPECT_CALL(mock, Call).WillOnce(DoAll(InitialAction{}, FinalAction{}));
  EXPECT_EQ(19, mock.AsStdFunction()(17));
}
// DoAll must pass arguments to all but the last sub-action as (const)
// lvalue references -- never as rvalues -- so the final action still gets a
// usable value. Each scope below pins one function-type / value-category
// combination via overloads that FAIL on the wrong reference kind.
TEST(DoAll, ProvidesLvalueReferencesToInitialActions) {
  struct Obj {};
  {
    // By-value parameter: initial actions must see `const Obj&`.
    struct InitialAction {
      void operator()(Obj&) const { FAIL() << "Unexpected call"; }
      void operator()(const Obj&) const {}
      void operator()(Obj&&) const { FAIL() << "Unexpected call"; }
      void operator()(const Obj&&) const { FAIL() << "Unexpected call"; }
    };
    MockFunction<void(Obj)> mock;
    EXPECT_CALL(mock, Call)
        .WillOnce(DoAll(InitialAction{}, InitialAction{}, [](Obj&&) {}))
        .WillRepeatedly(DoAll(InitialAction{}, InitialAction{}, [](Obj&&) {}));
    mock.AsStdFunction()(Obj{});
    mock.AsStdFunction()(Obj{});
  }
  {
    // const Obj& parameter: same requirement.
    struct InitialAction {
      void operator()(Obj&) const { FAIL() << "Unexpected call"; }
      void operator()(const Obj&) const {}
      void operator()(Obj&&) const { FAIL() << "Unexpected call"; }
      void operator()(const Obj&&) const { FAIL() << "Unexpected call"; }
    };
    MockFunction<void(const Obj&)> mock;
    EXPECT_CALL(mock, Call)
        .WillOnce(DoAll(InitialAction{}, InitialAction{}, [](const Obj&) {}))
        .WillRepeatedly(
            DoAll(InitialAction{}, InitialAction{}, [](const Obj&) {}));
    mock.AsStdFunction()(Obj{});
    mock.AsStdFunction()(Obj{});
  }
  {
    // Obj& parameter: initial actions must see a mutable lvalue, not an
    // rvalue.
    struct InitialAction {
      void operator()(Obj&) const {}
      void operator()(Obj&&) const { FAIL() << "Unexpected call"; }
    };
    MockFunction<void(Obj&)> mock;
    EXPECT_CALL(mock, Call)
        .WillOnce(DoAll(InitialAction{}, InitialAction{}, [](Obj&) {}))
        .WillRepeatedly(DoAll(InitialAction{}, InitialAction{}, [](Obj&) {}));
    Obj obj;
    mock.AsStdFunction()(obj);
    mock.AsStdFunction()(obj);
  }
  {
    // Obj&& parameter: initial actions still see an lvalue; only the final
    // action receives the rvalue.
    struct InitialAction {
      void operator()(Obj&) const {}
      void operator()(Obj&&) const { FAIL() << "Unexpected call"; }
    };
    MockFunction<void(Obj&&)> mock;
    EXPECT_CALL(mock, Call)
        .WillOnce(DoAll(InitialAction{}, InitialAction{}, [](Obj&&) {}))
        .WillRepeatedly(DoAll(InitialAction{}, InitialAction{}, [](Obj&&) {}));
    mock.AsStdFunction()(Obj{});
    mock.AsStdFunction()(Obj{});
  }
  {
    // Ref-qualified initial actions also work with reference parameters
    // (WillOnce-only, so the action itself may be invoked as an rvalue).
    struct InitialAction {
      void operator()(Obj&) && {}
    };
    MockFunction<void(Obj&)> mock;
    EXPECT_CALL(mock, Call)
        .WillOnce(DoAll(InitialAction{}, InitialAction{}, [](Obj&) {}));
    Obj obj;
    mock.AsStdFunction()(obj);
  }
  {
    struct InitialAction {
      void operator()(Obj&) && {}
    };
    MockFunction<void(Obj&&)> mock;
    EXPECT_CALL(mock, Call)
        .WillOnce(DoAll(InitialAction{}, InitialAction{}, [](Obj&&) {}));
    mock.AsStdFunction()(Obj{});
  }
}
// DoAll accepts already-type-erased Action<> objects as sub-actions.
TEST(DoAll, SupportsTypeErasedActions) {
  const Action<void()> initial_action = [] {};
  const Action<int()> final_action = [] { return 17; };
  MockFunction<int()> mock;
  EXPECT_CALL(mock, Call)
      .WillOnce(DoAll(initial_action, initial_action, final_action))
      .WillRepeatedly(DoAll(initial_action, initial_action, final_action));
  EXPECT_EQ(17, mock.AsStdFunction()());
  {
    // A move-only, rvalue-ref-qualified final action is fine with WillOnce.
    struct FinalAction {
      FinalAction() = default;
      FinalAction(FinalAction&&) = default;
      int operator()() && { return 17; }
    };
    EXPECT_CALL(mock, Call)
        .WillOnce(DoAll(initial_action, initial_action, FinalAction{}));
    EXPECT_EQ(17, mock.AsStdFunction()());
  }
}
// Sub-actions may rely on user-provided conversion operators to Action<F>;
// DoAll must pick the conversion matching the mocked function type.
TEST(DoAll, ConvertibleToOnceActionWithUserProvidedActionConversion) {
  struct CustomFinal final {
    operator Action<int()>() {
      return Return(17);
    }
    operator Action<int(int, char)>() {
      return Return(19);
    }
  };
  {
    OnceAction<int()> action = DoAll(CustomFinal{});
    EXPECT_EQ(17, std::move(action).Call());
  }
  {
    OnceAction<int(int, char)> action = DoAll(CustomFinal{});
    EXPECT_EQ(19, std::move(action).Call(0, 0));
  }
  struct CustomInitial final {
    operator Action<void()>() {
      return [] {};
    }
    operator Action<void(int, char)>() {
      return [] {};
    }
  };
  {
    OnceAction<int()> action = DoAll(CustomInitial{}, CustomFinal{});
    EXPECT_EQ(17, std::move(action).Call());
  }
  {
    OnceAction<int(int, char)> action = DoAll(CustomInitial{}, CustomFinal{});
    EXPECT_EQ(19, std::move(action).Call(0, 0));
  }
}
// WithArgs<k> forwards only the selected argument(s) to the inner action.
TEST(WithArgsTest, OneArg) {
  Action<bool(double x, int n)> a = WithArgs<1>(Invoke(Unary));
  EXPECT_TRUE(a.Perform(std::make_tuple(1.5, -1)));
  EXPECT_FALSE(a.Perform(std::make_tuple(1.5, 1)));
}
TEST(WithArgsTest, TwoArgs) {
  Action<const char*(const char* s, double x, short n)> a =
      WithArgs<0, 2>(Invoke(Binary));
  const char s[] = "Hello";
  EXPECT_EQ(s + 2, a.Perform(std::make_tuple(CharPtr(s), 0.5, Short(2))));
}
// Functor that concatenates any number of C strings into one std::string.
struct ConcatAll {
  // Base case: no arguments yields the empty string.
  std::string operator()() const { return std::string(); }

  // Recursive case: the first string followed by the concatenation of the
  // remaining ones.
  template <typename... Rest>
  std::string operator()(const char* head, Rest... rest) const {
    std::string result(head);
    result += ConcatAll()(rest...);
    return result;
  }
};
// WithArgs supports up to ten selectors, with repetition.
TEST(WithArgsTest, TenArgs) {
  Action<std::string(const char*, const char*, const char*, const char*)> a =
      WithArgs<0, 1, 2, 3, 2, 1, 0, 1, 2, 3>(Invoke(ConcatAll{}));
  EXPECT_EQ("0123210123",
            a.Perform(std::make_tuple(CharPtr("0"), CharPtr("1"), CharPtr("2"),
                                      CharPtr("3"))));
}
// Hand-written ActionInterface used to show WithArgs wraps any action, not
// just Invoke().
class SubtractAction : public ActionInterface<int(int, int)> {
 public:
  int Perform(const std::tuple<int, int>& args) override {
    return std::get<0>(args) - std::get<1>(args);
  }
};
// WithArgs works with non-Invoke inner actions too.
TEST(WithArgsTest, NonInvokeAction) {
  Action<int(const std::string&, int, int)> a =
      WithArgs<2, 1>(MakeAction(new SubtractAction));
  std::tuple<std::string, int, int> dummy =
      std::make_tuple(std::string("hi"), 2, 10);
  EXPECT_EQ(8, a.Perform(dummy));
}
// Selecting all arguments in order is the identity adaptation.
TEST(WithArgsTest, Identity) {
  Action<int(int x, char y, short z)> a =
      WithArgs<0, 1, 2>(Invoke(Ternary));
  EXPECT_EQ(123, a.Perform(std::make_tuple(100, Char(20), Short(3))));
}
// The same argument may be selected multiple times.
TEST(WithArgsTest, RepeatedArguments) {
  Action<int(bool, int m, int n)> a =
      WithArgs<1, 1, 1, 1>(Invoke(SumOf4));
  EXPECT_EQ(4, a.Perform(std::make_tuple(false, 1, 10)));
}
// Arguments may be reordered.
TEST(WithArgsTest, ReversedArgumentOrder) {
  Action<const char*(short n, const char* input)> a =
      WithArgs<1, 0>(Invoke(Binary));
  const char s[] = "Hello";
  EXPECT_EQ(s + 2, a.Perform(std::make_tuple(Short(2), CharPtr(s))));
}
// Selected arguments only need to be convertible to the inner action's
// parameter types, not identical.
TEST(WithArgsTest, ArgsOfCompatibleTypes) {
  Action<long(short x, char y, double z, char c)> a =
      WithArgs<0, 1, 3>(Invoke(Ternary));
  EXPECT_EQ(123,
            a.Perform(std::make_tuple(Short(100), Char(20), 5.6, Char(3))));
}
// Void inner actions are supported.
TEST(WithArgsTest, VoidAction) {
  Action<void(double x, char c, int n)> a = WithArgs<2, 1>(Invoke(VoidBinary));
  g_done = false;
  a.Perform(std::make_tuple(1.5, 'a', 3));
  EXPECT_TRUE(g_done);
}
// Reference results are forwarded without copying.
TEST(WithArgsTest, ReturnReference) {
  Action<int&(int&, void*)> aa = WithArgs<0>([](int& a) -> int& { return a; });
  int i = 0;
  const int& res = aa.Perform(std::forward_as_tuple(i, nullptr));
  EXPECT_EQ(&i, &res);
}
// The inner action's result may need conversion to the mock's return type.
TEST(WithArgsTest, InnerActionWithConversion) {
  Action<Derived*()> inner = [] { return nullptr; };
  MockFunction<Base*(double)> mock;
  EXPECT_CALL(mock, Call)
      .WillOnce(WithoutArgs(inner))
      .WillRepeatedly(WithoutArgs(inner));
  EXPECT_EQ(nullptr, mock.AsStdFunction()(1.1));
  EXPECT_EQ(nullptr, mock.AsStdFunction()(1.1));
}
// Rvalue-ref-qualified inner actions work under WithArg.
TEST(WithArgsTest, RefQualifiedInnerAction) {
  struct SomeAction {
    int operator()(const int arg) && {
      EXPECT_EQ(17, arg);
      return 19;
    }
  };
  MockFunction<int(int, int)> mock;
  EXPECT_CALL(mock, Call).WillOnce(WithArg<1>(SomeAction{}));
  EXPECT_EQ(19, mock.AsStdFunction()(0, 17));
}
#ifndef GTEST_OS_WINDOWS_MOBILE
// Fixture resets errno around each test so leakage in either direction is
// detected.
class SetErrnoAndReturnTest : public testing::Test {
 protected:
  void SetUp() override { errno = 0; }
  void TearDown() override { errno = 0; }
};
// SetErrnoAndReturn sets errno and produces the given result.
TEST_F(SetErrnoAndReturnTest, Int) {
  Action<int(void)> a = SetErrnoAndReturn(ENOTTY, -5);
  EXPECT_EQ(-5, a.Perform(std::make_tuple()));
  EXPECT_EQ(ENOTTY, errno);
}
TEST_F(SetErrnoAndReturnTest, Ptr) {
  int x;
  Action<int*(void)> a = SetErrnoAndReturn(ENOTTY, &x);
  EXPECT_EQ(&x, a.Perform(std::make_tuple()));
  EXPECT_EQ(ENOTTY, errno);
}
// The return value may convert to the function's result type (int -> double).
TEST_F(SetErrnoAndReturnTest, CompatibleTypes) {
  Action<double()> a = SetErrnoAndReturn(EINVAL, 5);
  EXPECT_DOUBLE_EQ(5.0, a.Perform(std::make_tuple()));
  EXPECT_EQ(EINVAL, errno);
}
#endif
// ByRef wrappers are copyable/assignable and keep referring to the original.
TEST(ByRefTest, IsCopyable) {
  const std::string s1 = "Hi";
  const std::string s2 = "Hello";
  auto ref_wrapper = ByRef(s1);
  const std::string& r1 = ref_wrapper;
  EXPECT_EQ(&s1, &r1);
  ref_wrapper = ByRef(s2);
  const std::string& r2 = ref_wrapper;
  EXPECT_EQ(&s2, &r2);
  auto ref_wrapper1 = ByRef(s1);
  ref_wrapper = ref_wrapper1;
  const std::string& r3 = ref_wrapper;
  EXPECT_EQ(&s1, &r3);
}
// ByRef of a const object yields a const reference to that object.
TEST(ByRefTest, ConstValue) {
  const int n = 0;
  const int& const_ref = ByRef(n);
  EXPECT_EQ(&n, &const_ref);
}
// ByRef of a mutable object binds to both mutable and const references.
TEST(ByRefTest, NonConstValue) {
  int n = 0;
  int& ref = ByRef(n);
  EXPECT_EQ(&n, &ref);
  const int& const_ref = ByRef(n);
  EXPECT_EQ(&n, &const_ref);
}
// ByRef<T>(x) forces the wrapper to type T (adding const or upcasting).
TEST(ByRefTest, ExplicitType) {
  int n = 0;
  const int& r1 = ByRef<const int>(n);
  EXPECT_EQ(&n, &r1);
  Derived d;
  Derived& r2 = ByRef<Derived>(d);
  EXPECT_EQ(&d, &r2);
  const Derived& r3 = ByRef<const Derived>(d);
  EXPECT_EQ(&d, &r3);
  Base& r4 = ByRef<Base>(d);
  EXPECT_EQ(&d, &r4);
  const Base& r5 = ByRef<const Base>(d);
  EXPECT_EQ(&d, &r5);
}
// Printing a ByRef wrapper prints the referenced value.
TEST(ByRefTest, PrintsCorrectly) {
  int n = 42;
  ::std::stringstream expected, actual;
  testing::internal::UniversalPrinter<const int&>::Print(n, &expected);
  testing::internal::UniversalPrint(ByRef(n), &actual);
  EXPECT_EQ(expected.str(), actual.str());
}
// Type with a single-argument constructor; exercises ReturnNew<T>(arg).
struct UnaryConstructorClass {
  int value;

  explicit UnaryConstructorClass(int init) : value(init) {}
};
// ReturnNew<T>(args...) heap-allocates a fresh T on every performance; the
// test owns (and must delete) the result.
TEST(ReturnNewTest, Unary) {
  Action<UnaryConstructorClass*()> a = ReturnNew<UnaryConstructorClass>(4000);
  UnaryConstructorClass* c = a.Perform(std::make_tuple());
  EXPECT_EQ(4000, c->value);
  delete c;
}
// The mocked function's own arguments are ignored.
TEST(ReturnNewTest, UnaryWorksWhenMockMethodHasArgs) {
  Action<UnaryConstructorClass*(bool, int)> a =
      ReturnNew<UnaryConstructorClass>(4000);
  UnaryConstructorClass* c = a.Perform(std::make_tuple(false, 5));
  EXPECT_EQ(4000, c->value);
  delete c;
}
// The result pointer may be const-qualified.
TEST(ReturnNewTest, UnaryWorksWhenMockMethodReturnsPointerToConst) {
  Action<const UnaryConstructorClass*()> a =
      ReturnNew<UnaryConstructorClass>(4000);
  const UnaryConstructorClass* c = a.Perform(std::make_tuple());
  EXPECT_EQ(4000, c->value);
  delete c;
}
// Type whose constructor takes ten ints and records their sum; exercises
// ReturnNew with the maximum supported argument count.
class TenArgConstructorClass {
 public:
  TenArgConstructorClass(int a1, int a2, int a3, int a4, int a5, int a6, int a7,
                         int a8, int a9, int a10) {
    value_ = a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8 + a9 + a10;
  }

  // Sum of all ten constructor arguments.
  int value_;
};
// ReturnNew supports constructors of up to ten arguments.
TEST(ReturnNewTest, ConstructorThatTakes10Arguments) {
  Action<TenArgConstructorClass*()> a = ReturnNew<TenArgConstructorClass>(
      1000000000, 200000000, 30000000, 4000000, 500000, 60000, 7000, 800, 90,
      0);
  TenArgConstructorClass* c = a.Perform(std::make_tuple());
  EXPECT_EQ(1234567890, c->value_);
  delete c;
}
// Factory returning a freshly allocated int holding the sentinel value 19.
std::unique_ptr<int> UniquePtrSource() {
  auto result = std::make_unique<int>(19);
  return result;
}
// Factory returning a one-element vector owning an int with value 7.
std::vector<std::unique_ptr<int>> VectorUniquePtrSource() {
  std::vector<std::unique_ptr<int>> result;
  result.push_back(std::make_unique<int>(7));
  return result;
}
// Return(ByMove(...)) lets a mock method return move-only types: unique_ptr,
// vectors of unique_ptr, and unique_ptr-to-base (with implicit upcast).
TEST(MockMethodTest, CanReturnMoveOnlyValue_Return) {
  MockClass mock;
  std::unique_ptr<int> i(new int(19));
  EXPECT_CALL(mock, MakeUnique()).WillOnce(Return(ByMove(std::move(i))));
  EXPECT_CALL(mock, MakeVectorUnique())
      .WillOnce(Return(ByMove(VectorUniquePtrSource())));
  Derived* d = new Derived;
  EXPECT_CALL(mock, MakeUniqueBase())
      .WillOnce(Return(ByMove(std::unique_ptr<Derived>(d))));
  std::unique_ptr<int> result1 = mock.MakeUnique();
  EXPECT_EQ(19, *result1);
  std::vector<std::unique_ptr<int>> vresult = mock.MakeVectorUnique();
  EXPECT_EQ(1u, vresult.size());
  EXPECT_NE(nullptr, vresult[0]);
  EXPECT_EQ(7, *vresult[0]);
  std::unique_ptr<Base> result2 = mock.MakeUniqueBase();
  EXPECT_EQ(d, result2.get());
}
// ByMove works as the final action of a DoAll chain; earlier actions still
// run (verified via the MockFunction call expectation).
TEST(MockMethodTest, CanReturnMoveOnlyValue_DoAllReturn) {
  testing::MockFunction<void()> mock_function;
  MockClass mock;
  std::unique_ptr<int> i(new int(19));
  EXPECT_CALL(mock_function, Call());
  EXPECT_CALL(mock, MakeUnique())
      .WillOnce(DoAll(InvokeWithoutArgs(&mock_function,
                                        &testing::MockFunction<void()>::Call),
                      Return(ByMove(std::move(i)))));
  std::unique_ptr<int> result1 = mock.MakeUnique();
  EXPECT_EQ(19, *result1);
}
// Move-only results can come from Invoke'd factory functions, and from a
// DefaultValue factory when no expectation matches.
TEST(MockMethodTest, CanReturnMoveOnlyValue_Invoke) {
  MockClass mock;
  DefaultValue<std::unique_ptr<int>>::SetFactory(
      [] { return std::make_unique<int>(42); });
  EXPECT_EQ(42, *mock.MakeUnique());
  EXPECT_CALL(mock, MakeUnique()).WillRepeatedly(Invoke(UniquePtrSource));
  EXPECT_CALL(mock, MakeVectorUnique())
      .WillRepeatedly(Invoke(VectorUniquePtrSource));
  std::unique_ptr<int> result1 = mock.MakeUnique();
  EXPECT_EQ(19, *result1);
  std::unique_ptr<int> result2 = mock.MakeUnique();
  EXPECT_EQ(19, *result2);
  EXPECT_NE(result1, result2);
  std::vector<std::unique_ptr<int>> vresult = mock.MakeVectorUnique();
  EXPECT_EQ(1u, vresult.size());
  EXPECT_NE(nullptr, vresult[0]);
  EXPECT_EQ(7, *vresult[0]);
}
// Mock methods accept move-only arguments; matchers, lambda actions, and
// RetiresOnSaturation all interoperate with unique_ptr parameters.
TEST(MockMethodTest, CanTakeMoveOnlyValue) {
  MockClass mock;
  auto make = [](int i) { return std::make_unique<int>(i); };
  EXPECT_CALL(mock, TakeUnique(_)).WillRepeatedly([](std::unique_ptr<int> i) {
    return *i;
  });
  EXPECT_CALL(mock, TakeUnique(testing::Pointee(7)))
      .WillOnce(Return(-7))
      .RetiresOnSaturation();
  EXPECT_CALL(mock, TakeUnique(testing::IsNull()))
      .WillOnce(Return(-1))
      .RetiresOnSaturation();
  EXPECT_EQ(5, mock.TakeUnique(make(5)));
  EXPECT_EQ(-7, mock.TakeUnique(make(7)));
  // Saturated expectations have retired: calls fall through to the catch-all.
  EXPECT_EQ(7, mock.TakeUnique(make(7)));
  EXPECT_EQ(7, mock.TakeUnique(make(7)));
  EXPECT_EQ(-1, mock.TakeUnique({}));
  auto lvalue = make(6);
  EXPECT_CALL(mock, TakeUnique(_, _))
      .WillOnce([](const std::unique_ptr<int>& i, std::unique_ptr<int> j) {
        return *i * *j;
      });
  EXPECT_EQ(42, mock.TakeUnique(lvalue, make(7)));
  // The action may take ownership of the move-only argument.
  std::unique_ptr<int> saved;
  EXPECT_CALL(mock, TakeUnique(_)).WillOnce([&saved](std::unique_ptr<int> i) {
    saved = std::move(i);
    return 0;
  });
  EXPECT_EQ(0, mock.TakeUnique(make(42)));
  EXPECT_EQ(42, *saved);
}
// WillOnce accepts an action whose call operator is &&-qualified (invoked at
// most once, so an rvalue invocation suffices).
TEST(MockMethodTest, ActionHasRvalueRefQualifiedCallOperator) {
  struct Return17 {
    int operator()() && { return 17; }
  };
  {
    MockFunction<int()> mock;
    EXPECT_CALL(mock, Call).WillOnce(Return17());
    EXPECT_EQ(17, mock.AsStdFunction()());
  }
  {
    MockFunction<int(int)> mock;
    EXPECT_CALL(mock, Call).WillOnce(Return17());
    EXPECT_EQ(17, mock.AsStdFunction()(0));
  }
}
// With both overloads available, WillOnce uses the &&-qualified call
// operator (17) and WillRepeatedly the const&-qualified one (19).
TEST(MockMethodTest, ActionHasMultipleCallOperators) {
  struct ReturnInt {
    int operator()() && { return 17; }
    int operator()() const& { return 19; }
  };
  {
    MockFunction<int()> mock;
    EXPECT_CALL(mock, Call).WillOnce(ReturnInt()).WillRepeatedly(ReturnInt());
    EXPECT_EQ(17, mock.AsStdFunction()());
    EXPECT_EQ(19, mock.AsStdFunction()());
    EXPECT_EQ(19, mock.AsStdFunction()());
  }
  {
    MockFunction<int(int)> mock;
    EXPECT_CALL(mock, Call).WillOnce(ReturnInt()).WillRepeatedly(ReturnInt());
    EXPECT_EQ(17, mock.AsStdFunction()(0));
    EXPECT_EQ(19, mock.AsStdFunction()(0));
    EXPECT_EQ(19, mock.AsStdFunction()(0));
  }
}
// WillOnce accepts move-only (non-copyable) action types, whether the call
// operator is rvalue-qualified or const.
TEST(MockMethodTest, MoveOnlyAction) {
  {
    struct Return17 {
      Return17() = default;
      Return17(Return17&&) = default;
      Return17(const Return17&) = delete;
      Return17 operator=(const Return17&) = delete;
      int operator()() && { return 17; }
    };
    MockFunction<int()> mock;
    EXPECT_CALL(mock, Call).WillOnce(Return17());
    EXPECT_EQ(17, mock.AsStdFunction()());
  }
  {
    struct Return17 {
      Return17() = default;
      Return17(Return17&&) = default;
      Return17(const Return17&) = delete;
      Return17 operator=(const Return17&) = delete;
      int operator()() const { return 17; }
    };
    MockFunction<int()> mock;
    EXPECT_CALL(mock, Call).WillOnce(Return17());
    EXPECT_EQ(17, mock.AsStdFunction()());
  }
}
// An action returning a value satisfies a void-returning mock method; the
// result is silently discarded.
TEST(MockMethodTest, ActionReturnsIgnoredValue) {
  struct ReturnInt {
    int operator()() const { return 0; }
  };
  MockFunction<void()> mock;
  EXPECT_CALL(mock, Call).WillOnce(ReturnInt()).WillRepeatedly(ReturnInt());
  mock.AsStdFunction()();
  mock.AsStdFunction()();
}
// WillOnce copies from an lvalue action rather than requiring an rvalue.
TEST(MockMethodTest, WillOnceCanAcceptLvalueReference) {
  MockFunction<int()> mock;
  const auto action = [] { return 17; };
  EXPECT_CALL(mock, Call).WillOnce(action);
  EXPECT_EQ(17, mock.AsStdFunction()());
}
// Functor that statically asserts it is invoked with exactly one argument
// and returns 17; used to verify that the mock forwards precisely the call's
// arguments to the action.
struct StaticAssertSingleArgument {
  template <typename... Args>
  static constexpr bool CheckArgs() {
    static_assert(sizeof...(Args) == 1, "");
    return true;
  }
  // The defaulted non-type template parameter forces CheckArgs<Args...>() to
  // be evaluated whenever the call operator is instantiated.
  template <typename... Args, bool = CheckArgs<Args...>()>
  int operator()(Args...) const {
    return 17;
  }
};
// The mock passes exactly the call's arguments to the action (checked at
// compile time by StaticAssertSingleArgument's arity assertion).
TEST(MockMethodTest, ActionSwallowsAllArguments) {
  MockFunction<int(int)> mock;
  EXPECT_CALL(mock, Call)
      .WillOnce(StaticAssertSingleArgument{})
      .WillRepeatedly(StaticAssertSingleArgument{});
  EXPECT_EQ(17, mock.AsStdFunction()(0));
  EXPECT_EQ(17, mock.AsStdFunction()(0));
}
// Converts to OnceAction (rvalue-qualified, yields 17) or Action (const,
// yields 19) for any argument list — used to check which conversion
// WillOnce/WillRepeatedly pick.
struct ActionWithTemplatedConversionOperators {
  template <typename... Args>
  operator OnceAction<int(Args...)>() && {
    return [] { return 17; };
  }
  template <typename... Args>
  operator Action<int(Args...)>() const {
    return [] { return 19; };
  }
};
// WillOnce uses the OnceAction conversion (17); WillRepeatedly uses the
// Action conversion (19).
TEST(MockMethodTest, ActionHasTemplatedConversionOperators) {
  MockFunction<int()> mock;
  EXPECT_CALL(mock, Call)
      .WillOnce(ActionWithTemplatedConversionOperators{})
      .WillRepeatedly(ActionWithTemplatedConversionOperators{});
  EXPECT_EQ(17, mock.AsStdFunction()());
  EXPECT_EQ(19, mock.AsStdFunction()());
}
// Returns val + ref + *ptr, then overwrites ref with 42 and *ptr with 43 so
// callers can observe by-reference / by-pointer argument mutation.
int Add(int val, int& ref, int* ptr) {
  const int sum = val + ref + *ptr;
  ref = 42;
  *ptr = 43;
  return sum;
}
// Consumes ownership of ptr and returns the pointed-to value.
int Deref(std::unique_ptr<int> ptr) {
  const int value = *ptr;
  return value;
}
// Polymorphic functor doubling any value that supports multiplication by an
// int; used to show one functor serving Actions of different signatures.
struct Double {
  template <typename T>
  T operator()(T t) {
    return t * 2;
  }
};
// Wraps i in a freshly allocated unique_ptr<int>.
std::unique_ptr<int> UniqueInt(int i) {
  return std::unique_ptr<int>(new int(i));
}
// A plain function pointer converts to an Action; reference and pointer
// parameters observe (and mutate) the caller's objects.
TEST(FunctorActionTest, ActionFromFunction) {
  Action<int(int, int&, int*)> a = &Add;
  int x = 1, y = 2, z = 3;
  EXPECT_EQ(6, a.Perform(std::forward_as_tuple(x, y, &z)));
  EXPECT_EQ(42, y);
  EXPECT_EQ(43, z);
  Action<int(std::unique_ptr<int>)> a1 = &Deref;
  EXPECT_EQ(7, a1.Perform(std::make_tuple(UniqueInt(7))));
}
// Lambdas — including capturing ones taking move-only arguments — convert
// to Actions.
TEST(FunctorActionTest, ActionFromLambda) {
  Action<int(bool, int)> a1 = [](bool b, int i) { return b ? i : 0; };
  EXPECT_EQ(5, a1.Perform(std::make_tuple(true, 5)));
  EXPECT_EQ(0, a1.Perform(std::make_tuple(false, 5)));
  std::unique_ptr<int> saved;
  Action<void(std::unique_ptr<int>)> a2 = [&saved](std::unique_ptr<int> p) {
    saved = std::move(p);
  };
  a2.Perform(std::make_tuple(UniqueInt(5)));
  EXPECT_EQ(5, *saved);
}
// One polymorphic functor can back Actions of different signatures.
TEST(FunctorActionTest, PolymorphicFunctor) {
  Action<int(int)> ai = Double();
  EXPECT_EQ(2, ai.Perform(std::make_tuple(1)));
  Action<double(double)> ad = Double();
  EXPECT_EQ(3.0, ad.Perform(std::make_tuple(1.5)));
}
// Actions convert across compatible signatures: return and argument types
// are implicitly converted, Unused/zero-arg lambdas ignore arguments, and a
// std::function source is copied into the Action.
TEST(FunctorActionTest, TypeConversion) {
  const Action<bool(int)> a1 = [](int i) { return i > 1; };
  const Action<int(bool)> a2 = Action<int(bool)>(a1);
  EXPECT_EQ(1, a1.Perform(std::make_tuple(42)));
  EXPECT_EQ(0, a2.Perform(std::make_tuple(42)));
  const Action<bool(std::string)> s1 = [](std::string s) { return !s.empty(); };
  const Action<int(const char*)> s2 = Action<int(const char*)>(s1);
  EXPECT_EQ(0, s2.Perform(std::make_tuple("")));
  EXPECT_EQ(1, s2.Perform(std::make_tuple("hello")));
  const Action<bool(std::string)> x1 = [](Unused) { return 42; };
  const Action<bool(std::string)> x2 = [] { return 42; };
  EXPECT_TRUE(x1.Perform(std::make_tuple("hello")));
  EXPECT_TRUE(x2.Perform(std::make_tuple("hello")));
  // The Action holds its own copy of the std::function.
  std::function<int()> f = [] { return 7; };
  Action<int(int)> d = f;
  f = nullptr;
  EXPECT_EQ(7, d.Perform(std::make_tuple(1)));
  // A null std::function is accepted at construction time.
  Action<void(int)>(nullptr);
}
// Unused placeholder parameters let an action ignore trailing arguments.
TEST(FunctorActionTest, UnusedArguments) {
  Action<int(int, double y, double z)> a = [](int i, Unused, Unused) {
    return 2 * i;
  };
  std::tuple<int, double, double> dummy = std::make_tuple(3, 7.3, 9.44);
  EXPECT_EQ(6, a.Perform(dummy));
}
// Built-in actions (Return, WithoutArgs, SetArgPointee) work on signatures
// with move-only parameters.
TEST(MoveOnlyArgumentsTest, ReturningActions) {
  Action<int(std::unique_ptr<int>)> a = Return(1);
  EXPECT_EQ(1, a.Perform(std::make_tuple(nullptr)));
  a = testing::WithoutArgs([]() { return 7; });
  EXPECT_EQ(7, a.Perform(std::make_tuple(nullptr)));
  Action<void(std::unique_ptr<int>, int*)> a2 = testing::SetArgPointee<1>(3);
  int x = 0;
  a2.Perform(std::make_tuple(nullptr, &x));
  EXPECT_EQ(x, 3);
}
// Action whose result is the arity of the mocked call, via the args_type
// tuple exposed by the ACTION macro.
ACTION(ReturnArity) { return std::tuple_size<args_type>::value; }
// The ACTION macro supports mocked signatures of up to 20 arguments.
TEST(ActionMacro, LargeArity) {
  EXPECT_EQ(
      1, testing::Action<int(int)>(ReturnArity()).Perform(std::make_tuple(0)));
  EXPECT_EQ(
      10,
      testing::Action<int(int, int, int, int, int, int, int, int, int, int)>(
          ReturnArity())
          .Perform(std::make_tuple(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)));
  EXPECT_EQ(
      20,
      testing::Action<int(int, int, int, int, int, int, int, int, int, int, int,
                          int, int, int, int, int, int, int, int, int)>(
          ReturnArity())
          .Perform(std::make_tuple(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
                                   14, 15, 16, 17, 18, 19)));
}
}
}
#if defined(_MSC_VER) && (_MSC_VER == 1900)
GTEST_DISABLE_MSC_WARNINGS_POP_()
#endif
GTEST_DISABLE_MSC_WARNINGS_POP_() | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googlemock/include/gmock/gmock-actions.h | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googlemock/test/gmock-actions_test.cc | a1e255a582377e1006bb88a408ac3f933ba7c916 |
3de0f6a1-48f4-438a-b8a4-3ea5085c9e4a | cpp | tensorflow/tensorflow | ring_gatherer | tensorflow/core/common_runtime/ring_gatherer.cc | tensorflow/core/common_runtime/ring_gatherer_test.cc | #include "tensorflow/core/common_runtime/ring_gatherer.h"
#include <stdlib.h>
#include <atomic>
#include <functional>
#include <utility>
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/common_runtime/collective_util.h"
#include "tensorflow/core/common_runtime/copy_tensor.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/traceme.h"
namespace tensorflow {
// Validates gather-specific parameters, then defers to the generic ring
// algorithm. RingGather only supports the single implicit subdivision
// offset 0: any other configuration is rejected, and an empty offset list
// is normalized to {0}.
Status RingGatherer::InitializeCollectiveParams(CollectiveParams* col_params) {
  DCHECK_EQ(col_params->instance.type, GATHER_COLLECTIVE);
  DCHECK_EQ(col_params->instance.impl_details.collective_name, "RingGather");
  auto& offsets = col_params->instance.impl_details.subdiv_offsets;
  if (offsets.empty()) {
    // Default to the only legal configuration.
    offsets.push_back(0);
  } else if (offsets.size() > 1 || offsets[0] != 0) {
    return errors::InvalidArgument(
        "RingGather cannot take any subdiv offset other than 0.");
  }
  return RingAlg::InitializeCollectiveParams(col_params);
}
// Kicks off the ring gather for this device: copies the local input into
// this rank's alias chunk of the output tensor, then drives the async
// send/recv state machine via RunAsyncParts(). `done` is invoked (through
// Finish) on completion or failure.
//
// Fix: the MemCpyAsync completion lambda's capture list had been corrupted
// to "[¬e, ...]" (HTML `&not` entity mojibake); restored to
// "[&note, &status]".
void RingGatherer::Run(StatusCallback done) {
  DCHECK(col_ctx_);
  DCHECK(col_params_);
  done_ = std::move(done);
  group_size_ = col_params_->group.group_size;
  num_subdivs_ = static_cast<int>(
      col_params_->instance.impl_details.subdiv_permutations.size());
  DCHECK_GT(num_subdivs_, 0);
  if (VLOG_IS_ON(1)) {
    string buf;
    for (int r = 0; r < col_params_->group.members.size(); ++r) {
      strings::StrAppend(&buf, "dev ", r, " : ",
                         col_params_->group.members[r].device.name(), "\n");
    }
    for (int sd = 0;
         sd < col_params_->instance.impl_details.subdiv_permutations.size();
         ++sd) {
      strings::StrAppend(&buf, "\nsubdiv ", sd, " perm: ");
      for (auto x :
           col_params_->instance.impl_details.subdiv_permutations[sd]) {
        strings::StrAppend(&buf, x, ", ");
      }
    }
    VLOG(1) << "RingGatherer::Run for device " << col_ctx_->device_name
            << " default_rank " << col_params_->default_rank << "\n"
            << buf;
  }
  // View the output tensor as group_size * num_subdivs chunks.
  AllocatorAttributes attr = col_ctx_->op_ctx->output_alloc_attr(0);
  ca_.reset(MakeCollectiveAdapter(col_ctx_->output, group_size_ * num_subdivs_,
                                  col_ctx_->device->GetAllocator(attr),
                                  false));
  {
    tsl::profiler::TraceMe activity("MemCpyAsync",
                                    tsl::profiler::TraceMeLevel::kInfo);
    // Synchronously copy the local input into the rank's alias chunk of the
    // output before the ring exchange starts.
    Notification note;
    Status status;
    Tensor alias_chunk(ca_->ChunkAlias(col_params_->subdiv_rank[0]));
    CollectiveRemoteAccessLocal::MemCpyAsync(
        col_ctx_->op_ctx->op_device_context(),
        col_ctx_->op_ctx->op_device_context(), col_ctx_->device,
        col_ctx_->device, col_ctx_->op_ctx->input_alloc_attr(0),
        col_ctx_->op_ctx->output_alloc_attr(0), col_ctx_->input, &alias_chunk,
        0, [&note, &status](const Status& s) {
          status.Update(s);
          note.Notify();
        });
    note.WaitForNotification();
    if (!status.ok()) {
      done_(status);
      return;
    }
  }
  Finish(RunAsyncParts());
}
// Drives the per-chunk state machine. Each RingField moves through
// RF_INIT -> (RF_RECV) -> RF_SEND_READY -> (RF_SEND) -> RF_DONE; completed
// async ops re-enqueue their field on `ready_queue`. Returns true on
// success, false if any op failed and the collective aborted.
//
// Fix: the ThenExecute lambda's capture list had been corrupted to
// "[¬e]" (HTML `&not` entity mojibake); restored to "[&note]".
bool RingGatherer::RunAsyncParts() {
  rfv_.clear();
  rfv_.resize(group_size_ * num_subdivs_);
  PCQueue ready_queue;
  // Seed the queue with every (chunk, subdiv) field in RF_INIT state.
  for (int chunk_idx = 0; chunk_idx < group_size_; ++chunk_idx) {
    for (int subdiv_idx = 0; subdiv_idx < num_subdivs_; ++subdiv_idx) {
      int rf_index = (chunk_idx * num_subdivs_) + subdiv_idx;
      InitRingField(&rfv_[rf_index], chunk_idx, subdiv_idx, rf_index);
      ready_queue.Enqueue(&rfv_[rf_index]);
    }
  }
  const DeviceBase::AcceleratorDeviceInfo* gpu_info =
      col_ctx_->device->tensorflow_accelerator_device_info();
  if (gpu_info) {
    // Wait for pending queued work on the accelerator stream before starting
    // the ring exchange.
    tsl::profiler::TraceMe activity("WaitForQueuedEvents",
                                    tsl::profiler::TraceMeLevel::kInfo);
    Notification note;
    Status s = gpu_info->default_context->ThenExecute(
        col_ctx_->device, gpu_info->stream, [&note]() { note.Notify(); });
    if (s.ok()) {
      note.WaitForNotification();
    } else {
      mutex_lock l(status_mu_);
      status_ =
          errors::Internal("Failed to dispatch ThenExecute in RingGatherer");
      return false;
    }
  }
  int field_done_count = 0;
  int send_pending_count = 0;
  int recv_pending_count = 0;
  std::atomic<bool> aborted(false);
  {
    tsl::profiler::TraceMe activity("Loop", tsl::profiler::TraceMeLevel::kInfo);
    // Pop fields as their async ops complete and advance each one until it
    // either dispatches a new async op or reaches RF_DONE.
    while (field_done_count < rfv_.size()) {
      VLOG(4) << FieldState();
      RingField* rf = ready_queue.Dequeue();
      bool dispatched = false;
      do {
        if (aborted) {
          // Requeue so that the drain loop below can account for it.
          ready_queue.Enqueue(rf);
          break;
        }
        switch (rf->action) {
          case RF_INIT:
            if (rf->do_recv) {
              rf->action = RF_RECV;
              auto requeue = [this, rf, &ready_queue, &aborted](Status s) {
                if (!s.ok()) {
                  aborted = true;
                  StartAbort(s);
                }
                ready_queue.Enqueue(rf);
              };
              DispatchRecv(rf, requeue);
              dispatched = true;
              ++recv_pending_count;
            } else {
              rf->action = RF_SEND_READY;
            }
            break;
          case RF_RECV:
            DCHECK_GT(recv_pending_count, 0);
            --recv_pending_count;
            rf->action = RF_SEND_READY;
            break;
          case RF_REDUCE:
            TF_FALLTHROUGH_INTENDED;
          case RF_FINALIZE:
            TF_FALLTHROUGH_INTENDED;
          case RF_SEND_READY:
            if (rf->do_send) {
              rf->action = RF_SEND;
              auto send_complete = [this, rf, &ready_queue,
                                    &aborted](Status s) {
                if (!s.ok()) {
                  aborted = true;
                  StartAbort(s);
                }
                ready_queue.Enqueue(rf);
              };
              DispatchSend(rf, send_complete);
              dispatched = true;
              ++send_pending_count;
            } else {
              rf->action = RF_DONE;
            }
            break;
          case RF_SEND:
            DCHECK_GT(send_pending_count, 0);
            --send_pending_count;
            rf->action = RF_DONE;
            break;
          case RF_DONE:
            break;
        }
        if (rf->action == RF_DONE) {
          ++field_done_count;
          break;
        }
      } while (!dispatched);
      if (aborted) break;
    }
    if (aborted) {
      // Drain callbacks for still-pending async ops before returning, so
      // their completion lambdas don't touch a destroyed queue.
      while ((send_pending_count > 0) || (recv_pending_count > 0)) {
        RingField* rf = ready_queue.Dequeue();
        switch (rf->action) {
          case RF_RECV:
            --recv_pending_count;
            break;
          case RF_SEND:
            --send_pending_count;
            break;
          default: {
          }
        }
      }
    }
  }
  DCHECK_EQ(send_pending_count, 0);
  DCHECK_EQ(recv_pending_count, 0);
  VLOG(2) << this << " device=" << col_ctx_->device_name << " finish;"
          << " final value " << TensorDebugString(ca_->Value());
  return !aborted;
}
namespace {
REGISTER_COLLECTIVE(RingGather, RingGatherer);
}
} | #include "tensorflow/core/common_runtime/ring_gatherer.h"
#include <algorithm>
#include "absl/memory/memory.h"
#include "tensorflow/core/common_runtime/base_collective_executor.h"
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/common_runtime/collective_test_util.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/device_resolver_local.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/common_runtime/test_collective_executor_mgr.h"
#include "tensorflow/core/common_runtime/threadpool_device.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
// Fixture that builds a fake multi-worker/multi-device collective
// environment and runs a ring gather across every participating device.
class RingGathererTest : public ::testing::Test {
 protected:
  // Creates the test environment and one DeviceInstance per (worker,
  // device) pair. `fail_after` > 0 makes the fake remote-access layer fail
  // deliberately after that many transfers.
  void Init(int num_workers, int num_devices, DataType dtype,
            const TensorShape& shape, const DeviceType& device_type,
            int num_subdivs, int fail_after) {
    test_env_ = CreateCollectiveTestEnv(num_workers, num_devices, device_type);
    test_env_->remote_access->set_fail_after(fail_after);
    for (int wi = 0; wi < num_workers; ++wi) {
      for (int di = 0; di < num_devices; ++di) {
        int rank = wi * num_devices + di;
        instances_.push_back(std::make_unique<DeviceInstance>(
            rank, num_subdivs, dtype, shape, test_env_.get()));
      }
    }
  }
  // Launches DoGather on every instance concurrently and busy-waits until
  // all have completed.
  void Gather(int fail_after) {
    std::atomic<int> done(0);
    for (auto& di : instances_) {
      SchedClosure([&di, &done] {
        di->DoGather();
        ++done;
      });
      if (fail_after > 0) {
        // Stagger launches so the injected failure lands mid-collective.
        Env::Default()->SleepForMicroseconds(100);
      }
    }
    while (done < static_cast<int>(instances_.size())) {
      Env::Default()->SleepForMicroseconds(1000);
    }
  }
  // Fills each instance's input with a rank-dependent pattern, runs the
  // gather, then checks either the concatenated output (success) or the
  // injected "Deliberate failure" error (failure path).
  template <typename T>
  void RunTest(DataType dtype, const DeviceType& device_type, int num_workers,
               int num_devices, int num_subdivs, int tensor_len,
               int fail_after) {
    Init(num_workers, num_devices, dtype, TensorShape({tensor_len}),
         device_type, num_subdivs, fail_after);
    int32_t output_len = tensor_len * num_workers * num_devices;
    std::vector<T> expected(output_len, 0.0);
    for (int di = 0; di < static_cast<int>(instances_.size()); ++di) {
      int32_t instance_offset = di * tensor_len;
      instances_[di]->InitTensor(
          [instance_offset, &expected, dtype, di](Tensor* t) {
            for (size_t i = 0; i < t->NumElements(); ++i) {
              // Rank-scaled values keep each device's contribution
              // distinguishable in the gathered output.
              float value = pow(10, static_cast<double>(di)) * i;
              if (dtype == DT_INT32 || dtype == DT_INT64) {
                value = di * 10 + i;
              }
              t->flat<T>()(i) = static_cast<T>(value);
              expected[instance_offset + i] = value;
            }
          });
    }
    Gather(fail_after);
    if (fail_after > 0) {
      for (int di = 0; di < static_cast<int>(instances_.size()); ++di) {
        EXPECT_NE(instances_[di]->status_.message().find("Deliberate failure"),
                  string::npos);
      }
    } else {
      for (int di = 0; di < static_cast<int>(instances_.size()); ++di) {
        TF_EXPECT_OK(instances_[di]->status_);
        test::ExpectTensorEqual<T>(test::AsTensor<T>(expected),
                                   instances_[di]->output_tensor());
      }
    }
  }
  // One collective participant: owns its tensors, device handle, and
  // per-rank collective parameters.
  class DeviceInstance {
   public:
    DeviceInstance(int rank, int num_subdivs, DataType dtype,
                   const TensorShape& shape, CollectiveTestEnv* test_env)
        : test_env_(test_env), input_tensor_(dtype, shape) {
      col_params_ = CreateCollectiveParams(*test_env_, rank, "RingGather",
                                           GATHER_COLLECTIVE, dtype, shape);
      if (num_subdivs > 0) {
        col_params_->instance.impl_details.subdiv_offsets =
            GenerateEvenSubdivOffsets(test_env->num_devices_per_worker,
                                      num_subdivs);
      }
      string dev_name = col_params_->group.members[rank].device.name();
      TF_CHECK_OK(test_env_->device_mgr->LookupDevice(dev_name, &device_))
          << "Couldn't find device " << dev_name
          << " existing devices: " << test_env_->device_mgr->DebugString();
      // The gather output holds one input-sized slice per group member.
      TensorShape output_shape = shape;
      output_shape.set_dim(
          0, output_shape.dim_size(0) * col_params_->group.group_size);
      output_tensor_ = Tensor(dtype, output_shape);
    }
    void InitTensor(const std::function<void(Tensor*)>& init_f) {
      init_f(&input_tensor_);
    }
    void DoGather() {
      status_ = RunCollective(test_env_, col_params_.get(), device_,
                              &input_tensor_, &output_tensor_);
    }
    const Tensor& input_tensor() { return input_tensor_; }
    const Tensor& output_tensor() { return output_tensor_; }
    CollectiveTestEnv* test_env_;
    Tensor input_tensor_;
    Tensor output_tensor_;
    Device* device_;
    core::RefCountPtr<CollectiveParams> col_params_;
    Status status_;
  };
  std::unique_ptr<CollectiveTestEnv> test_env_;
  std::vector<std::unique_ptr<DeviceInstance>> instances_;
};
// Fixture for checking how RingGatherer::InitializeCollectiveParams derives
// subdivision permutations and per-subdiv ranks.
class RingGathererInitParamsTest : public ::testing::Test {
 protected:
  // Clears derived fields, re-initializes `cp`, and asserts the produced
  // permutations and ranks match expectations.
  void RunSubdivPermsTest(
      CollectiveParams* cp,
      const std::vector<std::vector<int>>& expected_subdiv_perms,
      const std::vector<int>& expected_subdiv_rank) {
    cp->instance.impl_details.subdiv_permutations.clear();
    cp->subdiv_rank.clear();
    core::RefCountPtr<RingGatherer> gatherer(new RingGatherer());
    TF_CHECK_OK(gatherer->InitializeCollectiveParams(cp));
    EXPECT_EQ(expected_subdiv_perms,
              cp->instance.impl_details.subdiv_permutations);
    EXPECT_EQ(expected_subdiv_rank, cp->subdiv_rank);
  }
};
// RingGather accepts only an empty offset list or the single offset {0};
// either way the permutation is the identity over all 24 devices, and the
// subdiv rank equals the default rank.
TEST_F(RingGathererInitParamsTest, SpecifiedSubdivs) {
  const int kNumDevsPerWorker = 8;
  const int kNumWorkers = 3;
  auto test_env =
      CreateCollectiveTestEnv(kNumWorkers, kNumDevsPerWorker, DEVICE_CPU);
  auto cp =
      CreateCollectiveParams(*test_env, 0, "RingGather",
                             GATHER_COLLECTIVE, DT_FLOAT, TensorShape({1}));
  cp->default_rank = 0;
  cp->instance.impl_details.subdiv_offsets = {};
  RunSubdivPermsTest(cp.get(),
                     {{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
                       12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}},
                     {0});
  cp->instance.impl_details.subdiv_offsets = {0};
  RunSubdivPermsTest(cp.get(),
                     {{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
                       12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}},
                     {0});
  cp->default_rank = 3;
  cp->instance.impl_details.subdiv_offsets = {};
  RunSubdivPermsTest(cp.get(),
                     {{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
                       12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}},
                     {3});
}
// Instantiates one RingGathererTest case. B = tensor dtype suffix,
// T = device type, W = worker count, D = devices per worker,
// S = subdivision count, L = tensor length, A = abort-after-N-transfers
// (0 disables the injected failure).
#define DEF_TEST(B, T, W, D, S, L, A)                                         \
  TEST_F(RingGathererTest,                                                    \
         DaTy##B##_DevTy##T##_Wkr##W##_Dev##D##_Sdiv##S##_Len##L##_Abrt##A) { \
    DataType dtype = DT_##B;                                                  \
    switch (dtype) {                                                          \
      case DT_FLOAT: {                                                        \
        RunTest<float>(dtype, DEVICE_##T, W, D, S, L, A);                     \
      } break;                                                                \
      case DT_DOUBLE: {                                                       \
        RunTest<double>(dtype, DEVICE_##T, W, D, S, L, A);                    \
      } break;                                                                \
      case DT_INT32: {                                                        \
        RunTest<int32>(dtype, DEVICE_##T, W, D, S, L, A);                     \
      } break;                                                                \
      case DT_INT64: {                                                        \
        RunTest<int64_t>(dtype, DEVICE_##T, W, D, S, L, A);                   \
      } break;                                                                \
      default:                                                                \
        LOG(FATAL) << "Unimplemented";                                        \
    }                                                                         \
  }
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 1, 0)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 2, 0)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 8, 0)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 16, 0)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 1001, 0)
DEF_TEST(FLOAT, CPU, 2, 4, 1, 128, 0)
DEF_TEST(FLOAT, CPU, 2, 8, 1, 1001, 0)
DEF_TEST(FLOAT, CPU, 2, 8, 1, 4096, 0)
DEF_TEST(FLOAT, CPU, 2, 8, 1, 9408, 0)
DEF_TEST(FLOAT, CPU, 4, 4, 1, 32768, 0)
DEF_TEST(DOUBLE, CPU, 1, 2, 1, 1001, 0)
DEF_TEST(DOUBLE, CPU, 2, 8, 1, 4095, 0)
DEF_TEST(INT32, CPU, 1, 2, 1, 1001, 0)
DEF_TEST(INT32, CPU, 2, 8, 1, 4095, 0)
DEF_TEST(INT64, CPU, 1, 2, 1, 1001, 0)
DEF_TEST(INT64, CPU, 2, 8, 1, 4095, 0)
DEF_TEST(FLOAT, CPU, 2, 8, 1, 9408, 1)
DEF_TEST(FLOAT, CPU, 2, 8, 1, 9408, 7)
DEF_TEST(FLOAT, CPU, 2, 8, 1, 9408, 11)
#endif
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
DEF_TEST(FLOAT, GPU, 1, 2, 1, 1, 0)
DEF_TEST(FLOAT, GPU, 1, 2, 1, 2, 0)
DEF_TEST(FLOAT, GPU, 1, 2, 1, 8, 0)
DEF_TEST(FLOAT, GPU, 1, 2, 1, 16, 0)
DEF_TEST(FLOAT, GPU, 1, 2, 1, 1001, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 1, 1001, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 1, 4096, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 1, 4095, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 1, 32768, 0)
DEF_TEST(FLOAT, GPU, 1, 4, 1, 32768, 0)
DEF_TEST(DOUBLE, GPU, 1, 2, 1, 1001, 0)
DEF_TEST(INT64, GPU, 1, 2, 1, 1001, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 1, 9408, 2)
DEF_TEST(FLOAT, GPU, 1, 8, 1, 9408, 5)
#endif
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/ring_gatherer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/ring_gatherer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
713db5ba-df5e-4ccf-870c-7ebd13752780 | cpp | tensorflow/tensorflow | merge | tensorflow/tools/proto_splitter/merge.cc | tensorflow/tools/proto_splitter/merge_test.cc | #include "tensorflow/tools/proto_splitter/merge.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "riegeli/base/object.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/records/record_reader.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system_helper.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tensorflow/tools/proto_splitter/chunk.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace tensorflow::tools::proto_splitter {
using ::tensorflow::proto_splitter::ChunkedField;
using ::tensorflow::proto_splitter::ChunkedMessage;
using ::tensorflow::proto_splitter::ChunkInfo;
using ::tensorflow::proto_splitter::ChunkMetadata;
using ::tensorflow::proto_splitter::FieldIndex;
using tools::proto_splitter::GetChunkMetadata;
using tools::proto_splitter::GetRiegeliReader;
using tools::proto_splitter::OnlyContainsPb;
using tsl::protobuf::FieldDescriptor;
using tsl::protobuf::Message;
using tsl::protobuf::Reflection;
// Merges the in-memory `chunks` into `merged_message` as described by
// `chunked_message`. A closed reader is handed to ProcessField because the
// MERGE op sources data from `chunks` rather than from disk.
absl::Status Merger::Merge(const std::vector<std::unique_ptr<Message>>& chunks,
                           const ChunkedMessage& chunked_message,
                           Message* merged_message) {
  riegeli::RecordReader<riegeli::FdReader<>> null_reader{riegeli::kClosed};
  if (chunked_message.has_chunk_index()) {
    // The top-level message itself may be stored as one of the chunks.
    merged_message->MergeFrom(*chunks[chunked_message.chunk_index()]);
  }
  for (const auto& chunked_field : chunked_message.chunked_fields()) {
    const absl::Status status =
        ProcessField(chunked_field, merged_message, {}, chunks, null_reader,
                     MergerOp::MERGE);
    if (!status.ok()) return status;
  }
  return absl::OkStatus();
}
// Reads a (possibly chunked) proto saved under `prefix` into
// `merged_message`. If only a plain .pb exists, delegates to ReadPb;
// otherwise opens the riegeli .cpb file, reads its chunk metadata, and
// merges every referenced chunk via ReadFields.
absl::Status Merger::Read(std::string prefix, Message* merged_message) {
  uint64_t start_time = Env::Default()->NowMicros();
  TF_ASSIGN_OR_RETURN(bool only_contains_pb, OnlyContainsPb(prefix));
  if (only_contains_pb) {
    return ReadPb(absl::StrCat(prefix, ".pb"), merged_message);
  }
  TF_ASSIGN_OR_RETURN(auto reader,
                      GetRiegeliReader(absl::StrCat(prefix, ".cpb")));
  auto read_metadata = GetChunkMetadata(reader);
  if (!read_metadata.ok()) {
    // Always close the reader, even on the error path.
    reader.Close();
    return absl::FailedPreconditionError(
        absl::StrCat("Couldn't read ChunkMetadata from chunked proto.\n",
                     read_metadata.status().ToString()));
  }
  ChunkMetadata chunk_metadata = read_metadata.value();
  std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
      chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
  absl::Status s =
      ReadFields(chunk_metadata.message(), reader, chunks_info, merged_message);
  reader.Close();
  uint64_t end_time = Env::Default()->NowMicros();
  LOG(INFO) << "Finished reading and merging chunked proto, took "
            << HumanReadableDuration(end_time - start_time) << ".";
  return s;
}
// Like Read, but merges only the subset of chunks referenced by the
// caller-supplied `chunk_metadata` (for partial/lazy loading). Fails if the
// saved artifact is a plain .pb rather than a chunked .cpb file.
absl::Status Merger::ReadPartial(absl::string_view prefix,
                                 const ChunkMetadata& chunk_metadata,
                                 Message* merged_message) {
  uint64_t start_time = Env::Default()->NowMicros();
  TF_ASSIGN_OR_RETURN(bool only_contains_pb, OnlyContainsPb(prefix));
  if (only_contains_pb) {
    return absl::FailedPreconditionError(
        absl::StrCat("Attempting to read part of a chunked proto .cpb file, "
                     "but only found a regular proto: ",
                     prefix, ".pb"));
  }
  TF_ASSIGN_OR_RETURN(auto reader,
                      GetRiegeliReader(absl::StrCat(prefix, ".cpb")));
  std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
      chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
  absl::Status s =
      ReadFields(chunk_metadata.message(), reader, chunks_info, merged_message);
  reader.Close();
  uint64_t end_time = Env::Default()->NowMicros();
  LOG(INFO) << "Finished reading and merging chunked proto, took "
            << HumanReadableDuration(end_time - start_time) << ".";
  return s;
}
// Reads a monolithic (non-chunked) binary proto from `pb_file` into
// `merged_message`, logging how long the read took.
absl::Status Merger::ReadPb(const std::string& pb_file,
                            Message* merged_message) {
  const uint64_t start_time = Env::Default()->NowMicros();
  TF_ASSIGN_OR_RETURN(bool file_exists,
                      internal::FileExists(Env::Default(), pb_file));
  if (!file_exists) {
    return absl::NotFoundError(absl::StrCat("File not found: ", pb_file));
  }
  LOG(INFO) << "Reading binary proto from " << pb_file;
  const auto read_status =
      ReadBinaryProto(Env::Default(), pb_file, merged_message);
  const uint64_t end_time = Env::Default()->NowMicros();
  LOG(INFO) << "Finished reading binary proto, took "
            << HumanReadableDuration(end_time - start_time) << ".";
  return read_status;
}
// Reads the chunks referenced by `chunked_message` from `reader` and merges
// them into `merged_message`. Chunked fields are first sorted into a
// deterministic order — by field-tag path, then by chunk index — so later
// chunks consistently merge on top of earlier ones.
//
// PERF fix: the sort comparator previously took its two ChunkedField
// arguments by value, copying both protos (including their repeated
// field_tag lists) on every comparison; they are now taken by const
// reference.
absl::Status Merger::ReadFields(
    const ChunkedMessage& chunked_message,
    riegeli::RecordReader<riegeli::FdReader<>>& reader,
    const std::vector<ChunkInfo>& chunks_info,
    tsl::protobuf::Message* merged_message) {
  if (chunked_message.has_chunk_index()) {
    // The message itself (minus its chunked fields) is stored as a chunk.
    TF_ASSIGN_OR_RETURN(
        std::string chunk,
        ReadChunk(reader, chunks_info[chunked_message.chunk_index()]));
    if (!merged_message->MergeFromString(chunk)) {
      return absl::FailedPreconditionError(
          "Couldn't merge chunk into message.");
    }
  }
  std::vector<ChunkedField> chunked_fields(
      chunked_message.chunked_fields().begin(),
      chunked_message.chunked_fields().end());
  // The comparator cannot return an error directly, so it records one here.
  absl::Status sort_status = absl::OkStatus();
  std::sort(
      chunked_fields.begin(), chunked_fields.end(),
      [&sort_status](const ChunkedField& cf1, const ChunkedField& cf2) {
        int tag_depth =
            std::min(cf1.field_tag().size(), cf2.field_tag().size());
        for (int depth = 0; depth < tag_depth; ++depth) {
          FieldIndex tag1 = cf1.field_tag()[depth];
          FieldIndex tag2 = cf2.field_tag()[depth];
          if (tag1.has_field() && tag2.has_field()) {
            uint32_t field1 = tag1.field();
            uint32_t field2 = tag2.field();
            if (field1 != field2) return field1 < field2;
          } else if (tag1.has_index() && tag2.has_index()) {
            uint64_t index1 = tag1.index();
            uint64_t index2 = tag2.index();
            if (index1 != index2) return index1 < index2;
          } else if (tag1.has_map_key() && tag2.has_map_key()) {
            // Map keys carry no defined ordering here; treat as equivalent.
            return false;
          } else {
            sort_status = absl::FailedPreconditionError("Field tag mismatch");
            return false;
          }
        }
        if (cf1.field_tag().size() == cf2.field_tag().size()) {
          return cf1.message().chunk_index() < cf2.message().chunk_index();
        }
        return cf1.field_tag().size() < cf2.field_tag().size();
      });
  if (!sort_status.ok()) return sort_status;
  for (const auto& chunked_field : chunked_fields) {
    absl::Status s = ProcessField(chunked_field, merged_message, chunks_info,
                                  {}, reader, MergerOp::READ);
    if (!s.ok()) return s;
  }
  return absl::OkStatus();
}
// Merges one chunked field into `merged_message`.
//
// The serialized chunk is obtained either from disk (`op == READ`, via
// `reader`/`chunks_info`) or from the in-memory `chunks` list
// (`op == MERGE`). An empty field tag means the chunk belongs to the
// message itself; otherwise the field-tag path is walked, creating
// intermediate messages / map entries / repeated elements as needed, and
// the chunk is written into the leaf field (recursing into Merge/ReadFields
// for message-typed leaves).
absl::Status Merger::ProcessField(
    const ChunkedField& chunked_field, Message* merged_message,
    const std::vector<ChunkInfo>& chunks_info,
    const std::vector<std::unique_ptr<Message>>& chunks,
    riegeli::RecordReader<riegeli::FdReader<>>& reader, MergerOp op) {
  std::string chunk;
  switch (op) {
    case MergerOp::READ: {
      TF_ASSIGN_OR_RETURN(
          chunk, ReadChunk(reader,
                           chunks_info[chunked_field.message().chunk_index()]));
      break;
    }
    case MergerOp::MERGE: {
      chunk =
          chunks[chunked_field.message().chunk_index()]->SerializeAsString();
      break;
    }
  }
  if (chunked_field.field_tag().empty()) {
    // Fix: report a merge failure instead of silently ignoring it, matching
    // the check performed on the same operation in Merger::ReadFields.
    if (!merged_message->MergeFromString(chunk)) {
      return absl::FailedPreconditionError(
          "Couldn't merge chunk into message.");
    }
    return absl::OkStatus();
  }
  // Fix: initialize to avoid reading an indeterminate value below when the
  // leaf field is repeated but no index was recorded along the tag path.
  uint64_t field_index = 0;
  Message* curr_message = merged_message;
  TF_ASSIGN_OR_RETURN(const std::vector<Field> fields,
                      GetFieldTypes(chunked_field.field_tag()));
  const FieldDescriptor* field_desc = nullptr;
  // Walk the field-tag path; after the loop, `merged_message`/`field_desc`
  // identify the parent message and leaf field to write into.
  for (const auto& field : fields) {
    merged_message = curr_message;
    field_desc = merged_message->GetDescriptor()->FindFieldByNumber(
        std::get<int>(field.first));
    auto res = GetMutableField(merged_message, field);
    if (!res.ok()) {
      if (!absl::IsNotFound(res.status())) return res.status();
      // Field element missing: create the map entry or repeated element,
      // then retry the lookup.
      if (field_desc->is_map()) {
        TF_RETURN_IF_ERROR(
            AddMapEntry(curr_message, field_desc, field.second.value()));
        res = GetMutableField(curr_message, field);
      } else {
        curr_message->GetReflection()->AddMessage(curr_message, field_desc);
        res = GetMutableField(curr_message, field);
      }
    }
    auto [parent, mutable_field, mutable_field_index] = res.value();
    if (mutable_field->is_repeated() && mutable_field_index != -1) {
      field_index = mutable_field_index;
      curr_message = parent->GetReflection()->MutableRepeatedMessage(
          parent, mutable_field, std::max(0, mutable_field_index));
      if (mutable_field->is_map()) {
        // Descend into the map entry's value field (field number 2).
        field_desc = mutable_field->message_type()->FindFieldByNumber(2);
        merged_message = curr_message;
        curr_message = curr_message->GetReflection()->MutableMessage(
            curr_message, field_desc);
      }
    } else if (mutable_field->type() == FieldDescriptor::Type::TYPE_MESSAGE) {
      curr_message =
          parent->GetReflection()->MutableMessage(parent, mutable_field);
    }
  }
  const Reflection* reflection = merged_message->GetReflection();
  if (field_desc->is_repeated()) {
    auto message_callback = [&reflection, &merged_message, &field_index, &op,
                             &chunks, &chunked_field, &reader, &chunks_info,
                             &field_desc]() -> absl::Status {
      // Grow the repeated field until `field_index` is addressable.
      for (int _ = reflection->FieldSize(*merged_message, field_desc);
           _ <= field_index; _++) {
        reflection->AddMessage(merged_message, field_desc);
      }
      switch (op) {
        case MergerOp::MERGE:
          TF_RETURN_IF_ERROR(
              Merge(chunks, chunked_field.message(),
                    reflection->MutableRepeatedMessage(
                        merged_message, field_desc, field_index)));
          break;
        case MergerOp::READ:
          TF_RETURN_IF_ERROR(
              ReadFields(chunked_field.message(), reader, chunks_info,
                         reflection->MutableRepeatedMessage(
                             merged_message, field_desc, field_index)));
          break;
        default:
          return absl::InternalError("Encountered unknown MergerOp.");
      }
      return absl::OkStatus();
    };
    TF_RETURN_IF_ERROR(SetRepeatedFieldElement(
        merged_message, field_desc, field_index, chunk, message_callback));
  } else {
    auto message_callback = [&reflection, &merged_message, &op, &chunks,
                             &chunked_field, &reader, &chunks_info,
                             &field_desc]() -> absl::Status {
      switch (op) {
        case MergerOp::MERGE:
          TF_RETURN_IF_ERROR(
              Merge(chunks, chunked_field.message(),
                    reflection->MutableMessage(merged_message, field_desc)));
          break;
        case MergerOp::READ:
          TF_RETURN_IF_ERROR(ReadFields(
              chunked_field.message(), reader, chunks_info,
              reflection->MutableMessage(merged_message, field_desc)));
          break;
        default:
          return absl::InternalError("Encountered unknown MergerOp.");
      }
      return absl::OkStatus();
    };
    TF_RETURN_IF_ERROR(
        SetFieldElement(merged_message, field_desc, chunk, message_callback));
  }
  return absl::OkStatus();
}
} | #include "tensorflow/tools/proto_splitter/merge.h"
#include <array>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tensorflow/tools/proto_splitter/cc/test_util.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tensorflow/tools/proto_splitter/chunk.pb.h"
#include "tensorflow/tools/proto_splitter/testdata/test_message.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace tensorflow::tools::proto_splitter {
namespace {
// Chunk payloads for a test tree split in depth-first order.
inline constexpr std::array kDFSplitTreeChunks = {
    "val: \"0\"",       "val: \"010\"",     "val: \"01020\"",
    "val: \"0102030\"", "val: \"0102031\"", "val: \"0102032\"",
    "val: \"01021\"",   "val: \"0102130\"", "val: \"0102131\"",
    "val: \"0102132\""};
// Same tree split in breadth-first order; both must merge identically.
inline constexpr std::array kBFSplitTreeChunks = {
    "val: \"0\"",       "val: \"010\"",     "val: \"01020\"",
    "val: \"01021\"",   "val: \"0102030\"", "val: \"0102031\"",
    "val: \"0102032\"", "val: \"0102130\"", "val: \"0102131\"",
    "val: \"0102132\""};
// Verifies reading a depth-first-split tree reproduces the original proto.
TEST(MergeTest, TestReadRiegeliTreeDepthFirst) {
  const std::string cpb_path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "df-split-tree");
  ::tensorflow::proto_splitter_testdata::StringNode merged_tree;
  TF_ASSERT_OK(Merger::Read(cpb_path, &merged_tree));
  const std::string pbtxt_path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "split-tree");
  ::tensorflow::proto_splitter_testdata::StringNode test_proto;
  TF_ASSERT_OK(tsl::ReadTextProto(
      tsl::Env::Default(), absl::StrCat(pbtxt_path, ".pbtxt"), &test_proto));
  ASSERT_THAT(merged_tree, EqualsProto(test_proto));
}
// Verifies reading a breadth-first-split tree reproduces the original proto.
TEST(MergeTest, TestReadRiegeliTreeBreadthFirst) {
  const std::string cpb_path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "bf-split-tree");
  ::tensorflow::proto_splitter_testdata::StringNode merged_tree;
  TF_ASSERT_OK(Merger::Read(cpb_path, &merged_tree));
  const std::string pbtxt_path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "split-tree");
  ::tensorflow::proto_splitter_testdata::StringNode test_proto;
  TF_ASSERT_OK(tsl::ReadTextProto(
      tsl::Env::Default(), absl::StrCat(pbtxt_path, ".pbtxt"), &test_proto));
  ASSERT_THAT(merged_tree, EqualsProto(test_proto));
}
// Verifies merging in-memory depth-first chunks (Merger::Merge path).
TEST(MergeTest, TestMergeTreeChunksDepthFirst) {
  const std::string path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "df-split-tree");
  std::vector<std::unique_ptr<::tsl::protobuf::Message>> chunks;
  for (const auto& chunk : kDFSplitTreeChunks) {
    ::tensorflow::proto_splitter_testdata::StringNode string_node;
    ::tsl::protobuf::TextFormat::ParseFromString(chunk, &string_node);
    std::unique_ptr<::tsl::protobuf::Message> node =
        std::make_unique<::tensorflow::proto_splitter_testdata::StringNode>(
            string_node);
    chunks.push_back(std::move(node));
  }
  std::string split_tree_metadata;
  TF_ASSERT_OK(tsl::ReadFileToString(
      tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &split_tree_metadata));
  ::tensorflow::proto_splitter::ChunkedMessage chunked_message;
  ::tsl::protobuf::TextFormat::ParseFromString(split_tree_metadata,
                                               &chunked_message);
  ::tensorflow::proto_splitter_testdata::StringNode merged_tree;
  TF_ASSERT_OK(Merger::Merge(chunks, chunked_message, &merged_tree));
  const std::string pbtxt_path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "split-tree");
  ::tensorflow::proto_splitter_testdata::StringNode test_proto;
  TF_ASSERT_OK(tsl::ReadTextProto(
      tsl::Env::Default(), absl::StrCat(pbtxt_path, ".pbtxt"), &test_proto));
  ASSERT_THAT(merged_tree, EqualsProto(test_proto));
}
// Verifies merging in-memory breadth-first chunks (Merger::Merge path).
TEST(MergeTest, TestMergeTreeChunksBreadthFirst) {
  const std::string path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "bf-split-tree");
  std::vector<std::unique_ptr<::tsl::protobuf::Message>> chunks;
  for (const auto& chunk : kBFSplitTreeChunks) {
    ::tensorflow::proto_splitter_testdata::StringNode string_node;
    ::tsl::protobuf::TextFormat::ParseFromString(chunk, &string_node);
    std::unique_ptr<::tsl::protobuf::Message> node =
        std::make_unique<::tensorflow::proto_splitter_testdata::StringNode>(
            string_node);
    chunks.push_back(std::move(node));
  }
  std::string split_tree_metadata;
  TF_ASSERT_OK(tsl::ReadFileToString(
      tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &split_tree_metadata));
  ::tensorflow::proto_splitter::ChunkedMessage chunked_message;
  ::tsl::protobuf::TextFormat::ParseFromString(split_tree_metadata,
                                               &chunked_message);
  ::tensorflow::proto_splitter_testdata::StringNode merged_tree;
  TF_ASSERT_OK(Merger::Merge(chunks, chunked_message, &merged_tree));
  const std::string pbtxt_path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "split-tree");
  ::tensorflow::proto_splitter_testdata::StringNode test_proto;
  TF_ASSERT_OK(tsl::ReadTextProto(
      tsl::Env::Default(), absl::StrCat(pbtxt_path, ".pbtxt"), &test_proto));
  ASSERT_THAT(merged_tree, EqualsProto(test_proto));
}
// Verifies reading a GraphDef split across many node chunks.
TEST(MergeTest, TestReadGraphDefLotsNodes) {
  const std::string path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "split-lots-nodes");
  GraphDef merged_graph_def;
  TF_ASSERT_OK(Merger::Read(path, &merged_graph_def));
  GraphDef test_graph_def;
  TF_ASSERT_OK(tsl::ReadTextProto(
      tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &test_graph_def));
  ASSERT_THAT(merged_graph_def, EqualsProto(test_graph_def));
}
// Verifies reading a GraphDef whose individual nodes were split.
TEST(MergeTest, TestReadGraphDefLargeNodes) {
  const std::string path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "split-large-nodes");
  GraphDef merged_graph_def;
  TF_ASSERT_OK(Merger::Read(path, &merged_graph_def));
  GraphDef test_graph_def;
  TF_ASSERT_OK(tsl::ReadTextProto(
      tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &test_graph_def));
  ASSERT_THAT(merged_graph_def, EqualsProto(test_graph_def));
}
// Verifies reading a GraphDef containing a split large constant.
TEST(MergeTest, TestReadGraphDefLargeConstant) {
  const std::string path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "split-large-constant");
  GraphDef merged_graph_def;
  TF_ASSERT_OK(Merger::Read(path, &merged_graph_def));
  GraphDef test_graph_def;
  TF_ASSERT_OK(tsl::ReadTextProto(
      tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &test_graph_def));
  ASSERT_THAT(merged_graph_def, EqualsProto(test_graph_def));
}
// Verifies reading a message exercising many field kinds (maps, repeats).
TEST(MergeTest, TestReadManyField) {
  const std::string path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "many-field");
  ::tensorflow::proto_splitter_testdata::ManyFields merged_many_field;
  TF_ASSERT_OK(Merger::Read(path, &merged_many_field));
  ::tensorflow::proto_splitter_testdata::ManyFields test_many_field;
  TF_ASSERT_OK(tsl::ReadTextProto(
      tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &test_many_field));
  ASSERT_THAT(merged_many_field, EqualsProto(test_many_field));
}
// Verifies reading a chunked SavedModel round-trips.
TEST(MergeTest, TestReadSavedModel) {
  const std::string path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "split-standard");
  SavedModel merged_saved_model;
  TF_ASSERT_OK(Merger::Read(path, &merged_saved_model));
  SavedModel test_saved_model;
  TF_ASSERT_OK(tsl::ReadTextProto(
      tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &test_saved_model));
  ASSERT_THAT(merged_saved_model, EqualsProto(test_saved_model));
}
// Verifies reading the checked-in chunked SavedModel fixture.
TEST(MergeTest, TestReadChunkedModel) {
  const std::string path =
      io::JoinPath(testing::TensorFlowSrcRoot(), "cc/saved_model/testdata",
                   "chunked_saved_model/chunked_model/saved_model");
  SavedModel merged_saved_model;
  TF_ASSERT_OK(Merger::Read(path, &merged_saved_model));
  SavedModel test_saved_model;
  TF_ASSERT_OK(tsl::ReadTextProto(
      tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &test_saved_model));
  ASSERT_THAT(merged_saved_model, EqualsProto(test_saved_model));
}
// Verifies ReadPartial merges only the fields named in the partial metadata.
TEST(MergeTest, TestReadPartial) {
  const std::string path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "many-field");
  TF_ASSERT_OK_AND_ASSIGN(auto reader, tools::proto_splitter::GetRiegeliReader(
                                           absl::StrCat(path, ".cpb")));
  auto read_metadata = GetChunkMetadata(reader);
  if (!read_metadata.ok()) {
    reader.Close();
    TF_ASSERT_OK(read_metadata.status());
  }
  ::tensorflow::proto_splitter::ChunkMetadata chunk_metadata =
      read_metadata.value();
  ::tensorflow::proto_splitter::ChunkMetadata partial_chunk_metadata;
  partial_chunk_metadata.mutable_chunks()->CopyFrom(chunk_metadata.chunks());
  partial_chunk_metadata.mutable_message()->set_chunk_index(
      chunk_metadata.message().chunk_index());
  proto_splitter_testdata::ManyFields merged_many_fields;
  TF_ASSERT_OK(
      Merger::ReadPartial(path, partial_chunk_metadata, &merged_many_fields));
  ASSERT_THAT(merged_many_fields, EqualsProto(R"pb(
                map_field_int64 { key: -1345 value: "map_value_-1345" }
              )pb"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/proto_splitter/merge.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/proto_splitter/merge_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3c7cc377-e95e-41fa-9b3f-d5f718e8e8de | cpp | tensorflow/tensorflow | kernel_thunk | third_party/xla/xla/service/gpu/runtime/kernel_thunk.cc | third_party/xla/xla/backends/cpu/runtime/kernel_thunk_test.cc | #include "xla/service/gpu/runtime/kernel_thunk.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/kernel_arguments.h"
#include "xla/service/gpu/kernels/custom_kernel.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
// Builds a thunk that launches a compiled device kernel.
// Kernel arguments are deduplicated: only the first argument that refers to
// each buffer slice is kept, and written_[i] records whether args_[i] is
// written by the kernel.
KernelThunk::KernelThunk(const HloInstruction* instr, std::string kernel_name,
                         absl::Span<const KernelArgument> kernel_arguments,
                         LaunchDimensions launch_dimensions,
                         std::optional<se::ClusterDim> cluster_dim,
                         int64_t shmem_bytes)
    : Thunk(Kind::kKernel, Thunk::ThunkInfo::WithProfileAnnotation(instr)),
      kernel_name_(std::move(kernel_name)),
      launch_dimensions_(std::move(launch_dimensions)),
      cluster_dim_(std::move(cluster_dim)),
      shmem_bytes_(shmem_bytes) {
  args_.reserve(kernel_arguments.size());
  written_.reserve(kernel_arguments.size());
  for (const auto& kernel_argument : kernel_arguments) {
    // Skip arguments whose slice already appeared earlier in the list.
    if (!kernel_argument.first_with_same_slice().has_value()) {
      args_.push_back(kernel_argument.slice());
      written_.push_back(kernel_argument.written());
    }
  }
}
// Human-readable description for debugging; `indent` is unused here.
std::string KernelThunk::ToString(int indent) const {
  return absl::StrFormat(
      ", kernel = %s, launch dimensions = %s, cluster_dim = %s", kernel_name_,
      launch_dimensions_.ToString(),
      cluster_dim_.has_value() ? cluster_dim_->ToString() : "nullopt");
}
// Loads (and caches) the kernel for this executor. Must be called before
// ExecuteOnStream; the cache is keyed by StreamExecutor and guarded by
// mutex_ so multiple executors can initialize concurrently.
absl::Status KernelThunk::Initialize(const InitializeParams& params) {
  absl::MutexLock lock(&mutex_);
  auto it = kernel_cache_.find(params.executor);
  if (kernel_cache_.end() == it) {
    TF_ASSIGN_OR_RETURN(
        std::unique_ptr<se::Kernel> kernel,
        CreateKernel(kernel_name_, args_.size(), params.src.text,
                     params.src.binary, params.executor, shmem_bytes_));
    kernel_cache_.emplace(params.executor, std::move(kernel));
  }
  return absl::OkStatus();
}
// Debug helper: copies each device buffer back to the host and logs its
// bytes as hex at VLOG(100). Blocks the stream after every copy, so this is
// only suitable for verbose-debug runs.
static void PrintBufferContents(
    se::Stream* stream, absl::Span<const se::DeviceMemoryBase> buffer_args) {
  int arg_index = 0;
  for (const se::DeviceMemoryBase& device_buffer : buffer_args) {
    // Stage the device bytes into a host-side scratch buffer.
    auto staging = std::make_unique<char[]>(device_buffer.size());
    CHECK_OK(
        stream->Memcpy(staging.get(), device_buffer, device_buffer.size()));
    CHECK_OK(stream->BlockHostUntilDone());
    // Render the bytes as space-separated hex digits.
    std::string hex_dump;
    for (int pos = 0; pos < device_buffer.size(); pos++) {
      absl::StrAppendFormat(&hex_dump, "%x ",
                            static_cast<unsigned>(staging[pos]));
    }
    VLOG(100) << "BUF(" << arg_index++ << ") = " << hex_dump;
  }
}
// Launches the cached kernel on the execution stream with the device
// addresses resolved from this thunk's buffer slices.
absl::Status KernelThunk::ExecuteOnStream(const ExecuteParams& params) {
  se::StreamExecutor* executor = params.stream->parent();
  LaunchDimensions launch_dimensions;
  std::optional<se::ClusterDim> cluster_dim;
  const se::Kernel* kernel = nullptr;
  TF_ASSIGN_OR_RETURN(
      se::Stream * stream,
      GetStreamForExecution(Thunk::execution_stream_id(), params));
  {
    // Snapshot the launch configuration and kernel pointer under the lock,
    // then launch outside it.
    absl::MutexLock lock(&mutex_);
    auto it = kernel_cache_.find(executor);
    CHECK(it != kernel_cache_.end())
        << "Initialize() not called for StreamExecutor " << executor;
    launch_dimensions = launch_dimensions_;
    cluster_dim = cluster_dim_;
    kernel = it->second.get();
  }
  VLOG(3) << "Launching " << kernel->name();
  // Resolve each buffer slice to its device address for this execution.
  absl::InlinedVector<se::DeviceMemoryBase, 4> buffer_args;
  for (const BufferAllocation::Slice& arg : args_) {
    se::DeviceMemoryBase buf = params.buffer_allocations->GetDeviceAddress(arg);
    VLOG(3) << "  Arg: alloc #" << arg.index() << ", offset: " << arg.offset()
            << ": " << buf.opaque() << " (" << buf.size() << "B)";
    buffer_args.push_back(buf);
  }
  if (VLOG_IS_ON(100)) {
    PrintBufferContents(stream, buffer_args);
  }
  // Launch with or without a cluster dimension, as configured.
  if (cluster_dim.has_value()) {
    return ExecuteKernelOnStream(*kernel, buffer_args, launch_dimensions,
                                 cluster_dim.value(), stream);
  } else {
    return ExecuteKernelOnStream(*kernel, buffer_args, launch_dimensions,
                                 stream);
  }
}
// Thunk wrapping a CustomKernel. Mirrors KernelThunk's argument
// deduplication: only the first argument per buffer slice is kept.
CustomKernelThunk::CustomKernelThunk(
    const HloInstruction* instr, CustomKernel custom_kernel,
    absl::Span<const KernelArgument> kernel_arguments)
    : Thunk(Kind::kCustomKernel,
            Thunk::ThunkInfo::WithProfileAnnotation(instr)),
      custom_kernel_(std::move(custom_kernel)) {
  args_.reserve(kernel_arguments.size());
  written_.reserve(kernel_arguments.size());
  for (const auto& kernel_argument : kernel_arguments) {
    if (!kernel_argument.first_with_same_slice().has_value()) {
      args_.push_back(kernel_argument.slice());
      written_.push_back(kernel_argument.written());
    }
  }
}
// Debug description delegated to the wrapped custom kernel; `indent` unused.
std::string CustomKernelThunk::ToString(int indent) const {
  return custom_kernel_.ToString();
}
// Loads (and caches) the device kernel for this executor from the custom
// kernel's spec. Cache is keyed by StreamExecutor, guarded by mutex_.
absl::Status CustomKernelThunk::Initialize(const InitializeParams& params) {
  absl::MutexLock lock(&mutex_);
  auto it = kernel_cache_.find(params.executor);
  if (kernel_cache_.end() == it) {
    TF_ASSIGN_OR_RETURN(
        std::unique_ptr<se::Kernel> kernel,
        params.executor->LoadKernel(custom_kernel_.kernel_spec()));
    kernel_cache_.emplace(params.executor, std::move(kernel));
  }
  return absl::OkStatus();
}
// Launches the cached custom kernel with the thunk's resolved buffers,
// using the custom kernel's own thread/block (and optional cluster) dims.
absl::Status CustomKernelThunk::ExecuteOnStream(const ExecuteParams& params) {
  se::StreamExecutor* executor = params.stream->parent();
  const se::Kernel* kernel = [&] {
    absl::MutexLock lock(&mutex_);
    return kernel_cache_[executor].get();
  }();
  VLOG(3) << "Launching " << custom_kernel_.ToString() << " as device kernel "
          << kernel->name();
  absl::InlinedVector<se::DeviceMemoryBase, 4> buffer_args;
  for (const BufferAllocation::Slice& arg : args_) {
    se::DeviceMemoryBase buf = params.buffer_allocations->GetDeviceAddress(arg);
    VLOG(3) << "  Arg: alloc #" << arg.index() << ", offset: " << arg.offset()
            << ": " << buf.opaque() << " (" << buf.size() << "B)";
    buffer_args.push_back(buf);
  }
  if (VLOG_IS_ON(100)) {
    PrintBufferContents(params.stream, buffer_args);
  }
  se::KernelArgsDeviceMemoryArray args(buffer_args,
                                       custom_kernel_.shared_memory_bytes());
  if (auto cluster = custom_kernel_.cluster_dims(); cluster.has_value()) {
    return params.stream->Launch(custom_kernel_.thread_dims(),
                                 custom_kernel_.block_dims(), *cluster, *kernel,
                                 args);
  } else {
    return params.stream->Launch(custom_kernel_.thread_dims(),
                                 custom_kernel_.block_dims(), *kernel, args);
  }
}
}
} | #include "xla/backends/cpu/runtime/kernel_thunk.h"
#include <cstddef>
#include <cstdint>
#include <string_view>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "xla/backends/cpu/runtime/buffer_allocations.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/host/host_kernel_c_api.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
// Test registry whose only kernel doubles each float element: out[i] = 2*in[i],
// with the element index taken from the thread's x coordinate.
class AddF32HostKernel : public Thunk::FunctionRegistry {
 public:
  absl::StatusOr<Kernel> FindKernel(std::string_view name) override {
    return +[](const SE_HOST_KernelCallFrame* call_frame) {
      const SE_HOST_KernelArg& in = call_frame->args[0];
      const SE_HOST_KernelArg& out = call_frame->args[1];
      float* in_ptr = reinterpret_cast<float*>(in.data);
      float* out_ptr = reinterpret_cast<float*>(out.data);
      uint64_t i = call_frame->thread->x;
      *(out_ptr + i) = *(in_ptr + i) + *(in_ptr + i);
      return static_cast<SE_HOST_KernelError*>(nullptr);
    };
  }
};
// A non-power-of-two minimum alignment must be rejected at creation time.
TEST(KernelThunkTest, CheckAlignment) {
  auto thunk =
      KernelThunk::Create({"test"}, {}, {}, "test", se::ThreadDim(), {},
                          3);
  EXPECT_TRUE(absl::StrContains(thunk.status().message(),
                                "minimum alignment 3 is not a power of 2"))
}
// End-to-end: distinct in/out buffers; input marked invariant (index 0).
TEST(KernelThunkTest, AddF32) {
  std::vector<MaybeOwningDeviceMemory> buffers;
  std::vector<float> in = {1.0, 2.0, 3.0, 4.0};
  std::vector<float> out(4, 0.0);
  size_t size_in_bytes = in.size() * sizeof(float);
  buffers.emplace_back(se::DeviceMemoryBase(in.data(), size_in_bytes));
  buffers.emplace_back(se::DeviceMemoryBase(out.data(), size_in_bytes));
  BufferAllocations allocations(buffers);
  BufferAllocation in_alloc(0, size_in_bytes, 0);
  BufferAllocation out_alloc(1, size_in_bytes, 0);
  BufferAllocation::Slice in_slice(&in_alloc, 0, size_in_bytes);
  BufferAllocation::Slice out_slice(&out_alloc, 0, size_in_bytes);
  TF_ASSERT_OK_AND_ASSIGN(
      auto thunk,
      KernelThunk::Create({"add_f32"}, {in_slice}, {out_slice}, "add_f32",
                          se::ThreadDim(4), {0}));
  AddF32HostKernel host_kernels;
  Thunk::ExecuteParams params = {&host_kernels, &allocations};
  auto execute_event = thunk->Execute(params);
  tsl::BlockUntilReady(execute_event);
  ASSERT_FALSE(execute_event.IsError()) << execute_event.GetError();
  std::vector<float> expected = {2.0, 4.0, 6.0, 8.0};
  EXPECT_EQ(out, expected);
}
// In-place variant: same slice as input and output, no invariant buffers.
TEST(KernelThunkTest, AddF32Inline) {
  std::vector<MaybeOwningDeviceMemory> buffers;
  std::vector<float> in_out = {1.0, 2.0, 3.0, 4.0};
  size_t size_in_bytes = in_out.size() * sizeof(float);
  buffers.emplace_back(se::DeviceMemoryBase(in_out.data(), size_in_bytes));
  BufferAllocations allocations(buffers);
  BufferAllocation in_out_alloc(0, size_in_bytes, 0);
  BufferAllocation::Slice in_out_slice(&in_out_alloc, 0, size_in_bytes);
  TF_ASSERT_OK_AND_ASSIGN(
      auto thunk, KernelThunk::Create(
                      {"add_f32"}, {in_out_slice}, {in_out_slice}, "add_f32",
                      se::ThreadDim(4), {}));
  AddF32HostKernel host_kernels;
  Thunk::ExecuteParams params = {&host_kernels, &allocations};
  auto execute_event = thunk->Execute(params);
  tsl::BlockUntilReady(execute_event);
  ASSERT_FALSE(execute_event.IsError());
  std::vector<float> expected = {2.0, 4.0, 6.0, 8.0};
  EXPECT_EQ(in_out, expected);
}
// Invariant-buffer metadata checks below run only in debug builds.
// Case: input slice should be invariant but the invariant set is empty.
TEST(KernelThunkInvariantBuffersTest, MissingBufferSlice) {
#ifdef NDEBUG
  GTEST_SKIP() << "Invariant buffers check is disabled in optimized build.";
#endif
  std::vector<MaybeOwningDeviceMemory> buffers;
  std::vector<float> in = {1.0, 2.0, 3.0, 4.0};
  std::vector<float> out(4, 0.0);
  size_t size_in_bytes = in.size() * sizeof(float);
  buffers.emplace_back(se::DeviceMemoryBase(in.data(), size_in_bytes));
  buffers.emplace_back(se::DeviceMemoryBase(out.data(), size_in_bytes));
  BufferAllocations allocations(buffers);
  BufferAllocation in_alloc(0, size_in_bytes, 0);
  BufferAllocation out_alloc(1, size_in_bytes, 0);
  BufferAllocation::Slice in_slice(&in_alloc, 0, size_in_bytes);
  BufferAllocation::Slice out_slice(&out_alloc, 0, size_in_bytes);
  TF_ASSERT_OK_AND_ASSIGN(
      auto thunk,
      KernelThunk::Create({"add_f32"}, {in_slice}, {out_slice}, "add_f32",
                          se::ThreadDim(4), {}));
  AddF32HostKernel host_kernels;
  Thunk::ExecuteParams params = {&host_kernels, &allocations};
  auto execute_event = thunk->Execute(params);
  tsl::BlockUntilReady(execute_event);
  ASSERT_TRUE(execute_event.IsError());
  auto status = execute_event.GetError();
  EXPECT_EQ(status.code(), absl::StatusCode::kInternal);
  EXPECT_TRUE(absl::StrContains(status.message(),
                                "Mismatch in invariant buffers metadata"));
}
// Case: in-place buffer incorrectly declared invariant (it is written).
TEST(KernelThunkInvariantBuffersTest, ExtraInputOutputBufferSlice) {
#ifdef NDEBUG
  GTEST_SKIP() << "Invariant buffers check is disabled in optimized build.";
#endif
  std::vector<MaybeOwningDeviceMemory> buffers;
  std::vector<float> in_out = {1.0, 2.0, 3.0, 4.0};
  size_t size_in_bytes = in_out.size() * sizeof(float);
  buffers.emplace_back(se::DeviceMemoryBase(in_out.data(), size_in_bytes));
  BufferAllocations allocations(buffers);
  BufferAllocation in_out_alloc(0, size_in_bytes, 0);
  BufferAllocation::Slice in_out_slice(&in_out_alloc, 0, size_in_bytes);
  TF_ASSERT_OK_AND_ASSIGN(
      auto thunk, KernelThunk::Create(
                      {"add_f32"}, {in_out_slice}, {in_out_slice}, "add_f32",
                      se::ThreadDim(4), {0}));
  AddF32HostKernel host_kernels;
  Thunk::ExecuteParams params = {&host_kernels, &allocations};
  auto execute_event = thunk->Execute(params);
  tsl::BlockUntilReady(execute_event);
  ASSERT_TRUE(execute_event.IsError());
  auto status = execute_event.GetError();
  EXPECT_EQ(status.code(), absl::StatusCode::kInternal);
  EXPECT_TRUE(absl::StrContains(status.message(),
                                "Mismatch in invariant buffers metadata"));
}
// Case: two slices alias the same memory, but only one is written; marking
// the aliasing read slice invariant must be detected as a mismatch.
TEST(KernelThunkInvariantBuffersTest,
     MemorySectionIncorrectlyMarkedAsInvariant) {
#ifdef NDEBUG
  GTEST_SKIP() << "Invariant buffers check is disabled in optimized build.";
#endif
  std::vector<MaybeOwningDeviceMemory> buffers;
  std::vector<float> in_out = {1.0, 2.0, 3.0, 4.0};
  size_t size_in_bytes = in_out.size() * sizeof(float);
  buffers.emplace_back(se::DeviceMemoryBase(in_out.data(), size_in_bytes));
  buffers.emplace_back(se::DeviceMemoryBase(in_out.data(), size_in_bytes));
  BufferAllocations allocations(buffers);
  BufferAllocation in_0_alloc(0, size_in_bytes, 0);
  BufferAllocation in_1_alloc(1, size_in_bytes, 0);
  BufferAllocation::Slice in_0_slice(&in_0_alloc, 0, size_in_bytes);
  BufferAllocation::Slice in_1_slice(&in_1_alloc, 0, size_in_bytes);
  TF_ASSERT_OK_AND_ASSIGN(
      auto thunk, KernelThunk::Create({"add_f32"}, {in_0_slice, in_1_slice},
                                      {in_0_slice}, "add_f32", se::ThreadDim(4),
                                      {1}));
  AddF32HostKernel host_kernels;
  Thunk::ExecuteParams params = {&host_kernels, &allocations};
  auto execute_event = thunk->Execute(params);
  tsl::BlockUntilReady(execute_event);
  ASSERT_TRUE(execute_event.IsError());
  auto status = execute_event.GetError();
  EXPECT_EQ(status.code(), absl::StatusCode::kInternal);
  EXPECT_TRUE(absl::StrContains(status.message(),
                                "Mismatch in invariant buffers metadata"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/kernel_thunk.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/kernel_thunk_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4e0595d1-6ba2-4742-adc2-e2b72de011da | cpp | abseil/abseil-cpp | statusor | absl/status/statusor.cc | absl/status/statusor_test.cc | #include "absl/status/statusor.h"
#include <cstdlib>
#include <utility>
#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/nullability.h"
#include "absl/status/internal/statusor_internal.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
// Exception thrown when a StatusOr<T>'s value is accessed while it holds an
// error status. The what() message is built lazily (and exactly once) from
// the stored status.
BadStatusOrAccess::BadStatusOrAccess(absl::Status status)
    : status_(std::move(status)) {}
// Copy only the status; what_ is regenerated on demand in the new object.
BadStatusOrAccess::BadStatusOrAccess(const BadStatusOrAccess& other)
    : status_(other.status_) {}
BadStatusOrAccess& BadStatusOrAccess::operator=(
    const BadStatusOrAccess& other) {
  // Force `other` to materialize what_ first so copying what_ is coherent
  // with copying status_ (the once-flag itself is not copyable).
  other.InitWhat();
  status_ = other.status_;
  what_ = other.what_;
  return *this;
}
BadStatusOrAccess& BadStatusOrAccess::operator=(BadStatusOrAccess&& other) {
  // Same rationale as copy-assignment: materialize before moving.
  other.InitWhat();
  status_ = std::move(other.status_);
  what_ = std::move(other.what_);
  return *this;
}
// Move only the status; the new object's once-flag is fresh, so what_ will
// be rebuilt lazily if requested.
BadStatusOrAccess::BadStatusOrAccess(BadStatusOrAccess&& other)
    : status_(std::move(other.status_)) {}
// Lazily builds and returns the explanatory message.
absl::Nonnull<const char*> BadStatusOrAccess::what() const noexcept {
  InitWhat();
  return what_.c_str();
}
const absl::Status& BadStatusOrAccess::status() const { return status_; }
// Builds what_ exactly once, guarded by the call_once flag for thread
// safety.
void BadStatusOrAccess::InitWhat() const {
  absl::call_once(init_what_, [this] {
    what_ = absl::StrCat("Bad StatusOr access: ", status_.ToString());
  });
}
namespace internal_statusor {
// Called when a StatusOr<T> is constructed from an OK status, which is a
// contract violation: fatal in debug builds, downgraded to an error log
// plus an InternalError payload in release builds.
void Helper::HandleInvalidStatusCtorArg(absl::Nonnull<absl::Status*> status) {
  const char* kMessage =
      "An OK status is not a valid constructor argument to StatusOr<T>";
#ifdef NDEBUG
  ABSL_INTERNAL_LOG(ERROR, kMessage);
#else
  ABSL_INTERNAL_LOG(FATAL, kMessage);
#endif
  // Fall back to an error state so release builds stay well-defined.
  *status = absl::InternalError(kMessage);
}
// Aborts the process with the offending status; used when value() is
// reached on an error StatusOr and exceptions are unavailable.
void Helper::Crash(const absl::Status& status) {
  ABSL_INTERNAL_LOG(
      FATAL,
      absl::StrCat("Attempting to fetch value instead of handling error ",
                   status.ToString()));
}
// Throws BadStatusOrAccess when exceptions are enabled; otherwise logs
// fatally and aborts (std::abort keeps the noreturn contract explicit).
void ThrowBadStatusOrAccess(absl::Status status) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw absl::BadStatusOrAccess(std::move(status));
#else
  ABSL_INTERNAL_LOG(
      FATAL,
      absl::StrCat("Attempting to fetch value instead of handling error ",
                   status.ToString()));
  std::abort();
#endif
}
}
ABSL_NAMESPACE_END
} | #include "absl/status/statusor.h"
#include <array>
#include <cstddef>
#include <initializer_list>
#include <map>
#include <memory>
#include <ostream>
#include <sstream>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/casts.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/any.h"
#include "absl/types/variant.h"
#include "absl/utility/utility.h"
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::IsOkAndHolds;
using ::testing::AllOf;
using ::testing::AnyOf;
using ::testing::AnyWith;
using ::testing::ElementsAre;
using ::testing::EndsWith;
using ::testing::Field;
using ::testing::HasSubstr;
using ::testing::Ne;
using ::testing::Not;
using ::testing::Pointee;
using ::testing::StartsWith;
using ::testing::VariantWith;
// Test helper that records how it was last transferred: after a copy
// (construction or assignment) `copied` is true and `moved` false; after a
// move it is the reverse. The payload `x` is always carried over.
struct CopyDetector {
  CopyDetector() = default;
  explicit CopyDetector(int initial) : x(initial) {}
  CopyDetector(CopyDetector&& src) noexcept
      : x(src.x), copied(false), moved(true) {}
  CopyDetector(const CopyDetector& src)
      : x(src.x), copied(true), moved(false) {}
  CopyDetector& operator=(const CopyDetector& rhs) {
    x = rhs.x;
    copied = true;
    moved = false;
    return *this;
  }
  CopyDetector& operator=(CopyDetector&& rhs) noexcept {
    x = rhs.x;
    copied = false;
    moved = true;
    return *this;
  }
  int x = 0;         // carried payload
  bool copied = false;  // last transfer was a copy
  bool moved = false;   // last transfer was a move
};
testing::Matcher<const CopyDetector&> CopyDetectorHas(int a, bool b, bool c) {
return AllOf(Field(&CopyDetector::x, a), Field(&CopyDetector::moved, b),
Field(&CopyDetector::copied, c));
}
class Base1 {
public:
virtual ~Base1() {}
int pad;
};
class Base2 {
public:
virtual ~Base2() {}
int yetotherpad;
};
class Derived : public Base1, public Base2 {
public:
virtual ~Derived() {}
int evenmorepad;
};
// Fixture: a type that is copy-constructible but NOT copy-assignable, used
// to verify StatusOr's copy constructor does not require assignability.
class CopyNoAssign {
 public:
  explicit CopyNoAssign(int value) : foo(value) {}
  CopyNoAssign(const CopyNoAssign& other) : foo(other.foo) {}
  int foo;

 private:
  // C++11 deleted function replaces the legacy declare-private-never-define
  // idiom; misuse now fails with a clearer compiler diagnostic instead of a
  // link error inside the class's own members.
  CopyNoAssign& operator=(const CopyNoAssign&) = delete;
};
// Returns an OK StatusOr owning a freshly allocated int(0); helper for the
// move-only-type tests below.
absl::StatusOr<std::unique_ptr<int>> ReturnUniquePtr() {
  return absl::make_unique<int>(0);
}
// StatusOr<T>::value_type must alias T exactly.
TEST(StatusOr, ElementType) {
  static_assert(std::is_same<absl::StatusOr<int>::value_type, int>(), "");
  static_assert(std::is_same<absl::StatusOr<char>::value_type, char>(), "");
}
// Construction and re-assignment from a StatusOr holding a move-only type.
TEST(StatusOr, TestMoveOnlyInitialization) {
  absl::StatusOr<std::unique_ptr<int>> thing(ReturnUniquePtr());
  ASSERT_TRUE(thing.ok());
  EXPECT_EQ(0, **thing);
  int* previous = thing->get();
  thing = ReturnUniquePtr();
  EXPECT_TRUE(thing.ok());
  EXPECT_EQ(0, **thing);
  // Reassignment must have installed a different allocation.
  EXPECT_NE(previous, thing->get());
}
// Extracting a move-only value via *std::move(so) and moving one back in.
TEST(StatusOr, TestMoveOnlyValueExtraction) {
  absl::StatusOr<std::unique_ptr<int>> thing(ReturnUniquePtr());
  ASSERT_TRUE(thing.ok());
  std::unique_ptr<int> ptr = *std::move(thing);
  EXPECT_EQ(0, *ptr);
  thing = std::move(ptr);
  ptr = std::move(*thing);
  EXPECT_EQ(0, *ptr);
}
// Direct initialization from the dereferenced temporary StatusOr.
TEST(StatusOr, TestMoveOnlyInitializationFromTemporaryByValueOrDie) {
  std::unique_ptr<int> ptr(*ReturnUniquePtr());
  EXPECT_EQ(0, *ptr);
}
// value() on a const rvalue StatusOr must return const T&&.
TEST(StatusOr, TestValueOrDieOverloadForConstTemporary) {
  static_assert(
      std::is_same<
          const int&&,
          decltype(std::declval<const absl::StatusOr<int>&&>().value())>(),
      "value() for const temporaries should return const T&&");
}
// Converting construction/assignment: unique_ptr<int> -> unique_ptr<const int>.
TEST(StatusOr, TestMoveOnlyConversion) {
  absl::StatusOr<std::unique_ptr<const int>> const_thing(ReturnUniquePtr());
  EXPECT_TRUE(const_thing.ok());
  EXPECT_EQ(0, **const_thing);
  const int* const_previous = const_thing->get();
  const_thing = ReturnUniquePtr();
  EXPECT_TRUE(const_thing.ok());
  EXPECT_EQ(0, **const_thing);
  EXPECT_NE(const_previous, const_thing->get());
}
// StatusOr of a move-only type works inside a vector; a resized-in default
// element carries UnknownError.
TEST(StatusOr, TestMoveOnlyVector) {
  std::vector<absl::StatusOr<std::unique_ptr<int>>> vec;
  vec.push_back(ReturnUniquePtr());
  vec.resize(2);
  auto another_vec = std::move(vec);
  EXPECT_EQ(0, **another_vec[0]);
  EXPECT_EQ(absl::UnknownError(""), another_vec[1].status());
}
// Default-constructed StatusOr is not OK and holds kUnknown.
TEST(StatusOr, TestDefaultCtor) {
  absl::StatusOr<int> thing;
  EXPECT_FALSE(thing.ok());
  EXPECT_EQ(thing.status().code(), absl::StatusCode::kUnknown);
}
// Lvalue Status is copied into StatusOr; rvalue Status is moved from.
TEST(StatusOr, StatusCtorForwards) {
  absl::Status status(absl::StatusCode::kInternal, "Some error");
  EXPECT_EQ(absl::StatusOr<int>(status).status().message(), "Some error");
  EXPECT_EQ(status.message(), "Some error");
  EXPECT_EQ(absl::StatusOr<int>(std::move(status)).status().message(),
            "Some error");
  // After the move the original no longer carries the message.
  EXPECT_NE(status.message(), "Some error");
}
// what() must still embed the status text after copy construction.
// (The long message defeats any small-string optimization in the message.)
TEST(BadStatusOrAccessTest, CopyConstructionWhatOk) {
  absl::Status error =
      absl::InternalError("some arbitrary message too big for the sso buffer");
  absl::BadStatusOrAccess e1{error};
  absl::BadStatusOrAccess e2{e1};
  EXPECT_THAT(e1.what(), HasSubstr(error.ToString()));
  EXPECT_THAT(e2.what(), HasSubstr(error.ToString()));
}
// what() must still embed the status text after copy assignment.
TEST(BadStatusOrAccessTest, CopyAssignmentWhatOk) {
  absl::Status error =
      absl::InternalError("some arbitrary message too big for the sso buffer");
  absl::BadStatusOrAccess e1{error};
  absl::BadStatusOrAccess e2{absl::InternalError("other")};
  e2 = e1;
  EXPECT_THAT(e1.what(), HasSubstr(error.ToString()));
  EXPECT_THAT(e2.what(), HasSubstr(error.ToString()));
}
// what() must still embed the status text after move construction.
TEST(BadStatusOrAccessTest, MoveConstructionWhatOk) {
  absl::Status error =
      absl::InternalError("some arbitrary message too big for the sso buffer");
  absl::BadStatusOrAccess e1{error};
  absl::BadStatusOrAccess e2{std::move(e1)};
  EXPECT_THAT(e2.what(), HasSubstr(error.ToString()));
}
// what() must still embed the status text after move assignment.
TEST(BadStatusOrAccessTest, MoveAssignmentWhatOk) {
  absl::Status error =
      absl::InternalError("some arbitrary message too big for the sso buffer");
  absl::BadStatusOrAccess e1{error};
  absl::BadStatusOrAccess e2{absl::InternalError("other")};
  e2 = std::move(e1);
  EXPECT_THAT(e2.what(), HasSubstr(error.ToString()));
}
#ifdef ABSL_HAVE_EXCEPTIONS
// With exceptions enabled: `statement` must throw absl::BadStatusOrAccess
// carrying exactly `status_`, and what() must embed the status text.
#define EXPECT_DEATH_OR_THROW(statement, status_) \
  EXPECT_THROW( \
      { \
        try { \
          statement; \
        } catch (const absl::BadStatusOrAccess& e) { \
          EXPECT_EQ(e.status(), status_); \
          EXPECT_THAT(e.what(), HasSubstr(e.status().ToString())); \
          throw; \
        } \
      }, \
      absl::BadStatusOrAccess);
#else
// Without exceptions: the same statement must terminate the process with a
// message containing the status text (when death tests are supported).
#define EXPECT_DEATH_OR_THROW(statement, status) \
  EXPECT_DEATH_IF_SUPPORTED(statement, status.ToString());
#endif
// value() on a default-constructed (kUnknown) StatusOr dies/throws.
TEST(StatusOrDeathTest, TestDefaultCtorValue) {
  absl::StatusOr<int> thing;
  EXPECT_DEATH_OR_THROW(thing.value(), absl::UnknownError(""));
  const absl::StatusOr<int> thing2;
  EXPECT_DEATH_OR_THROW(thing2.value(), absl::UnknownError(""));
}
// value() on a non-OK StatusOr dies/throws with that status.
TEST(StatusOrDeathTest, TestValueNotOk) {
  absl::StatusOr<int> thing(absl::CancelledError());
  EXPECT_DEATH_OR_THROW(thing.value(), absl::CancelledError());
}
// Same, through a const StatusOr.
TEST(StatusOrDeathTest, TestValueNotOkConst) {
  const absl::StatusOr<int> thing(absl::UnknownError(""));
  EXPECT_DEATH_OR_THROW(thing.value(), absl::UnknownError(""));
}
// Pointer payloads behave the same as value payloads on bad access.
TEST(StatusOrDeathTest, TestPointerDefaultCtorValue) {
  absl::StatusOr<int*> thing;
  EXPECT_DEATH_OR_THROW(thing.value(), absl::UnknownError(""));
}
TEST(StatusOrDeathTest, TestPointerValueNotOk) {
  absl::StatusOr<int*> thing(absl::CancelledError());
  EXPECT_DEATH_OR_THROW(thing.value(), absl::CancelledError());
}
TEST(StatusOrDeathTest, TestPointerValueNotOkConst) {
  const absl::StatusOr<int*> thing(absl::CancelledError());
  EXPECT_DEATH_OR_THROW(thing.value(), absl::CancelledError());
}
#if GTEST_HAS_DEATH_TEST
// Constructing StatusOr from an OK Status is a precondition violation: debug
// builds die, release builds fall back to kInternal.
TEST(StatusOrDeathTest, TestStatusCtorStatusOk) {
  EXPECT_DEBUG_DEATH(
      {
        absl::StatusOr<int> thing(absl::OkStatus());
        EXPECT_FALSE(thing.ok());
        EXPECT_EQ(thing.status().code(), absl::StatusCode::kInternal);
      },
      "An OK status is not a valid constructor argument");
}
// Same precondition check for pointer payloads.
TEST(StatusOrDeathTest, TestPointerStatusCtorStatusOk) {
  EXPECT_DEBUG_DEATH(
      {
        absl::StatusOr<int*> thing(absl::OkStatus());
        EXPECT_FALSE(thing.ok());
        EXPECT_EQ(thing.status().code(), absl::StatusCode::kInternal);
      },
      "An OK status is not a valid constructor argument");
}
#endif
// value() copies from an lvalue StatusOr and moves from an rvalue one.
TEST(StatusOr, ValueAccessor) {
  const int kIntValue = 110;
  {
    absl::StatusOr<int> status_or(kIntValue);
    EXPECT_EQ(kIntValue, status_or.value());
    EXPECT_EQ(kIntValue, std::move(status_or).value());
  }
  {
    absl::StatusOr<CopyDetector> status_or(kIntValue);
    EXPECT_THAT(status_or,
                IsOkAndHolds(CopyDetectorHas(kIntValue, false, false)));
    CopyDetector copy_detector = status_or.value();
    EXPECT_THAT(copy_detector, CopyDetectorHas(kIntValue, false, true));
    copy_detector = std::move(status_or).value();
    EXPECT_THAT(copy_detector, CopyDetectorHas(kIntValue, true, false));
  }
}
// value() on a non-OK StatusOr dies/throws with the stored error.
TEST(StatusOr, BadValueAccess) {
  const absl::Status kError = absl::CancelledError("message");
  absl::StatusOr<int> status_or(kError);
  EXPECT_DEATH_OR_THROW(status_or.value(), kError);
}
// Construction from a non-OK Status preserves the code.
TEST(StatusOr, TestStatusCtor) {
  absl::StatusOr<int> thing(absl::CancelledError());
  EXPECT_FALSE(thing.ok());
  EXPECT_EQ(thing.status().code(), absl::StatusCode::kCancelled);
}
// Construction from a value yields OK and stores the value.
TEST(StatusOr, TestValueCtor) {
  const int kI = 4;
  const absl::StatusOr<int> thing(kI);
  EXPECT_TRUE(thing.ok());
  EXPECT_EQ(kI, *thing);
}
// Fixture with a const member: constructible but neither copy- nor
// move-assignable, exercising in-place construction paths.
struct Foo {
  const int x;
  explicit Foo(int y) : x(y) {}
};
// absl::in_place forwards constructor arguments to T directly.
TEST(StatusOr, InPlaceConstruction) {
  EXPECT_THAT(absl::StatusOr<Foo>(absl::in_place, 10),
              IsOkAndHolds(Field(&Foo::x, 10)));
}
// Fixture taking an initializer_list plus a move-only argument, to exercise
// the initializer_list overloads of in_place construction and emplace().
struct InPlaceHelper {
  InPlaceHelper(std::initializer_list<int> xs, std::unique_ptr<int> yy)
      : x(xs), y(std::move(yy)) {}
  const std::vector<int> x;
  std::unique_ptr<int> y;
};
// in_place construction with an initializer_list argument.
TEST(StatusOr, InPlaceInitListConstruction) {
  absl::StatusOr<InPlaceHelper> status_or(absl::in_place, {10, 11, 12},
                                          absl::make_unique<int>(13));
  EXPECT_THAT(status_or, IsOkAndHolds(AllOf(
                             Field(&InPlaceHelper::x, ElementsAre(10, 11, 12)),
                             Field(&InPlaceHelper::y, Pointee(13)))));
}
// emplace() replaces the value, both from the OK and from the error state.
TEST(StatusOr, Emplace) {
  absl::StatusOr<Foo> status_or_foo(10);
  status_or_foo.emplace(20);
  EXPECT_THAT(status_or_foo, IsOkAndHolds(Field(&Foo::x, 20)));
  status_or_foo = absl::InvalidArgumentError("msg");
  EXPECT_FALSE(status_or_foo.ok());
  EXPECT_EQ(status_or_foo.status().code(), absl::StatusCode::kInvalidArgument);
  EXPECT_EQ(status_or_foo.status().message(), "msg");
  status_or_foo.emplace(20);
  EXPECT_THAT(status_or_foo, IsOkAndHolds(Field(&Foo::x, 20)));
}
// Same, via the initializer_list overload of emplace().
TEST(StatusOr, EmplaceInitializerList) {
  absl::StatusOr<InPlaceHelper> status_or(absl::in_place, {10, 11, 12},
                                          absl::make_unique<int>(13));
  status_or.emplace({1, 2, 3}, absl::make_unique<int>(4));
  EXPECT_THAT(status_or,
              IsOkAndHolds(AllOf(Field(&InPlaceHelper::x, ElementsAre(1, 2, 3)),
                                 Field(&InPlaceHelper::y, Pointee(4)))));
  status_or = absl::InvalidArgumentError("msg");
  EXPECT_FALSE(status_or.ok());
  EXPECT_EQ(status_or.status().code(), absl::StatusCode::kInvalidArgument);
  EXPECT_EQ(status_or.status().message(), "msg");
  status_or.emplace({1, 2, 3}, absl::make_unique<int>(4));
  EXPECT_THAT(status_or,
              IsOkAndHolds(AllOf(Field(&InPlaceHelper::x, ElementsAre(1, 2, 3)),
                                 Field(&InPlaceHelper::y, Pointee(4)))));
}
// Copy constructor preserves OK status and value.
TEST(StatusOr, TestCopyCtorStatusOk) {
  const int kI = 4;
  const absl::StatusOr<int> original(kI);
  const absl::StatusOr<int> copy(original);
  EXPECT_THAT(copy.status(), IsOk());
  EXPECT_EQ(*original, *copy);
}
// Copy constructor preserves a non-OK status.
TEST(StatusOr, TestCopyCtorStatusNotOk) {
  absl::StatusOr<int> original(absl::CancelledError());
  absl::StatusOr<int> copy(original);
  EXPECT_EQ(copy.status().code(), absl::StatusCode::kCancelled);
}
// Copy construction only needs copy-constructibility, not assignability.
TEST(StatusOr, TestCopyCtorNonAssignable) {
  const int kI = 4;
  CopyNoAssign value(kI);
  absl::StatusOr<CopyNoAssign> original(value);
  absl::StatusOr<CopyNoAssign> copy(original);
  EXPECT_THAT(copy.status(), IsOk());
  EXPECT_EQ(original->foo, copy->foo);
}
// Converting copy construction (int -> double), OK case.
TEST(StatusOr, TestCopyCtorStatusOKConverting) {
  const int kI = 4;
  absl::StatusOr<int> original(kI);
  absl::StatusOr<double> copy(original);
  EXPECT_THAT(copy.status(), IsOk());
  EXPECT_DOUBLE_EQ(*original, *copy);
}
// Converting copy construction, error case: the status carries over.
TEST(StatusOr, TestCopyCtorStatusNotOkConverting) {
  absl::StatusOr<int> original(absl::CancelledError());
  absl::StatusOr<double> copy(original);
  EXPECT_EQ(copy.status(), original.status());
}
// Copy- and move-assignment of an OK StatusOr; after move the source's
// shared_ptr payload is left null (valid moved-from state).
TEST(StatusOr, TestAssignmentStatusOk) {
  {
    const auto p = std::make_shared<int>(17);
    absl::StatusOr<std::shared_ptr<int>> source(p);
    absl::StatusOr<std::shared_ptr<int>> target;
    target = source;
    ASSERT_TRUE(target.ok());
    EXPECT_THAT(target.status(), IsOk());
    EXPECT_EQ(p, *target);
    ASSERT_TRUE(source.ok());
    EXPECT_THAT(source.status(), IsOk());
    EXPECT_EQ(p, *source);
  }
  {
    const auto p = std::make_shared<int>(17);
    absl::StatusOr<std::shared_ptr<int>> source(p);
    absl::StatusOr<std::shared_ptr<int>> target;
    target = std::move(source);
    ASSERT_TRUE(target.ok());
    EXPECT_THAT(target.status(), IsOk());
    EXPECT_EQ(p, *target);
    ASSERT_TRUE(source.ok());
    EXPECT_THAT(source.status(), IsOk());
    EXPECT_EQ(nullptr, *source);
  }
}
// Copy- and move-assignment of a non-OK StatusOr; a moved-from error
// StatusOr reports kInternal ("status accessed after move").
TEST(StatusOr, TestAssignmentStatusNotOk) {
  {
    const absl::Status expected = absl::CancelledError();
    absl::StatusOr<int> source(expected);
    absl::StatusOr<int> target;
    target = source;
    EXPECT_FALSE(target.ok());
    EXPECT_EQ(expected, target.status());
    EXPECT_FALSE(source.ok());
    EXPECT_EQ(expected, source.status());
  }
  {
    const absl::Status expected = absl::CancelledError();
    absl::StatusOr<int> source(expected);
    absl::StatusOr<int> target;
    target = std::move(source);
    EXPECT_FALSE(target.ok());
    EXPECT_EQ(expected, target.status());
    EXPECT_FALSE(source.ok());
    EXPECT_EQ(source.status().code(), absl::StatusCode::kInternal);
  }
}
// Converting assignment, OK case: int -> double by copy, and
// unique_ptr -> shared_ptr by move (source pointer left null).
TEST(StatusOr, TestAssignmentStatusOKConverting) {
  {
    const int kI = 4;
    absl::StatusOr<int> source(kI);
    absl::StatusOr<double> target;
    target = source;
    ASSERT_TRUE(target.ok());
    EXPECT_THAT(target.status(), IsOk());
    EXPECT_DOUBLE_EQ(kI, *target);
    ASSERT_TRUE(source.ok());
    EXPECT_THAT(source.status(), IsOk());
    EXPECT_DOUBLE_EQ(kI, *source);
  }
  {
    const auto p = new int(17);
    absl::StatusOr<std::unique_ptr<int>> source(absl::WrapUnique(p));
    absl::StatusOr<std::shared_ptr<int>> target;
    target = std::move(source);
    ASSERT_TRUE(target.ok());
    EXPECT_THAT(target.status(), IsOk());
    EXPECT_EQ(p, target->get());
    ASSERT_TRUE(source.ok());
    EXPECT_THAT(source.status(), IsOk());
    EXPECT_EQ(nullptr, source->get());
  }
}
// Minimal aggregate used as the source type for conversion fixtures below.
struct A {
  int x;
};
// Implicitly constructible from A, recording whether construction came from
// an rvalue; intentionally non-explicit.
struct ImplicitConstructibleFromA {
  int x;
  bool moved;
  ImplicitConstructibleFromA(const A& a)
      : x(a.x), moved(false) {}
  ImplicitConstructibleFromA(A&& a)
      : x(a.x), moved(true) {}
};
// StatusOr<A> implicitly converts to StatusOr<ImplicitConstructibleFromA>,
// moving from an rvalue and copying from an lvalue.
TEST(StatusOr, ImplicitConvertingConstructor) {
  EXPECT_THAT(
      absl::implicit_cast<absl::StatusOr<ImplicitConstructibleFromA>>(
          absl::StatusOr<A>(A{11})),
      IsOkAndHolds(AllOf(Field(&ImplicitConstructibleFromA::x, 11),
                         Field(&ImplicitConstructibleFromA::moved, true))));
  absl::StatusOr<A> a(A{12});
  EXPECT_THAT(
      absl::implicit_cast<absl::StatusOr<ImplicitConstructibleFromA>>(a),
      IsOkAndHolds(AllOf(Field(&ImplicitConstructibleFromA::x, 12),
                         Field(&ImplicitConstructibleFromA::moved, false))));
}
// Explicit-only counterpart of ImplicitConstructibleFromA.
struct ExplicitConstructibleFromA {
  int x;
  bool moved;
  explicit ExplicitConstructibleFromA(const A& a) : x(a.x), moved(false) {}
  explicit ExplicitConstructibleFromA(A&& a) : x(a.x), moved(true) {}
};
// When T's converting constructor is explicit, StatusOr conversion is
// explicit too (no implicit convertibility), but direct construction works.
TEST(StatusOr, ExplicitConvertingConstructor) {
  EXPECT_FALSE(
      (std::is_convertible<const absl::StatusOr<A>&,
                           absl::StatusOr<ExplicitConstructibleFromA>>::value));
  EXPECT_FALSE(
      (std::is_convertible<absl::StatusOr<A>&&,
                           absl::StatusOr<ExplicitConstructibleFromA>>::value));
  EXPECT_THAT(
      absl::StatusOr<ExplicitConstructibleFromA>(absl::StatusOr<A>(A{11})),
      IsOkAndHolds(AllOf(Field(&ExplicitConstructibleFromA::x, 11),
                         Field(&ExplicitConstructibleFromA::moved, true))));
  absl::StatusOr<A> a(A{12});
  EXPECT_THAT(
      absl::StatusOr<ExplicitConstructibleFromA>(a),
      IsOkAndHolds(AllOf(Field(&ExplicitConstructibleFromA::x, 12),
                         Field(&ExplicitConstructibleFromA::moved, false))));
}
// Implicitly constructible from bool (one user-defined conversion).
struct ImplicitConstructibleFromBool {
  ImplicitConstructibleFromBool(bool y) : x(y) {}
  bool x = false;
};
// Explicitly constructible from bool but implicitly convertible TO bool.
struct ConvertibleToBool {
  explicit ConvertibleToBool(bool y) : x(y) {}
  operator bool() const { return x; }
  bool x = false;
};
// Single user-defined conversions through bool are allowed implicitly, but a
// two-step chain (ConvertibleToBool -> bool -> ImplicitConstructibleFromBool)
// must not be implicitly convertible.
TEST(StatusOr, ImplicitBooleanConstructionWithImplicitCasts) {
  EXPECT_THAT(absl::StatusOr<bool>(absl::StatusOr<ConvertibleToBool>(true)),
              IsOkAndHolds(true));
  EXPECT_THAT(absl::StatusOr<bool>(absl::StatusOr<ConvertibleToBool>(false)),
              IsOkAndHolds(false));
  EXPECT_THAT(
      absl::implicit_cast<absl::StatusOr<ImplicitConstructibleFromBool>>(
          absl::StatusOr<bool>(false)),
      IsOkAndHolds(Field(&ImplicitConstructibleFromBool::x, false)));
  EXPECT_FALSE((std::is_convertible<
                absl::StatusOr<ConvertibleToBool>,
                absl::StatusOr<ImplicitConstructibleFromBool>>::value));
}
// The same conversions still work via explicit (direct) construction, and
// error states propagate through the conversion.
TEST(StatusOr, BooleanConstructionWithImplicitCasts) {
  EXPECT_THAT(absl::StatusOr<bool>(absl::StatusOr<ConvertibleToBool>(true)),
              IsOkAndHolds(true));
  EXPECT_THAT(absl::StatusOr<bool>(absl::StatusOr<ConvertibleToBool>(false)),
              IsOkAndHolds(false));
  EXPECT_THAT(
      absl::StatusOr<ImplicitConstructibleFromBool>{
          absl::StatusOr<bool>(false)},
      IsOkAndHolds(Field(&ImplicitConstructibleFromBool::x, false)));
  EXPECT_THAT(
      absl::StatusOr<ImplicitConstructibleFromBool>{
          absl::StatusOr<bool>(absl::InvalidArgumentError(""))},
      Not(IsOk()));
  EXPECT_THAT(
      absl::StatusOr<ImplicitConstructibleFromBool>{
          absl::StatusOr<ConvertibleToBool>(ConvertibleToBool{false})},
      IsOkAndHolds(Field(&ImplicitConstructibleFromBool::x, false)));
  EXPECT_THAT(
      absl::StatusOr<ImplicitConstructibleFromBool>{
          absl::StatusOr<ConvertibleToBool>(absl::InvalidArgumentError(""))},
      Not(IsOk()));
}
// Adding or removing const on T converts implicitly in both directions.
TEST(StatusOr, ConstImplicitCast) {
  EXPECT_THAT(absl::implicit_cast<absl::StatusOr<bool>>(
                  absl::StatusOr<const bool>(true)),
              IsOkAndHolds(true));
  EXPECT_THAT(absl::implicit_cast<absl::StatusOr<bool>>(
                  absl::StatusOr<const bool>(false)),
              IsOkAndHolds(false));
  EXPECT_THAT(absl::implicit_cast<absl::StatusOr<const bool>>(
                  absl::StatusOr<bool>(true)),
              IsOkAndHolds(true));
  EXPECT_THAT(absl::implicit_cast<absl::StatusOr<const bool>>(
                  absl::StatusOr<bool>(false)),
              IsOkAndHolds(false));
  EXPECT_THAT(absl::implicit_cast<absl::StatusOr<const std::string>>(
                  absl::StatusOr<std::string>("foo")),
              IsOkAndHolds("foo"));
  EXPECT_THAT(absl::implicit_cast<absl::StatusOr<std::string>>(
                  absl::StatusOr<const std::string>("foo")),
              IsOkAndHolds("foo"));
  EXPECT_THAT(
      absl::implicit_cast<absl::StatusOr<std::shared_ptr<const std::string>>>(
          absl::StatusOr<std::shared_ptr<std::string>>(
              std::make_shared<std::string>("foo"))),
      IsOkAndHolds(Pointee(std::string("foo"))));
}
// const conversions also work through direct construction.
TEST(StatusOr, ConstExplicitConstruction) {
  EXPECT_THAT(absl::StatusOr<bool>(absl::StatusOr<const bool>(true)),
              IsOkAndHolds(true));
  EXPECT_THAT(absl::StatusOr<bool>(absl::StatusOr<const bool>(false)),
              IsOkAndHolds(false));
  EXPECT_THAT(absl::StatusOr<const bool>(absl::StatusOr<bool>(true)),
              IsOkAndHolds(true));
  EXPECT_THAT(absl::StatusOr<const bool>(absl::StatusOr<bool>(false)),
              IsOkAndHolds(false));
}
// Fixture whose only constructor from int is explicit.
struct ExplicitConstructibleFromInt {
  int x;
  explicit ExplicitConstructibleFromInt(int y) : x(y) {}
};
// Direct construction forwards through T's explicit constructor.
TEST(StatusOr, ExplicitConstruction) {
  EXPECT_THAT(absl::StatusOr<ExplicitConstructibleFromInt>(10),
              IsOkAndHolds(Field(&ExplicitConstructibleFromInt::x, 10)));
}
// Implicit construction into a variant payload.
TEST(StatusOr, ImplicitConstruction) {
  auto status_or =
      absl::implicit_cast<absl::StatusOr<absl::variant<int, std::string>>>(10);
  EXPECT_THAT(status_or, IsOkAndHolds(VariantWith<int>(10)));
}
// Implicit construction from a braced initializer list.
// NOTE(review): test name contains a typo ("Initliazer"); kept as-is since
// renaming could break --gtest_filter references.
TEST(StatusOr, ImplicitConstructionFromInitliazerList) {
  auto status_or =
      absl::implicit_cast<absl::StatusOr<std::vector<int>>>({{10, 20, 30}});
  EXPECT_THAT(status_or, IsOkAndHolds(ElementsAre(10, 20, 30)));
}
// Implicit upcast of the pointee type inside unique_ptr.
TEST(StatusOr, UniquePtrImplicitConstruction) {
  auto status_or = absl::implicit_cast<absl::StatusOr<std::unique_ptr<Base1>>>(
      absl::make_unique<Derived>());
  EXPECT_THAT(status_or, IsOkAndHolds(Ne(nullptr)));
}
// Copy/move construction of StatusOr<StatusOr<T>>: copies from lvalues and
// const refs, moves from rvalues; error states carry through.
TEST(StatusOr, NestedStatusOrCopyAndMoveConstructorTests) {
  absl::StatusOr<absl::StatusOr<CopyDetector>> status_or = CopyDetector(10);
  absl::StatusOr<absl::StatusOr<CopyDetector>> status_error =
      absl::InvalidArgumentError("foo");
  EXPECT_THAT(status_or,
              IsOkAndHolds(IsOkAndHolds(CopyDetectorHas(10, true, false))));
  absl::StatusOr<absl::StatusOr<CopyDetector>> a = status_or;
  EXPECT_THAT(a, IsOkAndHolds(IsOkAndHolds(CopyDetectorHas(10, false, true))));
  absl::StatusOr<absl::StatusOr<CopyDetector>> a_err = status_error;
  EXPECT_THAT(a_err, Not(IsOk()));
  const absl::StatusOr<absl::StatusOr<CopyDetector>>& cref = status_or;
  absl::StatusOr<absl::StatusOr<CopyDetector>> b = cref;
  EXPECT_THAT(b, IsOkAndHolds(IsOkAndHolds(CopyDetectorHas(10, false, true))));
  const absl::StatusOr<absl::StatusOr<CopyDetector>>& cref_err = status_error;
  absl::StatusOr<absl::StatusOr<CopyDetector>> b_err = cref_err;
  EXPECT_THAT(b_err, Not(IsOk()));
  absl::StatusOr<absl::StatusOr<CopyDetector>> c = std::move(status_or);
  EXPECT_THAT(c, IsOkAndHolds(IsOkAndHolds(CopyDetectorHas(10, true, false))));
  absl::StatusOr<absl::StatusOr<CopyDetector>> c_err = std::move(status_error);
  EXPECT_THAT(c_err, Not(IsOk()));
}
// Same matrix as above, but via assignment instead of construction.
TEST(StatusOr, NestedStatusOrCopyAndMoveAssignment) {
  absl::StatusOr<absl::StatusOr<CopyDetector>> status_or = CopyDetector(10);
  absl::StatusOr<absl::StatusOr<CopyDetector>> status_error =
      absl::InvalidArgumentError("foo");
  absl::StatusOr<absl::StatusOr<CopyDetector>> a;
  a = status_or;
  EXPECT_THAT(a, IsOkAndHolds(IsOkAndHolds(CopyDetectorHas(10, false, true))));
  a = status_error;
  EXPECT_THAT(a, Not(IsOk()));
  const absl::StatusOr<absl::StatusOr<CopyDetector>>& cref = status_or;
  a = cref;
  EXPECT_THAT(a, IsOkAndHolds(IsOkAndHolds(CopyDetectorHas(10, false, true))));
  const absl::StatusOr<absl::StatusOr<CopyDetector>>& cref_err = status_error;
  a = cref_err;
  EXPECT_THAT(a, Not(IsOk()));
  a = std::move(status_or);
  EXPECT_THAT(a, IsOkAndHolds(IsOkAndHolds(CopyDetectorHas(10, true, false))));
  a = std::move(status_error);
  EXPECT_THAT(a, Not(IsOk()));
}
// Fixture: copyable (user-provided copy ops suppress implicit moves).
struct Copyable {
  Copyable() {}
  Copyable(const Copyable&) {}
  Copyable& operator=(const Copyable&) { return *this; }
};
// Fixture: movable but not copyable.
struct MoveOnly {
  MoveOnly() {}
  MoveOnly(MoveOnly&&) {}
  MoveOnly& operator=(MoveOnly&&) { return *this; }
};
// Fixture: neither copyable nor movable.
struct NonMovable {
  NonMovable() {}
  NonMovable(const NonMovable&) = delete;
  NonMovable(NonMovable&&) = delete;
  NonMovable& operator=(const NonMovable&) = delete;
  NonMovable& operator=(NonMovable&&) = delete;
};
// Sanity-checks the traits of the three fixtures above. (Note these traits
// are checked on the payload types themselves, not on StatusOr<T>.)
TEST(StatusOr, CopyAndMoveAbility) {
  EXPECT_TRUE(std::is_copy_constructible<Copyable>::value);
  EXPECT_TRUE(std::is_copy_assignable<Copyable>::value);
  EXPECT_TRUE(std::is_move_constructible<Copyable>::value);
  EXPECT_TRUE(std::is_move_assignable<Copyable>::value);
  EXPECT_FALSE(std::is_copy_constructible<MoveOnly>::value);
  EXPECT_FALSE(std::is_copy_assignable<MoveOnly>::value);
  EXPECT_TRUE(std::is_move_constructible<MoveOnly>::value);
  EXPECT_TRUE(std::is_move_assignable<MoveOnly>::value);
  EXPECT_FALSE(std::is_copy_constructible<NonMovable>::value);
  EXPECT_FALSE(std::is_copy_assignable<NonMovable>::value);
  EXPECT_FALSE(std::is_move_constructible<NonMovable>::value);
  EXPECT_FALSE(std::is_move_assignable<NonMovable>::value);
}
// Copy/move construction when the payload is type-erased in absl::any;
// the CopyDetector inside still records copy-vs-move correctly.
TEST(StatusOr, StatusOrAnyCopyAndMoveConstructorTests) {
  absl::StatusOr<absl::any> status_or = CopyDetector(10);
  absl::StatusOr<absl::any> status_error = absl::InvalidArgumentError("foo");
  EXPECT_THAT(
      status_or,
      IsOkAndHolds(AnyWith<CopyDetector>(CopyDetectorHas(10, true, false))));
  absl::StatusOr<absl::any> a = status_or;
  EXPECT_THAT(
      a, IsOkAndHolds(AnyWith<CopyDetector>(CopyDetectorHas(10, false, true))));
  absl::StatusOr<absl::any> a_err = status_error;
  EXPECT_THAT(a_err, Not(IsOk()));
  const absl::StatusOr<absl::any>& cref = status_or;
  absl::StatusOr<absl::any> b = cref;
  EXPECT_THAT(
      b, IsOkAndHolds(AnyWith<CopyDetector>(CopyDetectorHas(10, false, true))));
  const absl::StatusOr<absl::any>& cref_err = status_error;
  absl::StatusOr<absl::any> b_err = cref_err;
  EXPECT_THAT(b_err, Not(IsOk()));
  absl::StatusOr<absl::any> c = std::move(status_or);
  EXPECT_THAT(
      c, IsOkAndHolds(AnyWith<CopyDetector>(CopyDetectorHas(10, true, false))));
  absl::StatusOr<absl::any> c_err = std::move(status_error);
  EXPECT_THAT(c_err, Not(IsOk()));
}
// Same matrix via assignment.
TEST(StatusOr, StatusOrAnyCopyAndMoveAssignment) {
  absl::StatusOr<absl::any> status_or = CopyDetector(10);
  absl::StatusOr<absl::any> status_error = absl::InvalidArgumentError("foo");
  absl::StatusOr<absl::any> a;
  a = status_or;
  EXPECT_THAT(
      a, IsOkAndHolds(AnyWith<CopyDetector>(CopyDetectorHas(10, false, true))));
  a = status_error;
  EXPECT_THAT(a, Not(IsOk()));
  const absl::StatusOr<absl::any>& cref = status_or;
  a = cref;
  EXPECT_THAT(
      a, IsOkAndHolds(AnyWith<CopyDetector>(CopyDetectorHas(10, false, true))));
  const absl::StatusOr<absl::any>& cref_err = status_error;
  a = cref_err;
  EXPECT_THAT(a, Not(IsOk()));
  a = std::move(status_or);
  EXPECT_THAT(
      a, IsOkAndHolds(AnyWith<CopyDetector>(CopyDetectorHas(10, true, false))));
  a = std::move(status_error);
  EXPECT_THAT(a, Not(IsOk()));
}
// Copy/move construction with a plain CopyDetector payload.
TEST(StatusOr, StatusOrCopyAndMoveTestsConstructor) {
  absl::StatusOr<CopyDetector> status_or(10);
  ASSERT_THAT(status_or, IsOkAndHolds(CopyDetectorHas(10, false, false)));
  absl::StatusOr<CopyDetector> a(status_or);
  EXPECT_THAT(a, IsOkAndHolds(CopyDetectorHas(10, false, true)));
  const absl::StatusOr<CopyDetector>& cref = status_or;
  absl::StatusOr<CopyDetector> b(cref);
  EXPECT_THAT(b, IsOkAndHolds(CopyDetectorHas(10, false, true)));
  absl::StatusOr<CopyDetector> c(std::move(status_or));
  EXPECT_THAT(c, IsOkAndHolds(CopyDetectorHas(10, true, false)));
}
// Same matrix via assignment.
TEST(StatusOr, StatusOrCopyAndMoveTestsAssignment) {
  absl::StatusOr<CopyDetector> status_or(10);
  ASSERT_THAT(status_or, IsOkAndHolds(CopyDetectorHas(10, false, false)));
  absl::StatusOr<CopyDetector> a;
  a = status_or;
  EXPECT_THAT(a, IsOkAndHolds(CopyDetectorHas(10, false, true)));
  const absl::StatusOr<CopyDetector>& cref = status_or;
  absl::StatusOr<CopyDetector> b;
  b = cref;
  EXPECT_THAT(b, IsOkAndHolds(CopyDetectorHas(10, false, true)));
  absl::StatusOr<CopyDetector> c;
  c = std::move(status_or);
  EXPECT_THAT(c, IsOkAndHolds(CopyDetectorHas(10, true, false)));
}
// StatusOr<any> must not be assignable from StatusOr<int> (would be
// ambiguous between wrapping and converting), but is assignable from Status.
TEST(StatusOr, AbslAnyAssignment) {
  EXPECT_FALSE((std::is_assignable<absl::StatusOr<absl::any>,
                                   absl::StatusOr<int>>::value));
  absl::StatusOr<absl::any> status_or;
  status_or = absl::InvalidArgumentError("foo");
  EXPECT_THAT(status_or, Not(IsOk()));
}
// Implicit assignment of a value convertible into the payload (variant).
TEST(StatusOr, ImplicitAssignment) {
  absl::StatusOr<absl::variant<int, std::string>> status_or;
  status_or = 10;
  EXPECT_THAT(status_or, IsOkAndHolds(VariantWith<int>(10)));
}
// Self-assignment through the dereferenced payload is safe.
TEST(StatusOr, SelfDirectInitAssignment) {
  absl::StatusOr<std::vector<int>> status_or = {{10, 20, 30}};
  status_or = *status_or;
  EXPECT_THAT(status_or, IsOkAndHolds(ElementsAre(10, 20, 30)));
}
// Copy-list-initialization from a braced initializer.
TEST(StatusOr, ImplicitCastFromInitializerList) {
  absl::StatusOr<std::vector<int>> status_or = {{10, 20, 30}};
  EXPECT_THAT(status_or, IsOkAndHolds(ElementsAre(10, 20, 30)));
}
// Assigning a unique_ptr<Derived> into StatusOr<unique_ptr<Base>>.
TEST(StatusOr, UniquePtrImplicitAssignment) {
  absl::StatusOr<std::unique_ptr<Base1>> status_or;
  status_or = absl::make_unique<Derived>();
  EXPECT_THAT(status_or, IsOkAndHolds(Ne(nullptr)));
}
// Pointer conversions: public derivation converts, private does not.
TEST(StatusOr, Pointer) {
  struct A {};
  struct B : public A {};
  struct C : private A {};
  EXPECT_TRUE((std::is_constructible<absl::StatusOr<A*>, B*>::value));
  EXPECT_TRUE((std::is_convertible<B*, absl::StatusOr<A*>>::value));
  EXPECT_FALSE((std::is_constructible<absl::StatusOr<A*>, C*>::value));
  EXPECT_FALSE((std::is_convertible<C*, absl::StatusOr<A*>>::value));
}
// Converting assignment (int -> double) in the error state; after a move the
// source reports kInternal.
TEST(StatusOr, TestAssignmentStatusNotOkConverting) {
  {
    const absl::Status expected = absl::CancelledError();
    absl::StatusOr<int> source(expected);
    absl::StatusOr<double> target;
    target = source;
    EXPECT_FALSE(target.ok());
    EXPECT_EQ(expected, target.status());
    EXPECT_FALSE(source.ok());
    EXPECT_EQ(expected, source.status());
  }
  {
    const absl::Status expected = absl::CancelledError();
    absl::StatusOr<int> source(expected);
    absl::StatusOr<double> target;
    target = std::move(source);
    EXPECT_FALSE(target.ok());
    EXPECT_EQ(expected, target.status());
    EXPECT_FALSE(source.ok());
    EXPECT_EQ(source.status().code(), absl::StatusCode::kInternal);
  }
}
// Self copy- and move-assignment must be no-ops in both the OK and error
// states, including for move-only payloads. (`*&so` / the `same` alias defeat
// compiler self-assignment warnings.)
TEST(StatusOr, SelfAssignment) {
  {
    const std::string long_str(128, 'a');
    absl::StatusOr<std::string> so = long_str;
    so = *&so;
    ASSERT_TRUE(so.ok());
    EXPECT_THAT(so.status(), IsOk());
    EXPECT_EQ(long_str, *so);
  }
  {
    absl::StatusOr<int> so = absl::NotFoundError("taco");
    so = *&so;
    EXPECT_FALSE(so.ok());
    EXPECT_EQ(so.status().code(), absl::StatusCode::kNotFound);
    EXPECT_EQ(so.status().message(), "taco");
  }
  {
    absl::StatusOr<int> so = 17;
    auto& same = so;
    so = std::move(same);
    ASSERT_TRUE(so.ok());
    EXPECT_THAT(so.status(), IsOk());
    EXPECT_EQ(17, *so);
  }
  {
    absl::StatusOr<int> so = absl::NotFoundError("taco");
    auto& same = so;
    so = std::move(same);
    EXPECT_FALSE(so.ok());
    EXPECT_EQ(so.status().code(), absl::StatusCode::kNotFound);
    EXPECT_EQ(so.status().message(), "taco");
  }
  {
    const auto raw = new int(17);
    absl::StatusOr<std::unique_ptr<int>> so = absl::WrapUnique(raw);
    auto& same = so;
    so = std::move(same);
    ASSERT_TRUE(so.ok());
    EXPECT_THAT(so.status(), IsOk());
    EXPECT_EQ(raw, so->get());
  }
  {
    absl::StatusOr<std::unique_ptr<int>> so = absl::NotFoundError("taco");
    auto& same = so;
    so = std::move(same);
    EXPECT_FALSE(so.ok());
    EXPECT_EQ(so.status().code(), absl::StatusCode::kNotFound);
    EXPECT_EQ(so.status().message(), "taco");
  }
}
// Tag types distinguishing which MockValue constructor/assignment overload
// a StatusOr assignment selects.
struct FromConstructibleAssignableLvalue {};
struct FromConstructibleAssignableRvalue {};
struct FromImplicitConstructibleOnly {};
struct FromAssignableOnly {};
// Records whether it was built from an rvalue and whether assignment (as
// opposed to construction) was used; no constructor exists for
// FromAssignableOnly, so StatusOr cannot be assigned from it.
struct MockValue {
  MockValue(const FromConstructibleAssignableLvalue&)
      : from_rvalue(false), assigned(false) {}
  MockValue(FromConstructibleAssignableRvalue&&)
      : from_rvalue(true), assigned(false) {}
  MockValue(const FromImplicitConstructibleOnly&)
      : from_rvalue(false), assigned(false) {}
  MockValue& operator=(const FromConstructibleAssignableLvalue&) {
    from_rvalue = false;
    assigned = true;
    return *this;
  }
  MockValue& operator=(FromConstructibleAssignableRvalue&&) {
    from_rvalue = true;
    assigned = true;
    return *this;
  }
  MockValue& operator=(const FromAssignableOnly&) {
    from_rvalue = false;
    assigned = true;
    return *this;
  }
  bool from_rvalue;  // last operation came from an rvalue argument
  bool assigned;     // last operation was an assignment, not a construction
};
// Assignment into an engaged StatusOr perfect-forwards to T's assignment
// operator (preserving lvalue/rvalue-ness); when T is only constructible
// from U (not assignable), StatusOr reconstructs instead.
TEST(StatusOr, PerfectForwardingAssignment) {
  constexpr int kValue1 = 10, kValue2 = 20;
  absl::StatusOr<CopyDetector> status_or;
  CopyDetector lvalue(kValue1);
  status_or = lvalue;
  EXPECT_THAT(status_or, IsOkAndHolds(CopyDetectorHas(kValue1, false, true)));
  status_or = CopyDetector(kValue2);
  EXPECT_THAT(status_or, IsOkAndHolds(CopyDetectorHas(kValue2, true, false)));
  EXPECT_TRUE(
      (std::is_assignable<absl::StatusOr<MockValue>&,
                          const FromConstructibleAssignableLvalue&>::value));
  EXPECT_TRUE((std::is_assignable<absl::StatusOr<MockValue>&,
                                  FromConstructibleAssignableLvalue&&>::value));
  EXPECT_FALSE(
      (std::is_assignable<absl::StatusOr<MockValue>&,
                          const FromConstructibleAssignableRvalue&>::value));
  EXPECT_TRUE((std::is_assignable<absl::StatusOr<MockValue>&,
                                  FromConstructibleAssignableRvalue&&>::value));
  EXPECT_TRUE(
      (std::is_assignable<absl::StatusOr<MockValue>&,
                          const FromImplicitConstructibleOnly&>::value));
  // Assignable-but-not-constructible is NOT enough: StatusOr must be able to
  // construct T when it is currently in the error state.
  EXPECT_FALSE((std::is_assignable<absl::StatusOr<MockValue>&,
                                   const FromAssignableOnly&>::value));
  absl::StatusOr<MockValue> from_lvalue(FromConstructibleAssignableLvalue{});
  EXPECT_FALSE(from_lvalue->from_rvalue);
  EXPECT_FALSE(from_lvalue->assigned);
  from_lvalue = FromConstructibleAssignableLvalue{};
  EXPECT_FALSE(from_lvalue->from_rvalue);
  EXPECT_TRUE(from_lvalue->assigned);
  absl::StatusOr<MockValue> from_rvalue(FromConstructibleAssignableRvalue{});
  EXPECT_TRUE(from_rvalue->from_rvalue);
  EXPECT_FALSE(from_rvalue->assigned);
  from_rvalue = FromConstructibleAssignableRvalue{};
  EXPECT_TRUE(from_rvalue->from_rvalue);
  EXPECT_TRUE(from_rvalue->assigned);
  absl::StatusOr<MockValue> from_implicit_constructible(
      FromImplicitConstructibleOnly{});
  EXPECT_FALSE(from_implicit_constructible->from_rvalue);
  EXPECT_FALSE(from_implicit_constructible->assigned);
  // No assignment operator from this type exists, so reconstruction is used.
  from_implicit_constructible = FromImplicitConstructibleOnly{};
  EXPECT_FALSE(from_implicit_constructible->from_rvalue);
  EXPECT_FALSE(from_implicit_constructible->assigned);
}
// ok() and status() report the stored state.
TEST(StatusOr, TestStatus) {
  absl::StatusOr<int> good(4);
  EXPECT_TRUE(good.ok());
  absl::StatusOr<int> bad(absl::CancelledError());
  EXPECT_FALSE(bad.ok());
  EXPECT_EQ(bad.status().code(), absl::StatusCode::kCancelled);
}
// operator* propagates const-ness and value category of the StatusOr.
TEST(StatusOr, OperatorStarRefQualifiers) {
  static_assert(
      std::is_same<const int&,
                   decltype(*std::declval<const absl::StatusOr<int>&>())>(),
      "Unexpected ref-qualifiers");
  static_assert(
      std::is_same<int&, decltype(*std::declval<absl::StatusOr<int>&>())>(),
      "Unexpected ref-qualifiers");
  static_assert(
      std::is_same<const int&&,
                   decltype(*std::declval<const absl::StatusOr<int>&&>())>(),
      "Unexpected ref-qualifiers");
  static_assert(
      std::is_same<int&&, decltype(*std::declval<absl::StatusOr<int>&&>())>(),
      "Unexpected ref-qualifiers");
}
// operator* returns the stored value for every const/value-category combo.
TEST(StatusOr, OperatorStar) {
  const absl::StatusOr<std::string> const_lvalue("hello");
  EXPECT_EQ("hello", *const_lvalue);
  absl::StatusOr<std::string> lvalue("hello");
  EXPECT_EQ("hello", *lvalue);
  const absl::StatusOr<std::string> const_rvalue("hello");
  EXPECT_EQ("hello", *std::move(const_rvalue));
  absl::StatusOr<std::string> rvalue("hello");
  EXPECT_EQ("hello", *std::move(rvalue));
}
// operator-> yields a (const) pointer matching the StatusOr's const-ness.
TEST(StatusOr, OperatorArrowQualifiers) {
  static_assert(
      std::is_same<
          const int*,
          decltype(std::declval<const absl::StatusOr<int>&>().operator->())>(),
      "Unexpected qualifiers");
  static_assert(
      std::is_same<
          int*, decltype(std::declval<absl::StatusOr<int>&>().operator->())>(),
      "Unexpected qualifiers");
  static_assert(
      std::is_same<
          const int*,
          decltype(std::declval<const absl::StatusOr<int>&&>().operator->())>(),
      "Unexpected qualifiers");
  static_assert(
      std::is_same<
          int*, decltype(std::declval<absl::StatusOr<int>&&>().operator->())>(),
      "Unexpected qualifiers");
}
// operator-> gives member access on the stored value.
TEST(StatusOr, OperatorArrow) {
  const absl::StatusOr<std::string> const_lvalue("hello");
  EXPECT_EQ(std::string("hello"), const_lvalue->c_str());
  absl::StatusOr<std::string> lvalue("hello");
  EXPECT_EQ(std::string("hello"), lvalue->c_str());
}
// std::move(so).status() extracts the status; the moved-from StatusOr then
// reports kInternal "Status accessed after move.".
TEST(StatusOr, RValueStatus) {
  absl::StatusOr<int> so(absl::NotFoundError("taco"));
  const absl::Status s = std::move(so).status();
  EXPECT_EQ(s.code(), absl::StatusCode::kNotFound);
  EXPECT_EQ(s.message(), "taco");
  EXPECT_FALSE(so.ok());
  EXPECT_FALSE(so.status().ok());
  EXPECT_EQ(so.status().code(), absl::StatusCode::kInternal);
  EXPECT_EQ(so.status().message(), "Status accessed after move.");
}
// Dereference returns the stored value (mutable StatusOr).
TEST(StatusOr, TestValue) {
  const int kI = 4;
  absl::StatusOr<int> thing(kI);
  EXPECT_EQ(kI, *thing);
}
// Dereference returns the stored value (const StatusOr).
TEST(StatusOr, TestValueConst) {
  const int kI = 4;
  const absl::StatusOr<int> thing(kI);
  EXPECT_EQ(kI, *thing);
}
// Pointer payload: default construction yields kUnknown.
TEST(StatusOr, TestPointerDefaultCtor) {
  absl::StatusOr<int*> thing;
  EXPECT_FALSE(thing.ok());
  EXPECT_EQ(thing.status().code(), absl::StatusCode::kUnknown);
}
// Pointer payload: error construction preserves the code.
TEST(StatusOr, TestPointerStatusCtor) {
  absl::StatusOr<int*> thing(absl::CancelledError());
  EXPECT_FALSE(thing.ok());
  EXPECT_EQ(thing.status().code(), absl::StatusCode::kCancelled);
}
TEST(StatusOr, TestPointerValueCtor) {
const int kI = 4;
{
absl::StatusOr<const int*> so(&kI);
EXPECT_TRUE(so.ok());
EXPECT_THAT(so.status(), IsOk());
EXPECT_EQ(&kI, *so);
}
{
absl::StatusOr<const int*> so(nullptr);
EXPECT_TRUE(so.ok());
EXPECT_THAT(so.status(), IsOk());
EXPECT_EQ(nullptr, *so);
}
{
const int* const p = nullptr;
absl::StatusOr<const int*> so(p);
EXPECT_TRUE(so.ok());
EXPECT_THAT(so.status(), IsOk());
EXPECT_EQ(nullptr, *so);
}
}
// --- Pointer-payload copy/assignment tests --------------------------------
// Copy construction and assignment of StatusOr<T*>, including implicit
// Derived* -> Base2* conversions, in both OK and error states.
TEST(StatusOr, TestPointerCopyCtorStatusOk) {
  const int kI = 0;
  absl::StatusOr<const int*> original(&kI);
  absl::StatusOr<const int*> copy(original);
  EXPECT_THAT(copy.status(), IsOk());
  EXPECT_EQ(*original, *copy);
}
TEST(StatusOr, TestPointerCopyCtorStatusNotOk) {
  absl::StatusOr<int*> original(absl::CancelledError());
  absl::StatusOr<int*> copy(original);
  EXPECT_EQ(copy.status().code(), absl::StatusCode::kCancelled);
}
// Copying through a pointer up-conversion must preserve the pointee.
TEST(StatusOr, TestPointerCopyCtorStatusOKConverting) {
  Derived derived;
  absl::StatusOr<Derived*> original(&derived);
  absl::StatusOr<Base2*> copy(original);
  EXPECT_THAT(copy.status(), IsOk());
  EXPECT_EQ(static_cast<const Base2*>(*original), *copy);
}
TEST(StatusOr, TestPointerCopyCtorStatusNotOkConverting) {
  absl::StatusOr<Derived*> original(absl::CancelledError());
  absl::StatusOr<Base2*> copy(original);
  EXPECT_EQ(copy.status().code(), absl::StatusCode::kCancelled);
}
TEST(StatusOr, TestPointerAssignmentStatusOk) {
  const int kI = 0;
  absl::StatusOr<const int*> source(&kI);
  absl::StatusOr<const int*> target;
  target = source;
  EXPECT_THAT(target.status(), IsOk());
  EXPECT_EQ(*source, *target);
}
TEST(StatusOr, TestPointerAssignmentStatusNotOk) {
  absl::StatusOr<int*> source(absl::CancelledError());
  absl::StatusOr<int*> target;
  target = source;
  EXPECT_EQ(target.status().code(), absl::StatusCode::kCancelled);
}
TEST(StatusOr, TestPointerAssignmentStatusOKConverting) {
  Derived derived;
  absl::StatusOr<Derived*> source(&derived);
  absl::StatusOr<Base2*> target;
  target = source;
  EXPECT_THAT(target.status(), IsOk());
  EXPECT_EQ(static_cast<const Base2*>(*source), *target);
}
TEST(StatusOr, TestPointerAssignmentStatusNotOkConverting) {
  absl::StatusOr<Derived*> source(absl::CancelledError());
  absl::StatusOr<Base2*> target;
  target = source;
  EXPECT_EQ(target.status(), source.status());
}
TEST(StatusOr, TestPointerStatus) {
  const int kI = 0;
  absl::StatusOr<const int*> good(&kI);
  EXPECT_TRUE(good.ok());
  absl::StatusOr<const int*> bad(absl::CancelledError());
  EXPECT_EQ(bad.status().code(), absl::StatusCode::kCancelled);
}
TEST(StatusOr, TestPointerValue) {
  const int kI = 0;
  absl::StatusOr<const int*> thing(&kI);
  EXPECT_EQ(&kI, *thing);
}
TEST(StatusOr, TestPointerValueConst) {
  const int kI = 0;
  const absl::StatusOr<const int*> thing(&kI);
  EXPECT_EQ(&kI, *thing);
}
// vector<StatusOr<move-only>> must be able to grow even though the element
// type claims (but cannot actually perform) copy construction.
TEST(StatusOr, StatusOrVectorOfUniquePointerCanReserveAndResize) {
  using EvilType = std::vector<std::unique_ptr<int>>;
  static_assert(std::is_copy_constructible<EvilType>::value, "");
  std::vector<::absl::StatusOr<EvilType>> v(5);
  v.reserve(v.capacity() + 10);
  v.resize(v.capacity() + 10);
}
// --- Misc semantics tests --------------------------------------------------
// StatusOr<const T> is copy/move constructible but not assignable.
TEST(StatusOr, ConstPayload) {
  absl::StatusOr<const int> a;
  absl::StatusOr<const int> b(a);
  EXPECT_FALSE(std::is_copy_assignable<absl::StatusOr<const int>>::value);
  absl::StatusOr<const int> c(std::move(a));
  EXPECT_FALSE(std::is_move_assignable<absl::StatusOr<const int>>::value);
}
// Move-only payloads must work inside standard containers.
TEST(StatusOr, MapToStatusOrUniquePtr) {
  using MapType = std::map<std::string, absl::StatusOr<std::unique_ptr<int>>>;
  MapType a;
  MapType b(std::move(a));
  a = std::move(b);
}
TEST(StatusOr, ValueOrOk) {
  const absl::StatusOr<int> status_or = 0;
  EXPECT_EQ(status_or.value_or(-1), 0);
}
TEST(StatusOr, ValueOrDefault) {
  const absl::StatusOr<int> status_or = absl::CancelledError();
  EXPECT_EQ(status_or.value_or(-1), -1);
}
// value_or on an rvalue StatusOr must move the payload out.
TEST(StatusOr, MoveOnlyValueOrOk) {
  EXPECT_THAT(absl::StatusOr<std::unique_ptr<int>>(absl::make_unique<int>(0))
                  .value_or(absl::make_unique<int>(-1)),
              Pointee(0));
}
TEST(StatusOr, MoveOnlyValueOrDefault) {
  EXPECT_THAT(absl::StatusOr<std::unique_ptr<int>>(absl::CancelledError())
                  .value_or(absl::make_unique<int>(-1)),
              Pointee(-1));
}
static absl::StatusOr<int> MakeStatus() { return 100; }
TEST(StatusOr, TestIgnoreError) { MakeStatus().IgnoreError(); }
// Equality holds only for equal values or equal error statuses; every
// cross pairing must compare unequal (exercised as a full 4x4 matrix).
TEST(StatusOr, EqualityOperator) {
  constexpr size_t kNumCases = 4;
  std::array<absl::StatusOr<int>, kNumCases> group1 = {
      absl::StatusOr<int>(1), absl::StatusOr<int>(2),
      absl::StatusOr<int>(absl::InvalidArgumentError("msg")),
      absl::StatusOr<int>(absl::InternalError("msg"))};
  std::array<absl::StatusOr<int>, kNumCases> group2 = {
      absl::StatusOr<int>(1), absl::StatusOr<int>(2),
      absl::StatusOr<int>(absl::InvalidArgumentError("msg")),
      absl::StatusOr<int>(absl::InternalError("msg"))};
  for (size_t i = 0; i < kNumCases; ++i) {
    for (size_t j = 0; j < kNumCases; ++j) {
      if (i == j) {
        EXPECT_TRUE(group1[i] == group2[j]);
        EXPECT_FALSE(group1[i] != group2[j]);
      } else {
        EXPECT_FALSE(group1[i] == group2[j]);
        EXPECT_TRUE(group1[i] != group2[j]);
      }
    }
  }
}
// --- Fixtures for conversion-overload tests --------------------------------
// CustomType<T, config> composes, via inheritance, any mix of three
// conversion operators (to StatusOr<T>, to T, to Status), each either
// implicit or explicit, selected by bit flags. The tests below use these
// to pin down which overload StatusOr's constructors/assignment pick.
struct MyType {
  bool operator==(const MyType&) const { return true; }
};
// Whether a given conversion operator exists, and if so how it is declared.
enum class ConvTraits { kNone = 0, kImplicit = 1, kExplicit = 2 };
// Primary template: no conversion to StatusOr<T> at all.
template <typename T, ConvTraits conv_traits = ConvTraits::kNone>
struct StatusOrConversionBase {};
template <typename T>
struct StatusOrConversionBase<T, ConvTraits::kImplicit> {
  operator absl::StatusOr<T>() const& {
    return absl::InvalidArgumentError("conversion to absl::StatusOr");
  }
  operator absl::StatusOr<T>() && {
    return absl::InvalidArgumentError("conversion to absl::StatusOr");
  }
};
template <typename T>
struct StatusOrConversionBase<T, ConvTraits::kExplicit> {
  explicit operator absl::StatusOr<T>() const& {
    return absl::InvalidArgumentError("conversion to absl::StatusOr");
  }
  explicit operator absl::StatusOr<T>() && {
    return absl::InvalidArgumentError("conversion to absl::StatusOr");
  }
};
// Primary template: no conversion to T.
template <typename T, ConvTraits conv_traits = ConvTraits::kNone>
struct ConversionBase {};
template <typename T>
struct ConversionBase<T, ConvTraits::kImplicit> {
  operator T() const& { return t; }
  operator T() && { return std::move(t); }
  T t;
};
template <typename T>
struct ConversionBase<T, ConvTraits::kExplicit> {
  explicit operator T() const& { return t; }
  explicit operator T() && { return std::move(t); }
  T t;
};
// Primary template: no conversion to absl::Status.
template <ConvTraits conv_traits = ConvTraits::kNone>
struct StatusConversionBase {};
template <>
struct StatusConversionBase<ConvTraits::kImplicit> {
  operator absl::Status() const& {
    return absl::InternalError("conversion to Status");
  }
  operator absl::Status() && {
    return absl::InternalError("conversion to Status");
  }
};
template <>
struct StatusConversionBase<ConvTraits::kExplicit> {
  explicit operator absl::Status() const& {
    return absl::InternalError("conversion to Status");
  }
  explicit operator absl::Status() && {
    return absl::InternalError("conversion to Status");
  }
};
// Bit flags composing a CustomType configuration.
static constexpr int kConvToStatus = 1;
static constexpr int kConvToStatusOr = 2;
static constexpr int kConvToT = 4;
static constexpr int kConvExplicit = 8;
// Maps a flag bit plus the kConvExplicit modifier to a ConvTraits value.
constexpr ConvTraits GetConvTraits(int bit, int config) {
  return (config & bit) == 0
             ? ConvTraits::kNone
             : ((config & kConvExplicit) == 0 ? ConvTraits::kImplicit
                                              : ConvTraits::kExplicit);
}
template <typename T, int config>
struct CustomType
    : StatusOrConversionBase<T, GetConvTraits(kConvToStatusOr, config)>,
      ConversionBase<T, GetConvTraits(kConvToT, config)>,
      StatusConversionBase<GetConvTraits(kConvToStatus, config)> {};
// Converts implicitly to StatusOr<T> for every T (always an error value).
struct ConvertibleToAnyStatusOr {
  template <typename T>
  operator absl::StatusOr<T>() const {
    return absl::InvalidArgumentError("Conversion to absl::StatusOr");
  }
};
// --- Construction overload-resolution tests --------------------------------
// When constructing StatusOr<T> from T itself, the in-place T constructor
// must win even if T also converts to StatusOr<T> or Status.
TEST(StatusOr, ConstructionFromT) {
  {
    ConvertibleToAnyStatusOr v;
    absl::StatusOr<ConvertibleToAnyStatusOr> statusor(v);
    EXPECT_TRUE(statusor.ok());
  }
  {
    ConvertibleToAnyStatusOr v;
    absl::StatusOr<ConvertibleToAnyStatusOr> statusor = v;
    EXPECT_TRUE(statusor.ok());
  }
  {
    CustomType<MyType, kConvToStatus | kConvExplicit> v;
    absl::StatusOr<CustomType<MyType, kConvToStatus | kConvExplicit>> statusor(
        v);
    EXPECT_TRUE(statusor.ok());
  }
  {
    CustomType<MyType, kConvToStatus | kConvExplicit> v;
    absl::StatusOr<CustomType<MyType, kConvToStatus | kConvExplicit>> statusor =
        v;
    EXPECT_TRUE(statusor.ok());
  }
}
// A type convertible to T produces an OK StatusOr holding the converted T.
TEST(StatusOr, ConstructionFromTypeConvertibleToT) {
  {
    CustomType<MyType, kConvToT | kConvExplicit> v;
    absl::StatusOr<MyType> statusor(v);
    EXPECT_TRUE(statusor.ok());
  }
  {
    CustomType<MyType, kConvToT> v;
    absl::StatusOr<MyType> statusor = v;
    EXPECT_TRUE(statusor.ok());
  }
}
// A conversion operator to StatusOr<T> must be preferred over conversion
// to T or to Status, regardless of which other conversions exist.
TEST(StatusOr, ConstructionFromTypeWithConversionOperatorToStatusOrT) {
  {
    CustomType<MyType, kConvToStatusOr | kConvExplicit> v;
    absl::StatusOr<MyType> statusor(v);
    EXPECT_EQ(statusor, v.operator absl::StatusOr<MyType>());
  }
  {
    CustomType<MyType, kConvToT | kConvToStatusOr | kConvExplicit> v;
    absl::StatusOr<MyType> statusor(v);
    EXPECT_EQ(statusor, v.operator absl::StatusOr<MyType>());
  }
  {
    CustomType<MyType, kConvToStatusOr | kConvToStatus | kConvExplicit> v;
    absl::StatusOr<MyType> statusor(v);
    EXPECT_EQ(statusor, v.operator absl::StatusOr<MyType>());
  }
  {
    CustomType<MyType,
               kConvToT | kConvToStatusOr | kConvToStatus | kConvExplicit>
        v;
    absl::StatusOr<MyType> statusor(v);
    EXPECT_EQ(statusor, v.operator absl::StatusOr<MyType>());
  }
  {
    CustomType<MyType, kConvToStatusOr> v;
    absl::StatusOr<MyType> statusor = v;
    EXPECT_EQ(statusor, v.operator absl::StatusOr<MyType>());
  }
  {
    CustomType<MyType, kConvToT | kConvToStatusOr> v;
    absl::StatusOr<MyType> statusor = v;
    EXPECT_EQ(statusor, v.operator absl::StatusOr<MyType>());
  }
  {
    CustomType<MyType, kConvToStatusOr | kConvToStatus> v;
    absl::StatusOr<MyType> statusor = v;
    EXPECT_EQ(statusor, v.operator absl::StatusOr<MyType>());
  }
  {
    CustomType<MyType, kConvToT | kConvToStatusOr | kConvToStatus> v;
    absl::StatusOr<MyType> statusor = v;
    EXPECT_EQ(statusor, v.operator absl::StatusOr<MyType>());
  }
}
// With no StatusOr<T> conversion, conversion to Status yields an error
// StatusOr carrying that Status (even if conversion to T also exists).
TEST(StatusOr, ConstructionFromTypeConvertibleToStatus) {
  {
    CustomType<MyType, kConvToStatus | kConvExplicit> v;
    absl::StatusOr<MyType> statusor(v);
    EXPECT_FALSE(statusor.ok());
    EXPECT_EQ(statusor.status(), static_cast<absl::Status>(v));
  }
  {
    CustomType<MyType, kConvToT | kConvToStatus | kConvExplicit> v;
    absl::StatusOr<MyType> statusor(v);
    EXPECT_FALSE(statusor.ok());
    EXPECT_EQ(statusor.status(), static_cast<absl::Status>(v));
  }
  {
    CustomType<MyType, kConvToStatus> v;
    absl::StatusOr<MyType> statusor = v;
    EXPECT_FALSE(statusor.ok());
    EXPECT_EQ(statusor.status(), static_cast<absl::Status>(v));
  }
  {
    CustomType<MyType, kConvToT | kConvToStatus> v;
    absl::StatusOr<MyType> statusor = v;
    EXPECT_FALSE(statusor.ok());
    EXPECT_EQ(statusor.status(), static_cast<absl::Status>(v));
  }
}
// --- Assignment overload-resolution tests -----------------------------------
// Mirrors the construction tests above for operator=: assigning from T,
// from a type convertible to T, from a type convertible to StatusOr<T>,
// and from a type convertible to Status.
TEST(StatusOr, AssignmentFromT) {
  {
    ConvertibleToAnyStatusOr v;
    absl::StatusOr<ConvertibleToAnyStatusOr> statusor;
    statusor = v;
    EXPECT_TRUE(statusor.ok());
  }
  {
    CustomType<MyType, kConvToStatus> v;
    absl::StatusOr<CustomType<MyType, kConvToStatus>> statusor;
    statusor = v;
    EXPECT_TRUE(statusor.ok());
  }
}
TEST(StatusOr, AssignmentFromTypeConvertibleToT) {
  {
    CustomType<MyType, kConvToT> v;
    absl::StatusOr<MyType> statusor;
    statusor = v;
    EXPECT_TRUE(statusor.ok());
  }
}
// The StatusOr<T> conversion operator must win during assignment too.
TEST(StatusOr, AssignmentFromTypeWithConversionOperatortoStatusOrT) {
  {
    CustomType<MyType, kConvToStatusOr> v;
    absl::StatusOr<MyType> statusor;
    statusor = v;
    EXPECT_EQ(statusor, v.operator absl::StatusOr<MyType>());
  }
  {
    CustomType<MyType, kConvToT | kConvToStatusOr> v;
    absl::StatusOr<MyType> statusor;
    statusor = v;
    EXPECT_EQ(statusor, v.operator absl::StatusOr<MyType>());
  }
  {
    CustomType<MyType, kConvToStatusOr | kConvToStatus> v;
    absl::StatusOr<MyType> statusor;
    statusor = v;
    EXPECT_EQ(statusor, v.operator absl::StatusOr<MyType>());
  }
  {
    CustomType<MyType, kConvToT | kConvToStatusOr | kConvToStatus> v;
    absl::StatusOr<MyType> statusor;
    statusor = v;
    EXPECT_EQ(statusor, v.operator absl::StatusOr<MyType>());
  }
}
TEST(StatusOr, AssignmentFromTypeConvertibleToStatus) {
  {
    CustomType<MyType, kConvToStatus> v;
    absl::StatusOr<MyType> statusor;
    statusor = v;
    EXPECT_FALSE(statusor.ok());
    EXPECT_EQ(statusor.status(), static_cast<absl::Status>(v));
  }
  {
    CustomType<MyType, kConvToT | kConvToStatus> v;
    absl::StatusOr<MyType> statusor;
    statusor = v;
    EXPECT_FALSE(statusor.ok());
    EXPECT_EQ(statusor.status(), static_cast<absl::Status>(v));
  }
}
// AssignStatus() bypasses overload resolution entirely and always stores
// the Status, even when T is itself absl::Status.
TEST(StatusOr, StatusAssignmentFromStatusError) {
  absl::StatusOr<absl::Status> statusor;
  statusor.AssignStatus(absl::CancelledError());
  EXPECT_FALSE(statusor.ok());
  EXPECT_EQ(statusor.status(), absl::CancelledError());
}
#if GTEST_HAS_DEATH_TEST
// Assigning an OK status is a (debug-checked) API violation.
TEST(StatusOr, StatusAssignmentFromStatusOk) {
  EXPECT_DEBUG_DEATH(
      {
        absl::StatusOr<absl::Status> statusor;
        statusor.AssignStatus(absl::OkStatus());
        EXPECT_FALSE(statusor.ok());
        EXPECT_EQ(statusor.status().code(), absl::StatusCode::kInternal);
      },
      "An OK status is not a valid constructor argument to StatusOr<T>");
}
#endif
TEST(StatusOr, StatusAssignmentFromTypeConvertibleToStatus) {
  CustomType<MyType, kConvToStatus> v;
  absl::StatusOr<MyType> statusor;
  statusor.AssignStatus(v);
  EXPECT_FALSE(statusor.ok());
  EXPECT_EQ(statusor.status(), static_cast<absl::Status>(v));
}
// --- Printing tests ---------------------------------------------------------
// Payload type with distinct ostream and AbslStringify outputs, so the
// tests can tell which mechanism StatusOr's printers delegate to.
struct PrintTestStruct {
  friend std::ostream& operator<<(std::ostream& os, const PrintTestStruct&) {
    return os << "ostream";
  }
  template <typename Sink>
  friend void AbslStringify(Sink& sink, const PrintTestStruct&) {
    sink.Append("stringify");
  }
};
// An OK StatusOr prints its value via the matching mechanism.
TEST(StatusOr, OkPrinting) {
  absl::StatusOr<PrintTestStruct> print_me = PrintTestStruct{};
  std::stringstream stream;
  stream << print_me;
  EXPECT_EQ(stream.str(), "ostream");
  EXPECT_EQ(absl::StrCat(print_me), "stringify");
}
// An error StatusOr prints its status, bracketed with () or [] depending
// on the printer; the matcher accepts either delimiter style.
TEST(StatusOr, ErrorPrinting) {
  absl::StatusOr<PrintTestStruct> print_me = absl::UnknownError("error");
  std::stringstream stream;
  stream << print_me;
  const auto error_matcher =
      AllOf(HasSubstr("UNKNOWN"), HasSubstr("error"),
            AnyOf(AllOf(StartsWith("("), EndsWith(")")),
                  AllOf(StartsWith("["), EndsWith("]"))));
  EXPECT_THAT(stream.str(), error_matcher);
  EXPECT_THAT(absl::StrCat(print_me), error_matcher);
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/status/statusor.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/status/statusor_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
df77bd3e-397c-4a38-83b1-b0e4b4a814d7 | cpp | google/cel-cpp | bytes_wrapper_type | common/types/bytes_wrapper_type.h | common/types/bytes_wrapper_type_test.cc | #ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPES_BYTES_WRAPPER_TYPE_H_
#define THIRD_PARTY_CEL_CPP_COMMON_TYPES_BYTES_WRAPPER_TYPE_H_
#include <ostream>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "common/type_kind.h"
namespace cel {
class Type;
class TypeParameters;
// `BytesWrapperType` represents the well-known protobuf wrapper type
// `google.protobuf.BytesValue` in the CEL type system. It is a stateless
// (monostate) tag type: all instances are interchangeable and compare equal.
class BytesWrapperType final {
 public:
  static constexpr TypeKind kKind = TypeKind::kBytesWrapper;
  static constexpr absl::string_view kName = "google.protobuf.BytesValue";
  BytesWrapperType() = default;
  BytesWrapperType(const BytesWrapperType&) = default;
  BytesWrapperType(BytesWrapperType&&) = default;
  BytesWrapperType& operator=(const BytesWrapperType&) = default;
  BytesWrapperType& operator=(BytesWrapperType&&) = default;
  static TypeKind kind() { return kKind; }
  static absl::string_view name() { return kName; }
  // Defined out of line (TypeParameters is only forward-declared here);
  // presumably returns an empty parameter list — confirm in the .cc.
  static TypeParameters GetParameters();
  static std::string DebugString() { return std::string(name()); }
  // No state to exchange; provided so generic code can call swap uniformly.
  constexpr void swap(BytesWrapperType&) noexcept {}
};
// ADL swap for BytesWrapperType (no-op, as the type is stateless).
inline constexpr void swap(BytesWrapperType& lhs,
                           BytesWrapperType& rhs) noexcept {
  lhs.swap(rhs);
}
// All BytesWrapperType instances are equal (monostate semantics).
inline constexpr bool operator==(BytesWrapperType, BytesWrapperType) {
  return true;
}
inline constexpr bool operator!=(BytesWrapperType lhs, BytesWrapperType rhs) {
  return !operator==(lhs, rhs);
}
// Hashing contributes no state; every instance hashes identically.
template <typename H>
H AbslHashValue(H state, BytesWrapperType) {
  return std::move(state);
}
// Streams the debug representation ("google.protobuf.BytesValue").
inline std::ostream& operator<<(std::ostream& out,
                                const BytesWrapperType& type) {
  return out << type.DebugString();
}
}
#endif | #include <sstream>
#include "absl/hash/hash.h"
#include "common/type.h"
#include "internal/testing.h"
namespace cel {
namespace {
// Kind must be stable whether accessed directly or through the Type variant.
TEST(BytesWrapperType, Kind) {
  EXPECT_EQ(BytesWrapperType().kind(), BytesWrapperType::kKind);
  EXPECT_EQ(Type(BytesWrapperType()).kind(), BytesWrapperType::kKind);
}
TEST(BytesWrapperType, Name) {
  EXPECT_EQ(BytesWrapperType().name(), BytesWrapperType::kName);
  EXPECT_EQ(Type(BytesWrapperType()).name(), BytesWrapperType::kName);
}
// operator<< must print kName for both the concrete type and Type.
TEST(BytesWrapperType, DebugString) {
  {
    std::ostringstream out;
    out << BytesWrapperType();
    EXPECT_EQ(out.str(), BytesWrapperType::kName);
  }
  {
    std::ostringstream out;
    out << Type(BytesWrapperType());
    EXPECT_EQ(out.str(), BytesWrapperType::kName);
  }
}
// Monostate: every instance hashes identically.
TEST(BytesWrapperType, Hash) {
  EXPECT_EQ(absl::HashOf(BytesWrapperType()), absl::HashOf(BytesWrapperType()));
}
// Equality must hold across all combinations with the Type wrapper.
TEST(BytesWrapperType, Equal) {
  EXPECT_EQ(BytesWrapperType(), BytesWrapperType());
  EXPECT_EQ(Type(BytesWrapperType()), BytesWrapperType());
  EXPECT_EQ(BytesWrapperType(), Type(BytesWrapperType()));
  EXPECT_EQ(Type(BytesWrapperType()), Type(BytesWrapperType()));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/bytes_wrapper_type.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/bytes_wrapper_type_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
5054f737-a436-48d6-bda7-202ce4f02830 | cpp | tensorflow/tensorflow | session_mgr | tensorflow/core/distributed_runtime/session_mgr.cc | tensorflow/core/distributed_runtime/session_mgr_test.cc | #include "tensorflow/core/distributed_runtime/session_mgr.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "xla/tsl/distributed_runtime/coordination/coordination_service.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include "xla/tsl/protobuf/coordination_config.pb.h"
#include "xla/tsl/protobuf/coordination_service.pb.h"
#include "xla/tsl/protobuf/distributed_runtime_payloads.pb.h"
#include "tensorflow/core/activity_watcher/activity.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/renamed_device.h"
#include "tensorflow/core/distributed_runtime/cluster_function_library_runtime.h"
#include "tensorflow/core/distributed_runtime/error_payloads.h"
#include "tensorflow/core/distributed_runtime/graph_mgr.h"
#include "tensorflow/core/distributed_runtime/remote_device.h"
#include "tensorflow/core/distributed_runtime/worker_cache_wrapper.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/protobuf/cluster.pb.h"
#include "tensorflow/core/protobuf/tensorflow_server.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace {
bool IsMultiClientLeader(const ServerDef& server_def,
const CoordinationServiceConfig& config) {
DeviceNameUtils::ParsedName leader_pn;
DeviceNameUtils::ParseFullName(config.service_leader(), &leader_pn);
return server_def.job_name() == leader_pn.job &&
server_def.task_index() == leader_pn.task;
}
// Fills in `config->service_leader()` when the user did not set one:
// prefer the collective group leader from the session config, otherwise
// fall back to task 0 of this server's own job.
void SetCoordinationServiceLeader(const ServerDef& server_def,
                                  CoordinationServiceConfig* config) {
  const auto& experimental = server_def.default_session_config().experimental();
  const std::string& group_leader = experimental.collective_group_leader();
  if (!group_leader.empty()) {
    config->set_service_leader(group_leader);
    LOG(INFO) << "No coordination leader is set, using the collective leader "
              << group_leader;
    return;
  }
  // No collective leader configured: default to /job:<name>/replica:0/task:0.
  const std::string fallback_leader =
      strings::StrCat("/job:", server_def.job_name(), "/replica:0/task:0");
  config->set_service_leader(fallback_leader);
  LOG(INFO) << "No coordination leader is set, using the default leader "
            << fallback_leader;
}
void SetCoordinatedJobList(const ServerDef& server_def,
CoordinationServiceConfig* config) {
for (const auto& job : server_def.cluster().job()) {
tensorflow::CoordinatedJob* coordinated_job =
config->mutable_coordinated_job_list()->Add();
coordinated_job->set_name(job.name());
coordinated_job->set_num_tasks(job.tasks().size());
}
}
}
// Constructs the session manager and eagerly builds the "legacy" session:
// an unnamed WorkerSession that borrows the worker's device manager and
// wraps (not owns) the default worker cache. Requests carrying an empty
// session handle are served by this legacy session.
SessionMgr::SessionMgr(
    WorkerEnv* worker_env, const std::string& default_worker_name,
    std::unique_ptr<WorkerCacheInterface> default_worker_cache,
    WorkerCacheFactory worker_cache_factory,
    tsl::CoordinationServiceRpcHandler* coordination_handler)
    : worker_env_(worker_env),
      default_worker_cache_(std::move(default_worker_cache)),
      // NOTE: relies on member-init order — default_worker_cache_ must be
      // initialized before legacy_session_, which wraps it.
      legacy_session_(WorkerSession::CreateWithBorrowedDeviceMgr(
          "", default_worker_name,
          std::unique_ptr<WorkerCacheInterface>(
              new WorkerCacheWrapper(default_worker_cache_.get())),
          worker_env->device_mgr,
          std::make_unique<GraphMgr>(worker_env, worker_env->device_mgr),
          nullptr,
          [](WorkerSession* worker_session, bool create_worker_session_called,
             DeviceMgr* remote_device_mgr)
              -> std::unique_ptr<DistributedFunctionLibraryRuntime> {
            return std::make_unique<ClusterFunctionLibraryRuntime>(
                worker_session, create_worker_session_called,
                remote_device_mgr);
          })),
      worker_cache_factory_(std::move(worker_cache_factory)),
      coordination_handler_(coordination_handler) {}
// Builds the canonical worker name ("/job:<j>/replica:<r>/task:<t>") for
// the task described by `server_def`.
std::string SessionMgr::WorkerNameFromServerDef(const ServerDef& server_def) {
  const std::string job_prefix =
      strings::StrCat("/job:", server_def.job_name());
  return strings::StrCat(job_prefix, "/replica:", server_def.replica(),
                         "/task:", server_def.task_index());
}
// Convenience overload: no cluster device attributes, no owning master task.
Status SessionMgr::CreateSession(const std::string& session,
                                 const ServerDef& server_def,
                                 bool isolate_session_state,
                                 StatusCallback coordination_error_callback) {
  return CreateSession(session, server_def, {}, isolate_session_state,
                       "",
                       0, coordination_error_callback);
}
// Convenience overload: cluster devices supplied, but no master task and a
// default (no-op) coordination error callback.
Status SessionMgr::CreateSession(
    const std::string& session, const ServerDef& server_def,
    const protobuf::RepeatedPtrField<DeviceAttributes>&
        cluster_device_attributes,
    bool isolate_session_state) {
  return CreateSession(session, server_def, cluster_device_attributes,
                       isolate_session_state,
                       "",
                       0);
}
// Creates (and registers) a WorkerSession for `session`.
//
// Steps, all under mu_:
//   1. Garbage-collect sessions left behind by an older incarnation of the
//      same master task.
//   2. Pick a worker cache and worker name: the default ones when no
//      cluster is given, otherwise freshly built from `server_def`.
//   3. Build the WorkerSession, either with its own (possibly renamed)
//      device manager or borrowing the worker's shared one.
//   4. Lazily bring up the coordination service/agent if configured.
Status SessionMgr::CreateSession(
    const std::string& session, const ServerDef& server_def,
    const protobuf::RepeatedPtrField<DeviceAttributes>&
        cluster_device_attributes,
    bool isolate_session_state, std::string master_task,
    int64_t master_incarnation, StatusCallback coordination_error_callback) {
  mutex_lock l(mu_);
  if (session.empty()) {
    return errors::InvalidArgument("Session must be non-empty.");
  }
  // A new master incarnation means the previous master restarted: any
  // sessions it created are stale and are removed here.
  if (!master_task.empty()) {
    auto it_range = master_to_associated_sessions_.equal_range(master_task);
    if (it_range.first != it_range.second &&
        it_range.first->second.master_incarnation != master_incarnation) {
      LOG(INFO) << "When creating WorkerSession for master task " << master_task
                << ", found old WorkerSessions created by the same master task "
                << "with a different incarnation. These sessions will "
                << "be garbage collected. Current WorkerSession count: "
                << sessions_.size();
      auto it = it_range.first;
      while (it != it_range.second) {
        auto session_it = sessions_.find(it->second.session_handle);
        if (session_it != sessions_.end()) {
          sessions_.erase(session_it);
        }
        it = master_to_associated_sessions_.erase(it);
      }
    }
  }
  // Choose the worker cache: reuse the wrapped default cache when no
  // cluster spec is provided; otherwise build one from the ServerDef.
  WorkerCacheInterface* worker_cache = nullptr;
  std::string worker_name;
  if (server_def.cluster().job().empty()) {
    worker_cache = new WorkerCacheWrapper(default_worker_cache_.get());
    worker_name = legacy_session_->worker_name();
  } else {
    TF_RETURN_IF_ERROR(worker_cache_factory_(server_def, &worker_cache));
    worker_name = WorkerNameFromServerDef(server_def);
  }
  if (worker_cache != nullptr && default_worker_cache_ != nullptr) {
    worker_cache->SetLogging(this->is_logging_active_);
  }
  CHECK(worker_env_->device_mgr)
      << "The WorkerEnv must have a device manager.";
  std::vector<Device*> local_devices = worker_env_->device_mgr->ListDevices();
  CHECK(!local_devices.empty())
      << "The WorkerEnv must have at least one device in `local_devices`.";
  std::shared_ptr<WorkerSession> worker_session;
  std::vector<std::unique_ptr<Device>> cluster_devices;
  // Isolated state (or ClusterSpec propagation) gets a private device
  // manager with devices renamed under this session's worker name.
  if (isolate_session_state || server_def.cluster().job_size()) {
    if (server_def.cluster().job_size()) {
      VLOG(1) << "ClusterSpec propagation is enabled.";
    }
    if (!isolate_session_state) {
      VLOG(1) << "Session state isolation is disabled.";
    }
    std::vector<std::unique_ptr<Device>> renamed_devices;
    renamed_devices.reserve(local_devices.size());
    for (Device* d : local_devices) {
      renamed_devices.push_back(RenamedDevice::NewRenamedDevice(
          worker_name, d, false, isolate_session_state));
    }
    auto device_mgr =
        std::make_unique<StaticDeviceMgr>(std::move(renamed_devices));
    // Remote-device resolution falls back to this session's local devices.
    LookupLocalDevice cb = [&device_mgr](StringPiece name, Device** device) {
      return device_mgr->LookupDevice(name, device);
    };
    AsRemoteDevices(worker_env_->env, cluster_device_attributes, cb,
                    &cluster_devices);
    std::unique_ptr<DynamicDeviceMgr> remote_devices;
    if (!cluster_device_attributes.empty()) {
      remote_devices = std::make_unique<DynamicDeviceMgr>();
      TF_RETURN_IF_ERROR(
          remote_devices->AddDevices(std::move(cluster_devices)));
    }
    auto graph_mgr = std::make_unique<GraphMgr>(worker_env_, device_mgr.get());
    worker_session.reset(new WorkerSession(
        session, worker_name,
        std::unique_ptr<WorkerCacheInterface>(worker_cache),
        std::move(device_mgr), std::move(graph_mgr), std::move(remote_devices),
        [](WorkerSession* worker_session, bool create_worker_session_called,
           DeviceMgr* remote_device_mgr)
            -> std::unique_ptr<DistributedFunctionLibraryRuntime> {
          return std::make_unique<ClusterFunctionLibraryRuntime>(
              worker_session, create_worker_session_called, remote_device_mgr);
        }));
  } else {
    // Shared state: the session borrows the worker-wide device manager.
    AsRemoteDevices(worker_env_->env, cluster_device_attributes, nullptr,
                    &cluster_devices);
    std::unique_ptr<DynamicDeviceMgr> remote_devices;
    if (!cluster_device_attributes.empty()) {
      remote_devices = std::make_unique<DynamicDeviceMgr>();
      TF_RETURN_IF_ERROR(
          remote_devices->AddDevices(std::move(cluster_devices)));
    }
    auto graph_mgr =
        std::make_unique<GraphMgr>(worker_env_, worker_env_->device_mgr);
    worker_session = WorkerSession::CreateWithBorrowedDeviceMgr(
        session, worker_name,
        std::unique_ptr<WorkerCacheInterface>(worker_cache),
        worker_env_->device_mgr, std::move(graph_mgr),
        std::move(remote_devices),
        [](WorkerSession* worker_session, bool create_worker_session_called,
           DeviceMgr* remote_device_mgr)
            -> std::unique_ptr<DistributedFunctionLibraryRuntime> {
          return std::make_unique<ClusterFunctionLibraryRuntime>(
              worker_session, create_worker_session_called, remote_device_mgr);
        });
  }
  sessions_.insert(std::make_pair(session, std::move(worker_session)));
  // Remember which master (and incarnation) owns this session so step 1
  // can clean it up after a master restart.
  if (!master_task.empty()) {
    MasterAssociatedSession s{master_incarnation, session};
    master_to_associated_sessions_.emplace(master_task, s);
  }
  // Coordination service/agent setup happens at most once per process
  // (guarded by coordination_service_agent_ == nullptr).
  CoordinationServiceConfig coordination_config =
      server_def.default_session_config().experimental().coordination_config();
  if (!coordination_config.service_type().empty() &&
      !coordination_config.force_disable() &&
      coordination_service_agent_ == nullptr) {
    std::unique_ptr<CoordinationClientCache> client_cache;
    TF_RETURN_IF_ERROR(worker_cache->GetCoordinationClientCache(&client_cache));
    if (coordination_config.service_leader().empty()) {
      SetCoordinationServiceLeader(server_def, &coordination_config);
    }
    if (coordination_config.coordinated_job_list().empty()) {
      SetCoordinatedJobList(server_def, &coordination_config);
    }
    // Only the leader task hosts the coordination service instance.
    if (IsMultiClientLeader(server_def, coordination_config)) {
      coordination_service_ =
          tsl::CoordinationServiceInterface::EnableCoordinationService(
              worker_env_->env, coordination_config, std::move(client_cache));
      if (coordination_handler_ != nullptr) {
        coordination_handler_->SetServiceInstance(coordination_service_.get());
      }
    }
    // Every task (leader included) runs an agent connected to the leader.
    std::unique_ptr<CoordinationClientCache> agent_cache;
    TF_RETURN_IF_ERROR(worker_cache->GetCoordinationClientCache(&agent_cache));
    coordination_service_agent_ = tsl::CreateCoordinationServiceAgent();
    TF_RETURN_IF_ERROR(coordination_service_agent_->Initialize(
        worker_env_->env, server_def.job_name(), server_def.task_index(),
        coordination_config,
        agent_cache->GetOwnedClient(coordination_config.service_leader()),
        std::move(coordination_error_callback)));
    activity_watcher::MaybeEnableMultiWorkersWatching(
        coordination_service_agent_.get());
  }
  return absl::OkStatus();
}
// Replaces the default worker cache, taking ownership of `worker_cache`.
// NOTE(review): not guarded by mu_, and existing sessions keep wrappers
// around the old cache — presumably callers invoke this before serving;
// confirm before relying on concurrent use.
void SessionMgr::ResetDefaultWorkerCache(WorkerCacheInterface* worker_cache) {
  default_worker_cache_.reset(worker_cache);
}
// Updates an existing WorkerSession with a new cluster view: computes the
// set of remote devices to add (new names, or same name with a different
// incarnation) and to remove (stale incarnations, or devices belonging to
// workers no longer listed by the new worker cache), then swaps in the new
// worker cache and applies the device delta.
Status SessionMgr::UpdateSession(
    const std::string& session, const ServerDef& server_def,
    const protobuf::RepeatedPtrField<DeviceAttributes>&
        cluster_device_attributes) {
  mutex_lock l(mu_);
  if (session.empty()) {
    return errors::InvalidArgument("Session must be non-empty.");
  }
  auto it = sessions_.find(session);
  if (it == sessions_.end()) {
    return errors::InvalidArgument("Cannot update session ", session,
                                   " because it does not exist.");
  }
  std::shared_ptr<WorkerSession> worker_session = it->second;
  // Build the replacement worker cache (default wrapper if no cluster spec).
  WorkerCacheInterface* worker_cache = nullptr;
  if (server_def.cluster().job().empty()) {
    worker_cache = new WorkerCacheWrapper(default_worker_cache_.get());
  } else {
    TF_RETURN_IF_ERROR(worker_cache_factory_(server_def, &worker_cache));
  }
  std::vector<std::string> updated_remote_workers;
  worker_cache->ListWorkers(&updated_remote_workers);
  std::vector<std::unique_ptr<Device>> cluster_devices;
  const DeviceMgr* local_device_mgr = worker_session->device_mgr();
  DeviceMgr* remote_device_mgr = worker_session->remote_device_mgr();
  std::vector<Device*> curr_remote_devices = remote_device_mgr->ListDevices();
  std::vector<std::unique_ptr<Device>> added_remote_devices;
  std::vector<Device*> removed_remote_devices;
  std::vector<DeviceAttributes> added_cluster_device_attrs;
  // Classify each incoming device: unknown name -> add; known name with a
  // new incarnation (device restarted) -> remove old and add new.
  // NOTE(review): `device` is only assigned by a successful LookupDevice;
  // the else-branch is reached only when one lookup succeeded — confirm
  // LookupDevice never leaves the out-param set on failure.
  for (const auto& da : cluster_device_attributes) {
    Device* device;
    if (!local_device_mgr->LookupDevice(da.name(), &device).ok() &&
        !remote_device_mgr->LookupDevice(da.name(), &device).ok()) {
      added_cluster_device_attrs.emplace_back(da);
    } else if (device != nullptr &&
               device->attributes().incarnation() != da.incarnation()) {
      removed_remote_devices.emplace_back(device);
      added_cluster_device_attrs.emplace_back(da);
    }
  }
  // Drop devices whose owning task disappeared from the new worker list.
  for (Device* device : curr_remote_devices) {
    std::string task_name;
    DeviceNameUtils::GetTaskName(device->parsed_name(), &task_name);
    if (std::find(updated_remote_workers.begin(), updated_remote_workers.end(),
                  task_name) == updated_remote_workers.end()) {
      removed_remote_devices.emplace_back(device);
    }
  }
  protobuf::RepeatedPtrField<DeviceAttributes> added_cluster_device_attrs_pb(
      added_cluster_device_attrs.begin(), added_cluster_device_attrs.end());
  AsRemoteDevices(worker_env_->env, added_cluster_device_attrs_pb, nullptr,
                  &added_remote_devices);
  TF_RETURN_IF_ERROR(worker_session->UpdateWorkerCacheAndDevices(
      std::unique_ptr<WorkerCacheInterface>(worker_cache),
      std::move(added_remote_devices), removed_remote_devices));
  return absl::OkStatus();
}
// Removes the session with the given handle, if registered. Deleting an
// unknown handle is a no-op; the call always succeeds.
Status SessionMgr::DeleteSession(const std::string& session) {
  mutex_lock l(mu_);
  // map::erase(key) is a no-op when the key is absent, which matches the
  // original find-then-erase behavior.
  sessions_.erase(session);
  return absl::OkStatus();
}
// Removes every registered session. The map is swapped out under the lock
// so that the WorkerSession destructors run without holding mu_.
Status SessionMgr::DeleteAllSessions() {
  std::map<std::string, std::shared_ptr<WorkerSession>> released_sessions;
  {
    mutex_lock l(mu_);
    swap(sessions_, released_sessions);
  }
  // Release each session outside the critical section.
  for (auto& entry : released_sessions) {
    entry.second.reset();
  }
  return absl::OkStatus();
}
// Resolves a session handle to its WorkerSession; mu_ must already be held.
// An empty handle maps to the legacy session. An unknown handle returns an
// Aborted error carrying the WorkerPossiblyRestarted payload so callers can
// distinguish "worker restarted" from other failures.
Status SessionMgr::WorkerSessionForSessionLocked(
    const std::string& session_handle,
    std::shared_ptr<WorkerSession>* out_session) {
  if (session_handle.empty()) {
    *out_session = legacy_session_;
  } else {
    auto it = sessions_.find(session_handle);
    if (it == sessions_.end()) {
      return errors::AbortedWithPayloads(
          strings::StrCat("Session handle is not found: ", session_handle,
                          ". Possibly this worker (\"",
                          legacy_session_->worker_name(),
                          "\") just restarted."),
          {{kWorkerPossiblyRestarted,
            distributed_runtime::WorkerPossiblyRestarted()
                .SerializeAsString()}});
    } else {
      *out_session = it->second;
    }
  }
  return absl::OkStatus();
}
// Thread-safe wrapper: acquires mu_ and delegates to the locked variant.
Status SessionMgr::WorkerSessionForSession(
    const std::string& session_handle,
    std::shared_ptr<WorkerSession>* out_session) {
  mutex_lock l(mu_);
  return WorkerSessionForSessionLocked(session_handle, out_session);
}
// Returns the session that serves requests carrying an empty session handle
// (see WorkerSessionForSessionLocked).
std::shared_ptr<WorkerSession> SessionMgr::LegacySession() {
  return legacy_session_;
}
// Returns a non-owning pointer to the coordination service agent; may be
// null after TeardownCoordinationServiceAgent() (or if never created).
tsl::CoordinationServiceAgent* SessionMgr::GetCoordinationServiceAgent() {
  return coordination_service_agent_.get();
}
void SessionMgr::SetLogging(bool active) {
mutex_lock l(mu_);
this->is_logging_active_ = active;
if (legacy_session_) {
auto* worker_cache = legacy_session_->worker_cache();
if (worker_cache) {
worker_cache->SetLogging(active);
}
}
for (const auto& session_kv : sessions_) {
auto session = session_kv.second.get();
if (session) {
auto* worker_cache = session->worker_cache();
if (worker_cache) {
worker_cache->SetLogging(active);
}
}
}
}
void SessionMgr::RetrieveLogs(int64_t step_id, LoggingResponse* response) {
mutex_lock l(mu_);
if (legacy_session_) {
auto* worker_cache = legacy_session_->worker_cache();
if (worker_cache) {
auto step_stats = StepStats();
if (worker_cache->RetrieveLogs(step_id, &step_stats)) {
auto* labeled_step_stats = response->add_step();
labeled_step_stats->set_step_id(step_id);
labeled_step_stats->mutable_step_stats()->Swap(&step_stats);
}
}
}
for (const auto& session_kv : sessions_) {
auto session = session_kv.second.get();
if (session) {
auto* worker_cache = session->worker_cache();
if (worker_cache) {
auto step_stats = StepStats();
if (worker_cache->RetrieveLogs(step_id, &step_stats)) {
auto* labeled_step_stats = response->add_step();
labeled_step_stats->set_step_id(step_id);
labeled_step_stats->mutable_step_stats()->Swap(&step_stats);
}
}
}
}
}
void SessionMgr::ClearLogs() {
mutex_lock l(mu_);
if (legacy_session_) {
auto* worker_cache = legacy_session_->worker_cache();
if (worker_cache) {
worker_cache->ClearLogs();
}
}
for (const auto& session_kv : sessions_) {
auto session = session_kv.second.get();
if (session) {
auto* worker_cache = session->worker_cache();
if (worker_cache) {
worker_cache->ClearLogs();
}
}
}
}
// Drops the manager's reference to the coordination service. Any cleanup the
// service performs happens in its own destructor when the last reference
// goes away.
void SessionMgr::TeardownCoordinationService() {
  coordination_service_ = nullptr;
}
// Drops the manager's reference to the coordination service agent; after
// this, GetCoordinationServiceAgent() returns null.
void SessionMgr::TeardownCoordinationServiceAgent() {
  coordination_service_agent_ = nullptr;
}
} | #include "tensorflow/core/distributed_runtime/session_mgr.h"
#include <string>
#include "absl/status/status.h"
#include "tensorflow/core/distributed_runtime/error_payloads.h"
#include "tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.h"
#include "tensorflow/core/distributed_runtime/worker_env.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/cluster.pb.h"
namespace tensorflow {
// Minimal Device implementation for these tests: it cannot sync or allocate
// and exists only so a DeviceMgr has something to manage.
class FakeDevice : public Device {
 private:
  explicit FakeDevice(const DeviceAttributes& device_attributes)
      : Device(nullptr, device_attributes) {}

 public:
  // Kernels never run on this device, so Sync is unimplemented.
  Status Sync() override { return errors::Unimplemented("FakeDevice::Sync()"); }
  // No allocator: tensors are never materialized on this device.
  Allocator* GetAllocator(AllocatorAttributes attr) override { return nullptr; }
  // Builds a fake CPU device with the given fully-qualified device name.
  static std::unique_ptr<Device> MakeCPU(const std::string& name) {
    DeviceAttributes device_attributes;
    device_attributes.set_name(name);
    device_attributes.set_device_type(DeviceType("FakeCPU").type());
    return std::unique_ptr<Device>(new FakeDevice(device_attributes));
  }
};
// Fixture: a SessionMgr for worker "/job:mnist/replica:0/task:0" backed by a
// single fake CPU device and a worker-cache factory that yields no cache.
class SessionMgrTest : public ::testing::Test {
 protected:
  SessionMgrTest()
      : mgr_(&env_, "/job:mnist/replica:0/task:0",
             std::unique_ptr<WorkerCacheInterface>(), factory_,
             nullptr) {
    device_mgr_ = std::make_unique<DynamicDeviceMgr>(
        FakeDevice::MakeCPU("/job:mnist/replica:0/task:0/device:fakecpu:0"));
    env_.device_mgr = device_mgr_.get();
  }

  std::unique_ptr<DeviceMgr> device_mgr_;
  WorkerEnv env_;
  // Factory that deliberately produces a null worker cache: these tests
  // never need cross-worker RPC.
  SessionMgr::WorkerCacheFactory factory_ =
      [](const ServerDef& server_def, WorkerCacheInterface** worker_cache) {
        *worker_cache = nullptr;
        return absl::OkStatus();
      };
  SessionMgr mgr_;
};
// A session created under an explicit handle is retrievable by that handle
// and is distinct from the legacy session.
TEST_F(SessionMgrTest, CreateSessionSimple) {
  ServerDef server_def;
  server_def.set_job_name("worker");
  server_def.set_task_index(3);

  const std::string handle = "test_session_handle";
  TF_EXPECT_OK(mgr_.CreateSession(handle, server_def, true));

  std::shared_ptr<WorkerSession> worker_session;
  TF_EXPECT_OK(mgr_.WorkerSessionForSession(handle, &worker_session));
  EXPECT_NE(nullptr, worker_session)
      << "Session for " << handle << "was null";
  EXPECT_NE(mgr_.LegacySession(), worker_session);
  TF_EXPECT_OK(mgr_.DeleteSession(handle));
}
// With a cluster_def, the worker name is derived from job name + task index,
// and a device listed for this task is visible through the session's remote
// device manager and reported as local.
TEST_F(SessionMgrTest, CreateSessionClusterDefWorkerName) {
  ServerDef server_def;
  server_def.set_job_name("worker");
  server_def.set_task_index(3);
  auto job = server_def.mutable_cluster()->add_job();
  job->set_name("worker");
  job->mutable_tasks()->insert({3, "localhost:3333"});

  protobuf::RepeatedPtrField<DeviceAttributes> cluster_device_attributes;
  // A device belonging to this task...
  DeviceAttributes* local_cpu = cluster_device_attributes.Add();
  local_cpu->set_name("/job:worker/replica:0/task:3/device:fakecpu:0");
  // ...and one on a different task.
  DeviceAttributes* remote_cpu = cluster_device_attributes.Add();
  remote_cpu->set_name("/job:coordinator/replica:0/task:0/device:fakecpu:0");

  std::string session_handle = "test_session_handle";
  TF_EXPECT_OK(mgr_.CreateSession(session_handle, server_def,
                                  cluster_device_attributes, true));
  std::shared_ptr<WorkerSession> session;
  TF_EXPECT_OK(mgr_.WorkerSessionForSession(session_handle, &session));
  Device* device;
  TF_EXPECT_OK(
      session->remote_device_mgr()->LookupDevice(local_cpu->name(), &device));
  EXPECT_TRUE(device->IsLocal());
  EXPECT_NE(nullptr, session) << "Session for " << session_handle << "was null";
  EXPECT_EQ("/job:worker/replica:0/task:3", session->worker_name());
  TF_EXPECT_OK(mgr_.DeleteSession(session_handle));
}
// With an empty ServerDef, the session falls back to the manager's own
// worker name ("/job:mnist/replica:0/task:0").
TEST_F(SessionMgrTest, CreateSessionDefaultWorkerName) {
  ServerDef server_def;
  const std::string handle = "test_session_handle";
  TF_EXPECT_OK(mgr_.CreateSession(handle, server_def, true));

  std::shared_ptr<WorkerSession> worker_session;
  TF_EXPECT_OK(mgr_.WorkerSessionForSession(handle, &worker_session));
  EXPECT_NE(nullptr, worker_session)
      << "Session for " << handle << "was null";
  EXPECT_EQ("/job:mnist/replica:0/task:0", worker_session->worker_name());
  TF_EXPECT_OK(mgr_.DeleteSession(handle));
}
// Sessions created with isolate_session_state == false share device resource
// managers with each other; sessions created with true each get their own.
TEST_F(SessionMgrTest, CreateSessionIsolateSessionState) {
  ServerDef server_def;
  server_def.set_job_name("worker");
  server_def.set_task_index(3);

  // Two non-isolated sessions.
  TF_EXPECT_OK(mgr_.CreateSession("handle_1", server_def, false));
  std::shared_ptr<WorkerSession> session_1;
  TF_EXPECT_OK(mgr_.WorkerSessionForSession("handle_1", &session_1));
  std::vector<Device*> devices_1 = session_1->device_mgr()->ListDevices();
  EXPECT_EQ(1, devices_1.size());

  TF_EXPECT_OK(mgr_.CreateSession("handle_2", server_def, false));
  std::shared_ptr<WorkerSession> session_2;
  TF_EXPECT_OK(mgr_.WorkerSessionForSession("handle_2", &session_2));
  std::vector<Device*> devices_2 = session_2->device_mgr()->ListDevices();
  EXPECT_EQ(1, devices_2.size());

  // Two isolated sessions.
  TF_EXPECT_OK(mgr_.CreateSession("handle_3", server_def, true));
  std::shared_ptr<WorkerSession> session_3;
  TF_EXPECT_OK(mgr_.WorkerSessionForSession("handle_3", &session_3));
  std::vector<Device*> devices_3 = session_3->device_mgr()->ListDevices();
  EXPECT_EQ(1, devices_3.size());

  TF_EXPECT_OK(mgr_.CreateSession("handle_4", server_def, true));
  std::shared_ptr<WorkerSession> session_4;
  TF_EXPECT_OK(mgr_.WorkerSessionForSession("handle_4", &session_4));
  std::vector<Device*> devices_4 = session_4->device_mgr()->ListDevices();
  EXPECT_EQ(1, devices_4.size());

  // Non-isolated sessions share a resource manager; isolated ones share
  // with nobody (not even each other).
  EXPECT_EQ(devices_1[0]->resource_manager(), devices_2[0]->resource_manager());
  EXPECT_NE(devices_1[0]->resource_manager(), devices_3[0]->resource_manager());
  EXPECT_NE(devices_1[0]->resource_manager(), devices_4[0]->resource_manager());
  EXPECT_NE(devices_3[0]->resource_manager(), devices_4[0]->resource_manager());
}
// Sessions are associated with the master (name + incarnation) that created
// them: when the same master creates a session under a *new* incarnation,
// the sessions created under the old incarnation are garbage-collected and
// can no longer be looked up.
TEST_F(SessionMgrTest, CreateSessionWithMasterName) {
  ServerDef server_def;
  server_def.set_job_name("worker");
  server_def.set_task_index(3);
  auto job = server_def.mutable_cluster()->add_job();
  job->set_name("worker");
  job->mutable_tasks()->insert({3, "localhost:3333"});
  protobuf::RepeatedPtrField<DeviceAttributes> cluster_device_attributes;

  const std::string master_name = "/job:master/replica:0/task:1";
  const int64_t old_incarnation = random::New64();
  const int64_t new_incarnation = random::New64();

  // Two sessions created under the old incarnation.
  std::string sess_handle1 = "test_session_handle_1";
  TF_EXPECT_OK(mgr_.CreateSession(sess_handle1, server_def,
                                  cluster_device_attributes, true, master_name,
                                  old_incarnation));
  std::string sess_handle2 = "test_session_handle_2";
  TF_EXPECT_OK(mgr_.CreateSession(sess_handle2, server_def,
                                  cluster_device_attributes, true, master_name,
                                  old_incarnation));

  std::shared_ptr<WorkerSession> session;
  TF_EXPECT_OK(mgr_.WorkerSessionForSession(sess_handle1, &session));
  EXPECT_NE(nullptr, session) << "Session for " << sess_handle1 << "was null";
  TF_EXPECT_OK(mgr_.WorkerSessionForSession(sess_handle2, &session));
  EXPECT_NE(nullptr, session) << "Session for " << sess_handle2 << "was null";

  // A session under the new incarnation triggers GC of the old ones.
  std::string sess_handle3 = "test_session_handle_3";
  TF_EXPECT_OK(mgr_.CreateSession(sess_handle3, server_def,
                                  cluster_device_attributes, true, master_name,
                                  new_incarnation));

  EXPECT_NE(mgr_.WorkerSessionForSession(sess_handle1, &session),
            absl::OkStatus())
      << "Session for " << sess_handle1
      << " should have been garbage collected.";
  EXPECT_NE(mgr_.WorkerSessionForSession(sess_handle2, &session),
            absl::OkStatus())
      << "Session for " << sess_handle2
      << " should have been garbage collected.";

  TF_EXPECT_OK(mgr_.WorkerSessionForSession(sess_handle3, &session));
  EXPECT_NE(nullptr, session) << "Session for " << sess_handle3 << "was null";

  // Deleting an already-collected handle is a no-op.
  TF_EXPECT_OK(mgr_.DeleteSession(sess_handle2));
  TF_EXPECT_OK(mgr_.DeleteSession(sess_handle3));
}
// With an empty master name (and incarnation 0), no incarnation-based
// garbage collection applies: both sessions stay alive side by side.
TEST_F(SessionMgrTest, CreateSessionWithoutMasterName) {
  ServerDef server_def;
  server_def.set_job_name("worker");
  server_def.set_task_index(3);
  auto job = server_def.mutable_cluster()->add_job();
  job->set_name("worker");
  job->mutable_tasks()->insert({3, "localhost:3333"});
  protobuf::RepeatedPtrField<DeviceAttributes> cluster_device_attributes;

  std::string sess_handle1 = "test_session_handle_no_master_1";
  TF_EXPECT_OK(mgr_.CreateSession(sess_handle1, server_def,
                                  cluster_device_attributes, true, "", 0));
  std::string sess_handle2 = "test_session_handle_no_master_2";
  TF_EXPECT_OK(mgr_.CreateSession(sess_handle2, server_def,
                                  cluster_device_attributes, true, "", 0));

  std::shared_ptr<WorkerSession> session;
  TF_EXPECT_OK(mgr_.WorkerSessionForSession(sess_handle1, &session));
  EXPECT_NE(nullptr, session) << "Session for " << sess_handle1 << "was null";
  TF_EXPECT_OK(mgr_.WorkerSessionForSession(sess_handle2, &session));
  EXPECT_NE(nullptr, session) << "Session for " << sess_handle2 << "was null";

  TF_EXPECT_OK(mgr_.DeleteSession(sess_handle1));
  TF_EXPECT_OK(mgr_.DeleteSession(sess_handle2));
}
// An empty session handle resolves to the legacy session; deleting it is
// harmless.
TEST_F(SessionMgrTest, LegacySession) {
  const std::string empty_handle = "";
  std::shared_ptr<WorkerSession> worker_session;
  TF_EXPECT_OK(mgr_.WorkerSessionForSession(empty_handle, &worker_session));
  EXPECT_EQ(mgr_.LegacySession(), worker_session);
  TF_EXPECT_OK(mgr_.DeleteSession(empty_handle));
}
// Looking up a handle that was never created yields Aborted with the
// "worker possibly restarted" payload attached.
TEST_F(SessionMgrTest, UnknownSessionHandle) {
  std::shared_ptr<WorkerSession> worker_session;
  Status status =
      mgr_.WorkerSessionForSession("unknown_session_handle", &worker_session);
  EXPECT_TRUE(absl::IsAborted(status));
  EXPECT_TRUE(
      absl::StrContains(status.message(), "Session handle is not found"));
  EXPECT_TRUE(status.GetPayload(kWorkerPossiblyRestarted).has_value());
}
// Worker name format is "/job:<job>/replica:0/task:<index>".
TEST_F(SessionMgrTest, WorkerNameFromServerDef) {
  ServerDef server_def;
  server_def.set_job_name("worker");
  server_def.set_task_index(3);
  EXPECT_EQ("/job:worker/replica:0/task:3",
            SessionMgr::WorkerNameFromServerDef(server_def));
}
// Deleting via the empty (legacy) handle must succeed as a no-op.
TEST_F(SessionMgrTest, DeleteLegacySession) {
  TF_EXPECT_OK(mgr_.DeleteSession(""));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/session_mgr.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/session_mgr_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b4ad9eac-84ab-45ce-8e5f-fd72109f0f90 | cpp | tensorflow/tensorflow | hlo_utils | third_party/xla/xla/hlo/translate/hlo_to_mhlo/hlo_utils.cc | third_party/xla/xla/hlo/translate/hlo_to_mhlo/hlo_utils_test.cc | #include "xla/hlo/translate/hlo_to_mhlo/hlo_utils.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>
#include "absl/status/statusor.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/ValueRange.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/mlir/utils/type_util.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using mlir::AffineMap;
using mlir::Builder;
using mlir::DenseElementsAttr;
using mlir::ShapedType;
// Creates a DenseElementsAttr of `type` holding a copy of `literal`'s data,
// interpreting the elements as `CppType`.
//
// Sub-byte integer literals (the is_intN types) are widened so that each
// element occupies one char, then handed to the raw-buffer constructor; all
// other types are passed as a typed ArrayRef view of the literal's buffer
// (DenseElementsAttr::get copies the data).
template <typename CppType>
::mlir::DenseElementsAttr CreateDenseAttrFromLiteral(
    const ShapedType& type, const LiteralBase& literal) {
  if constexpr (is_intN_v<CppType>) {
    // One char per logical element, even though CppType is narrower.
    auto data_span = literal.data<CppType>();
    std::vector<char> packed_padded_data;
    packed_padded_data.reserve(literal.element_count());
    for (size_t i = 0; i < literal.element_count(); i++) {
      packed_padded_data.push_back(static_cast<char>(data_span[i]));
    }
    return ::mlir::DenseElementsAttr::getFromRawBuffer(type,
                                                       packed_padded_data);
  } else {
    auto data_span = literal.data<CppType>();
    return ::mlir::DenseElementsAttr::get(
        type, llvm::ArrayRef(data_span.data(), data_span.size()));
  }
}
// Returns an affine layout map for `shape`, for embedding in a MemRefType.
//
// - Absent or default (monotonic dim0-major) layouts need no explicit map
//   and yield an empty AffineMap.
// - Tiled layouts, and non-default layouts on dynamic shapes, are rejected
//   with an Internal error.
// - A degenerate shape (some dimension of size 0 drives the accumulated
//   stride to 0) also yields an empty AffineMap.
// Otherwise the strides implied by the minor-to-major order are encoded as
// a strided linear layout map.
absl::StatusOr<AffineMap> GetPermutationIfAvailable(const Shape& shape,
                                                    mlir::Builder builder) {
  if (!shape.layout().tiles().empty()) {
    return Internal("Tiled layouts are not yet supported");
  }
  if (!shape.has_layout() ||
      LayoutUtil::IsMonotonicWithDim0Major(shape.layout())) {
    return AffineMap();
  }
  if (!shape.is_static()) {
    return Internal("Permutations for dynamic shapes are not yet supported");
  }
  int64_t accumulated_stride = 1;
  llvm::SmallVector<int64_t, 4> strides(shape.rank(), 1);
  // Walk dimensions from fastest-varying (minor) to slowest-varying (major),
  // assigning each dimension the element stride accumulated so far.
  for (int64_t dim : LayoutUtil::MinorToMajor(shape)) {
    strides[dim] = accumulated_stride;
    accumulated_stride *= shape.dimensions(dim);
  }
  if (accumulated_stride == 0) {
    return AffineMap();
  }
  return makeStridedLinearLayoutMap(strides, /*offset=*/0,
                                    builder.getContext());
}
}
// Converts an XLA shape to an MLIR MemRefType, carrying any non-default
// layout as a strided affine map. Fails if the element type has no MLIR
// analogue or the layout is unsupported.
absl::StatusOr<mlir::MemRefType> ConvertTensorShapeToMemRefType(
    const Shape& shape, mlir::Builder builder) {
  TF_ASSIGN_OR_RETURN(
      mlir::Type element_type,
      ConvertPrimitiveTypeToMlirType(shape.element_type(), builder));
  TF_ASSIGN_OR_RETURN(mlir::AffineMap permutation,
                      GetPermutationIfAvailable(shape, builder));
  const auto dimensions = shape.dimensions();
  llvm::SmallVector<int64_t, 4> dims(dimensions.begin(), dimensions.end());
  return mlir::MemRefType::get(dims, element_type, permutation);
}
// Converts `literal` into an MLIR DenseElementsAttr whose type mirrors the
// literal's shape. Returns an Internal error for non-array element types.
absl::StatusOr<mlir::DenseElementsAttr> CreateDenseElementsAttrFromLiteral(
    const LiteralBase& literal, Builder builder) {
  TF_ASSIGN_OR_RETURN(auto type,
                      ConvertTensorShapeToType<mlir::RankedTensorType>(
                          literal.shape(), builder));
  auto element_type = literal.shape().element_type();
  // Dispatch on the XLA primitive type to select the matching native C++
  // element type for the copy.
  return primitive_util::PrimitiveTypeSwitch<
      absl::StatusOr<mlir::DenseElementsAttr>>(
      [&](auto primitive_type_constant)
          -> absl::StatusOr<mlir::DenseElementsAttr> {
        if constexpr (primitive_util::IsArrayType(primitive_type_constant)) {
          return CreateDenseAttrFromLiteral<
              primitive_util::NativeTypeOf<primitive_type_constant>>(type,
                                                                     literal);
        }
        return Internal("Unsupported type: %s",
                        PrimitiveType_Name(element_type));
      },
      element_type);
}
// Wraps `vector` in a DenseIntElementsAttr of i64. When `shape` is empty the
// attribute is a rank-1 tensor of the vector's length; otherwise `shape` is
// used verbatim.
mlir::DenseIntElementsAttr CreateDenseIntElementsAttrFromVector(
    const llvm::ArrayRef<int64_t> vector, mlir::Builder builder,
    llvm::ArrayRef<int64_t> shape) {
  llvm::SmallVector<int64_t, 1> default_shape;
  llvm::ArrayRef<int64_t> attr_shape = shape;
  if (attr_shape.empty()) {
    default_shape.push_back(vector.size());
    attr_shape = default_shape;
  }
  auto tensor_type =
      mlir::RankedTensorType::get(attr_shape, builder.getIntegerType(64));
  return mlir::DenseIntElementsAttr::get(tensor_type, vector);
}
// Recursively materializes a value of (possibly nested tuple) `type` from
// `flatten_values`, consuming leaves from the front of the range. On return,
// `flatten_values` has been advanced past every consumed value.
//
// Uses the free-function llvm::dyn_cast instead of the deprecated
// Type::dyn_cast member, consistent with Untuple below.
mlir::Value CreateTupleValue(mlir::OpBuilder* func_builder, mlir::Location loc,
                             mlir::ValueRange& flatten_values,
                             mlir::Type type) {
  auto tuple_type = llvm::dyn_cast<mlir::TupleType>(type);
  if (!tuple_type) {
    // Leaf type: pop exactly one value off the front of the range.
    assert(!flatten_values.empty());
    auto retval = flatten_values.front();
    flatten_values = flatten_values.drop_front();
    return retval;
  }
  llvm::SmallVector<mlir::Value> flatten_sub_values;
  for (auto child_type : tuple_type.getTypes())
    flatten_sub_values.push_back(
        CreateTupleValue(func_builder, loc, flatten_values, child_type));
  return func_builder->create<mlir::mhlo::TupleOp>(loc, flatten_sub_values)
      .getResult();
}
// If `type` is a tuple, wraps `op`'s results in (possibly nested)
// mhlo::TupleOp ops of that shape and returns the outermost TupleOp;
// otherwise returns `op` unchanged.
//
// Uses the free-function llvm::isa instead of the deprecated Type::isa
// member, consistent with Untuple below.
mlir::Operation* CreateTupleFromOpResults(mlir::OpBuilder* func_builder,
                                          mlir::Location loc,
                                          mlir::Operation* op,
                                          mlir::Type type) {
  if (!llvm::isa<mlir::TupleType>(type)) return op;

  mlir::ValueRange flattened_results_ref(op->getResults());
  auto result =
      CreateTupleValue(func_builder, loc, flattened_results_ref, type);
  auto defining_tuple_op = result.getDefiningOp<mlir::mhlo::TupleOp>();
  assert(defining_tuple_op && "builder didn't return the right type");
  auto tupleOp = defining_tuple_op.getOperation();
  return tupleOp;
}
// Expands a tuple type into the range of its element types; any other type
// is returned as a single-element range.
//
// The original performed an isa<> check followed by a redundant dyn_cast<>;
// a single dyn_cast-with-initializer does both in one step.
mlir::TypeRange Untuple(const mlir::Type& type) {
  if (auto tuple_type = llvm::dyn_cast<mlir::TupleType>(type)) {
    return tuple_type.getTypes();
  }
  return type;
}
} | #include "xla/hlo/translate/hlo_to_mhlo/hlo_utils.h"
#include <cstdint>
#include <cstring>
#include <vector>
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/DebugStringHelper.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
namespace xla {
namespace {
// Checks shape -> RankedTensorType conversion for a fully static shape and
// for a shape with a dynamic (bounded) leading dimension.
TEST(ConvertTensorShapeToType, Simple) {
  mlir::MLIRContext context;
  context.loadDialect<mlir::mhlo::MhloDialect>();
  mlir::Builder builder(&context);

  {
    // Static shape: maps directly to tensor<8x128xi32>.
    auto shape = ShapeUtil::MakeShape(PrimitiveType::S32, {8, 128});
    TF_ASSERT_OK_AND_ASSIGN(
        auto type,
        ConvertTensorShapeToType<mlir::RankedTensorType>(shape, builder));
    auto expected = mlir::RankedTensorType::get({8, 128}, builder.getI32Type());
    EXPECT_TRUE(type == expected)
        << " Expected: " << mlir::debugString(expected)
        << " Computed: " << mlir::debugString(type);
  }
  {
    // Dim 0 is dynamic with bound 8: the MLIR type gets a '?' dimension and
    // carries the bound in mhlo TypeExtensions.
    auto shape =
        ShapeUtil::MakeShape(PrimitiveType::S32, {8, 128}, {true, false});
    TF_ASSERT_OK_AND_ASSIGN(
        auto type,
        ConvertTensorShapeToType<mlir::RankedTensorType>(shape, builder));
    int64_t bounds[] = {8, mlir::ShapedType::kDynamic};
    auto extensions = mlir::mhlo::TypeExtensionsAttr::get(&context, bounds);
    auto expected = mlir::RankedTensorType::get(
        {mlir::ShapedType::kDynamic, 128}, builder.getI32Type(), extensions);
    EXPECT_TRUE(type == expected)
        << " Expected: " << mlir::debugString(expected)
        << " Computed: " << mlir::debugString(type);
  }
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/translate/hlo_to_mhlo/hlo_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/translate/hlo_to_mhlo/hlo_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c6068034-ee18-406b-bba3-544be2c8deaf | cpp | tensorflow/tensorflow | optional_debug_tools | tensorflow/lite/optional_debug_tools.cc | tensorflow/lite/optional_debug_tools_test.cc | #include "tensorflow/lite/optional_debug_tools.h"
#include <cassert>
#include <cinttypes>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <functional>
#include <limits>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
// Returns the canonical enumerator name for a TfLiteAllocationType, or
// "(invalid)" for values outside the enum.
const char* AllocTypeName(TfLiteAllocationType type) {
  switch (type) {
    case kTfLiteMemNone:
      return "kTfLiteMemNone";
    case kTfLiteMmapRo:
      return "kTfLiteMmapRo";
    case kTfLiteDynamic:
      return "kTfLiteDynamic";
    case kTfLiteArenaRw:
      return "kTfLiteArenaRw";
    case kTfLiteArenaRwPersistent:
      return "kTfLiteArenaRwPersistent";
    case kTfLitePersistentRo:
      return "kTfLitePersistentRo";
    case kTfLiteCustom:
      return "kTfLiteCustom";
    case kTfLiteVariantObject:
      return "kTfLiteVariantObject";
  }
  return "(invalid)";
}
// Computes, for each node in `subgraph`, whether it has been replaced by a
// delegate node and which delegate node replaced it (-1 if none).
SubgraphDelegationMetadata GetNodeDelegationMetadata(const Subgraph& subgraph) {
  const size_t num_nodes = subgraph.nodes_size();
  SubgraphDelegationMetadata metadata;
  metadata.is_node_delegated.assign(num_nodes, false);
  metadata.replaced_by_node.assign(num_nodes, -1);
  metadata.has_delegate_applied = false;
  for (size_t i = 0; i < num_nodes; ++i) {
    const std::pair<TfLiteNode, TfLiteRegistration>* node_and_reg =
        subgraph.node_and_registration(static_cast<int>(i));
    const TfLiteNode& node = node_and_reg->first;
    if (node.delegate == nullptr) continue;
    metadata.has_delegate_applied = true;
    // A delegate node's builtin_data holds TfLiteDelegateParams listing the
    // original nodes it replaced.
    auto* params = static_cast<TfLiteDelegateParams*>(node.builtin_data);
    for (int replaced_id : TfLiteIntArrayView(params->nodes_to_replace)) {
      metadata.is_node_delegated[replaced_id] = true;
      metadata.replaced_by_node[replaced_id] = i;
    }
  }
  return metadata;
}
namespace {
void PrintIntVector(const std::vector<int>& v,
bool collapse_consecutives = true,
bool add_newline = false);
// Accumulates per-tensor statistics for one TfLiteAllocationType arena and
// prints a summary of the arena's estimated extent and a chain of tensors
// with non-overlapping allocations that spans it.
class MemoryArenaInfo {
 public:
  explicit MemoryArenaInfo(TfLiteAllocationType type)
      : allocation_type_(type) {}

  // Records `tensor` (at index `tensor_index`) if it belongs to this arena
  // and has backing memory.
  void Update(size_t tensor_index, const TfLiteTensor& tensor) {
    if (tensor.allocation_type != allocation_type_) return;
    if (tensor.data.data == nullptr) return;
    if (tensor.bytes > max_tensor_mem_bytes_) {
      max_tensor_mem_bytes_ = tensor.bytes;
      max_tensor_id_ = tensor_index;
    }
    size_t current_start_addr = reinterpret_cast<size_t>(tensor.data.data);
    size_t current_end_addr = current_start_addr + tensor.bytes;
    if (current_start_addr < min_tensor_start_addr_) {
      min_tensor_start_addr_ = current_start_addr;
    }
    if (current_end_addr > max_tensor_end_addr_) {
      max_tensor_end_addr_ = current_end_addr;
    }
    TensorAllocInfo info;
    info.tensor_id = tensor_index;
    info.start_addr = current_start_addr;
    info.bytes = tensor.bytes;
    const auto result = alloc_info_.insert(info);
    // Insertion never collides: each tensor index is recorded at most once.
    assert(result.second);
    (void)result;
  }

  // Lowest tensor start address seen; the arena's estimated base.
  size_t GetArenaStartingAddress() const { return min_tensor_start_addr_; }

  // Prints the arena summary to stdout.
  void Print() const {
    printf("%s Info: ", AllocTypeName(allocation_type_));
    if (max_tensor_end_addr_ == 0) {
      printf("not holding any allocation.\n");
      return;
    }
    printf("\nTensor %zu has the max size %zu bytes (%.3f MB).\n",
           max_tensor_id_, max_tensor_mem_bytes_,
           static_cast<float>(max_tensor_mem_bytes_) / (1 << 20));
    const size_t arena_size = max_tensor_end_addr_ - min_tensor_start_addr_;
    // BUG FIX: the half-open range must be printed as [start, end); the
    // original passed the end address first and the start address second.
    printf(
        "This memory arena is estimated as[0x%zx, 0x%zx), taking %zu bytes "
        "(%.3f MB).\n",
        min_tensor_start_addr_, max_tensor_end_addr_, arena_size,
        static_cast<float>(arena_size) / (1 << 20));

    // Greedily pick tensors (in address order) whose allocations start at or
    // after the previous pick's end, yielding a non-overlapping chain.
    std::vector<const TensorAllocInfo*> arena_increase_trace;
    size_t last_end_addr = 0;
    for (const auto& info : alloc_info_) {
      if (info.start_addr >= last_end_addr) {
        arena_increase_trace.emplace_back(&info);
        last_end_addr = info.start_addr + info.bytes;
      }
    }
    printf(
        "One possible set of tensors that have non-overlapping memory spaces "
        "with each other, and they take up the whole arena:\n");
    printf("Tensor ");
    // Non-empty whenever any allocation was recorded: the lowest-addressed
    // tensor always satisfies start_addr >= 0.
    for (int i = 0; i < arena_increase_trace.size() - 1; ++i) {
      printf("%zu -> ", arena_increase_trace[i]->tensor_id);
    }
    printf("%zu.\n", arena_increase_trace.back()->tensor_id);
  }

 private:
  struct TensorAllocInfo {
    size_t tensor_id;
    size_t start_addr;
    size_t bytes;
  };

  // Orders by start address; ties broken by larger size first, then id, so
  // the greedy pass in Print() sees the biggest allocation at each address.
  struct TensorAllocInfoCompare {
    bool operator()(const TensorAllocInfo& lhs,
                    const TensorAllocInfo& rhs) const {
      if (lhs.start_addr < rhs.start_addr) return true;
      if (lhs.start_addr == rhs.start_addr) {
        if (lhs.bytes > rhs.bytes) return true;
        if (lhs.bytes == rhs.bytes) return lhs.tensor_id < rhs.tensor_id;
        return false;
      }
      return false;
    }
  };

  const TfLiteAllocationType allocation_type_;
  size_t max_tensor_mem_bytes_ = 0;
  // Sentinel: size_t(-1) until the first tensor is recorded.
  size_t max_tensor_id_ = -1;
  size_t min_tensor_start_addr_ = std::numeric_limits<size_t>::max();
  size_t max_tensor_end_addr_ = 0;
  std::set<TensorAllocInfo, TensorAllocInfoCompare> alloc_info_;
};
// Tracks kTfLiteDynamic tensors: total bytes, count, and the set of tensor
// ids tied for the largest allocation.
class DynamicMemoryInfo {
 public:
  // Records `tensor` (at index `tensor_index`) if it is dynamically
  // allocated and has backing memory.
  void Update(size_t tensor_index, const TfLiteTensor& tensor) {
    if (tensor.allocation_type != kTfLiteDynamic) return;
    if (tensor.data.data == nullptr) return;
    if (tensor.bytes > max_tensor_mem_bytes_) {
      // New strict maximum: restart the list of max-sized tensors.
      // (Cast made explicit for consistency with the tie branch below;
      // the original narrowed size_t -> int implicitly here.)
      max_tensor_mem_bytes_ = tensor.bytes;
      max_tensor_ids_.clear();
      max_tensor_ids_.push_back(static_cast<int>(tensor_index));
    } else if (tensor.bytes == max_tensor_mem_bytes_) {
      // Tied with the current maximum.
      max_tensor_ids_.push_back(static_cast<int>(tensor_index));
    }
    total_mem_bytes_ += tensor.bytes;
    num_total_tensors_++;
  }

  // Prints a human-readable summary to stdout.
  void Print() const {
    printf("kTfLiteDynamic Info: ");
    if (total_mem_bytes_ == 0) {
      printf("not holding any allocation.\n");
      return;
    }
    printf("\n%zu Tensors ", max_tensor_ids_.size());
    PrintIntVector(max_tensor_ids_, false);
    printf(" have the max size %zu bytes (%.3f MB).\n", max_tensor_mem_bytes_,
           static_cast<float>(max_tensor_mem_bytes_) / (1 << 20));
    printf("There are %d dynamic tensors, taking %zu bytes (%.3f MB).\n",
           num_total_tensors_, total_mem_bytes_,
           static_cast<float>(total_mem_bytes_) / (1 << 20));
  }

 private:
  size_t max_tensor_mem_bytes_ = 0;
  // Ids of every tensor whose size equals max_tensor_mem_bytes_.
  std::vector<int> max_tensor_ids_;
  size_t total_mem_bytes_ = 0;
  int num_total_tensors_ = 0;
};
// Aggregates memory statistics across all allocation types of interest:
// the RW arena, the persistent RW arena, the mmap'd (read-only) region, and
// dynamic allocations.
class ModelTensorMemoryInfo {
 public:
  ModelTensorMemoryInfo()
      : rw_info_(kTfLiteArenaRw),
        rw_persistent_info_(kTfLiteArenaRwPersistent),
        mmap_info_(kTfLiteMmapRo) {}

  // Routes `tensor` to every tracker; each ignores tensors outside its
  // allocation type.
  void Update(size_t tensor_index, const TfLiteTensor& tensor) {
    rw_info_.Update(tensor_index, tensor);
    rw_persistent_info_.Update(tensor_index, tensor);
    mmap_info_.Update(tensor_index, tensor);
    dynamic_info_.Update(tensor_index, tensor);
  }

  // Returns the tensor's byte offset from the start of its arena, or -1 if
  // it has no data or its allocation type has no tracked arena.
  int64_t GetOffsetFromArenaStart(const TfLiteTensor& tensor) const {
    if (tensor.data.data == nullptr) return -1;
    size_t tensor_address = reinterpret_cast<size_t>(tensor.data.data);
    if (tensor.allocation_type == kTfLiteArenaRw) {
      return static_cast<int64_t>(tensor_address -
                                  rw_info_.GetArenaStartingAddress());
    }
    if (tensor.allocation_type == kTfLiteArenaRwPersistent) {
      return static_cast<int64_t>(
          tensor_address - rw_persistent_info_.GetArenaStartingAddress());
    }
    if (tensor.allocation_type == kTfLiteMmapRo) {
      return static_cast<int64_t>(tensor_address -
                                  mmap_info_.GetArenaStartingAddress());
    }
    return -1;
  }

  // Prints each tracker's summary to stdout, blank-line separated.
  void Print() const {
    printf("\n");
    rw_info_.Print();
    printf("\n");
    rw_persistent_info_.Print();
    printf("\n");
    mmap_info_.Print();
    printf("\n");
    dynamic_info_.Print();
    printf("\n");
  }

 private:
  MemoryArenaInfo rw_info_;
  MemoryArenaInfo rw_persistent_info_;
  MemoryArenaInfo mmap_info_;
  DynamicMemoryInfo dynamic_info_;
};
template <typename T>
void PrintTotalBytesOfTensors(const Subgraph& subgraph, const T& tensor_ids,
const std::string& prefix = " -> ") {
size_t total = 0;
for (const auto id : tensor_ids) {
const TfLiteTensor* tensor = subgraph.tensor(id);
if (tensor == nullptr) continue;
total += tensor->bytes;
}
printf("%s%zuB (%.2fMB)\n", prefix.c_str(), total,
static_cast<float>(total) / (1 << 20));
}
// Prints `v` like "[1-3,7,9,10]". With `collapse_consecutives`, runs of
// consecutive integers are collapsed to "first-last" ("first,last" for a
// run of exactly two). An empty vector prints "(null)".
void PrintIntVector(const std::vector<int>& v, bool collapse_consecutives,
                    bool add_newline) {
  if (v.empty()) {
    printf("(null)");
    if (add_newline) {
      printf("\n");
    }
    return;
  }

  // Pending run [range_start, range_end], seeded with the first element.
  int range_start = v[0];
  int range_end = range_start;
  // Flushes the pending run followed by `suffix` ("," between runs, "]" at
  // the very end).
  std::function<void(const char*)> print_range = [&](const char* suffix) {
    if (range_end == range_start) {
      printf("%d%s", range_start, suffix);
    } else if (range_end == range_start + 1) {
      printf("%d,%d%s", range_start, range_end, suffix);
    } else {
      printf("%d-%d%s", range_start, range_end, suffix);
    }
  };

  printf("[");
  // Start at 1: v[0] already seeds the pending run above.
  for (int i = 1; i < v.size(); ++i) {
    int current = v[i];
    if (collapse_consecutives && (current == range_end + 1)) {
      // Extend the current run.
      range_end = current;
    } else {
      // Flush the finished run and start a new one at `current`.
      print_range(",");
      range_start = range_end = current;
    }
  }
  print_range("]");
  if (add_newline) {
    printf("\n");
  }
}
// Adapts a TfLiteIntArray (possibly null or empty) to PrintIntVector; null
// and empty arrays print as "(null)".
void PrintTfLiteIntVector(const TfLiteIntArray* v,
                          bool collapse_consecutives = true,
                          bool add_newline = false) {
  std::vector<int> values;
  if (v != nullptr && v->size > 0) {
    values.assign(v->data, v->data + v->size);
  }
  PrintIntVector(values, collapse_consecutives, add_newline);
}
// Returns the canonical enumerator name for a TfLiteType, or "(invalid)" for
// values outside the enum.
const char* TensorTypeName(TfLiteType type) {
  switch (type) {
    case kTfLiteNoType:
      return "kTfLiteNoType";
    case kTfLiteFloat32:
      return "kTfLiteFloat32";
    case kTfLiteInt32:
      return "kTfLiteInt32";
    case kTfLiteUInt32:
      return "kTfLiteUInt32";
    case kTfLiteUInt8:
      return "kTfLiteUInt8";
    case kTfLiteInt8:
      return "kTfLiteInt8";
    case kTfLiteInt64:
      return "kTfLiteInt64";
    case kTfLiteUInt64:
      return "kTfLiteUInt64";
    case kTfLiteString:
      return "kTfLiteString";
    case kTfLiteBool:
      return "kTfLiteBool";
    case kTfLiteUInt16:
      return "kTfLiteUInt16";
    case kTfLiteInt16:
      return "kTfLiteInt16";
    case kTfLiteComplex64:
      return "kTfLiteComplex64";
    case kTfLiteComplex128:
      return "kTfLiteComplex128";
    case kTfLiteFloat16:
      return "kTfLiteFloat16";
    case kTfLiteBFloat16:
      return "kTfLiteBFloat16";
    case kTfLiteFloat64:
      return "kTfLiteFloat64";
    case kTfLiteResource:
      return "kTfLiteResource";
    case kTfLiteVariant:
      return "kTfLiteVariant";
    case kTfLiteInt4:
      return "kTfLiteInt4";
  }
  return "(invalid)";
}
// Returns `str` shortened to at most `size_limit` characters, marking the
// cut with "..." at the end (`truncate_at_end`) or at the start (default).
// A null input yields "(nil)"; a limit of 3 or less yields only dots.
// Note: a negative `size_limit` compares as a huge unsigned value (as in
// the original), so the string is returned untruncated.
std::string TruncateString(const char* str, int size_limit,
                           bool truncate_at_end = false) {
  if (str == nullptr) return "(nil)";
  const std::string full(str);
  if (full.size() <= static_cast<size_t>(size_limit)) return full;
  if (size_limit <= 3) return std::string(size_limit, '.');
  // Characters kept from the original, leaving room for the three dots.
  const size_t kept = static_cast<size_t>(size_limit) - 3;
  return truncate_at_end ? full.substr(0, kept) + "..."
                         : "..." + full.substr(full.size() - kept);
}
}
// Dumps a human-readable description of the interpreter's state to stdout:
// for every subgraph, its tensor table (id/name/type/allocation type/size/
// shape/arena offsets), every node with its input/output/intermediate/
// temporary tensors and delegation status, the execution plan, and finally
// a summary of arena/dynamic/resource memory usage.
// The *_display_length arguments cap the printed widths of the tensor name,
// tensor type and allocation-type columns.
// Fix: corrected the "Dyanmic Tensors" typo in the memory summary.
void PrintInterpreterState(const Interpreter* interpreter,
                           const int32_t tensor_name_display_length,
                           const int32_t tensor_type_display_length,
                           const int32_t alloc_type_display_length) {
  const size_t num_subgraphs = interpreter->subgraphs_size();
  printf("Interpreter has %zu subgraphs.\n\n", num_subgraphs);
  for (int i = 0; i < num_subgraphs; ++i) {
    const Subgraph& subgraph = *(interpreter->subgraph(i));
    printf("-----------Subgraph-%d has %zu tensors and %zu nodes------------\n",
           i, subgraph.tensors_size(), subgraph.nodes_size());
    printf("%zu Inputs: ", subgraph.inputs().size());
    PrintIntVector(subgraph.inputs());
    PrintTotalBytesOfTensors(subgraph, subgraph.inputs());
    printf("%zu Outputs: ", subgraph.outputs().size());
    PrintIntVector(subgraph.outputs());
    PrintTotalBytesOfTensors(subgraph, subgraph.outputs());
    printf("\n");
    // First pass: collect per-tensor arena-offset statistics so each row
    // below can show the tensor's position relative to the arena start.
    ModelTensorMemoryInfo tensor_mem_info;
    for (size_t tensor_index = 0; tensor_index < subgraph.tensors_size();
         tensor_index++) {
      const TfLiteTensor* tensor =
          subgraph.tensor(static_cast<int>(tensor_index));
      tensor_mem_info.Update(tensor_index, *tensor);
    }
    // Build the variable-width middle part of the row format string from the
    // requested column widths (e.g. "%-25s %-15s %-18s").
    std::stringstream var_length_fs;
    var_length_fs << "%-" << tensor_name_display_length << "s %-"
                  << tensor_type_display_length << "s %-"
                  << alloc_type_display_length << "s";
    printf(
        ("Tensor %3s " + var_length_fs.str() + " %-18s %-10s %-16s\n").c_str(),
        "ID", "Name", "Type", "AllocType", "Size (Bytes/MB)", "Shape",
        "MemAddr-Offset");
    // Second pass: one formatted row per tensor.
    for (size_t tensor_index = 0; tensor_index < subgraph.tensors_size();
         tensor_index++) {
      const TfLiteTensor* tensor =
          subgraph.tensor(static_cast<int>(tensor_index));
      printf(("Tensor %3zu " + var_length_fs.str() + " %-8zu / %.2f ").c_str(),
             tensor_index,
             TruncateString(tensor->name, tensor_name_display_length,
                            /*truncate_at_end=*/true)
                 .c_str(),
             TruncateString(TensorTypeName(tensor->type),
                            tensor_type_display_length)
                 .c_str(),
             TruncateString(AllocTypeName(tensor->allocation_type),
                            alloc_type_display_length)
                 .c_str(),
             tensor->bytes, (static_cast<float>(tensor->bytes) / (1 << 20)));
      PrintTfLiteIntVector(tensor->dims, /*collapse_consecutives=*/false);
      // [start, end) byte offsets inside the arena; -1 when the tensor is
      // not arena-allocated.
      const int64_t start_offset =
          tensor_mem_info.GetOffsetFromArenaStart(*tensor);
      const int64_t end_offset =
          start_offset == -1
              ? -1
              : start_offset + static_cast<int64_t>(tensor->bytes);
      printf(" [%" PRId64 ", %" PRId64 ")\n", start_offset, end_offset);
    }
    tensor_mem_info.Print();
    subgraph.DumpMemoryPlannerDebugInfo();
    // Precompute which nodes were swallowed by a delegate so each node's row
    // can say who replaced it.
    SubgraphDelegationMetadata delegation_metadata =
        GetNodeDelegationMetadata(subgraph);
    for (size_t node_index = 0; node_index < subgraph.nodes_size();
         node_index++) {
      const std::pair<TfLiteNode, TfLiteRegistration>* node_and_reg =
          subgraph.node_and_registration(static_cast<int>(node_index));
      const TfLiteNode& node = node_and_reg->first;
      const TfLiteRegistration& reg = node_and_reg->second;
      std::string delegated_status;
      bool is_node_delegated = false;
      // Used in place of the real tensor list when a node was delegated, so
      // delegated tensors are not double-counted in the byte totals.
      TfLiteIntArray empty_int_array;
      empty_int_array.size = 0;
      if (node.delegate == nullptr) {
        if (delegation_metadata.is_node_delegated[node_index]) {
          delegated_status = "(delegated by node ";
          delegated_status.append(
              std::to_string(delegation_metadata.replaced_by_node[node_index]));
          delegated_status.append(")");
          is_node_delegated = true;
        } else {
          delegated_status = "(not delegated)";
        }
      }
      if (reg.custom_name != nullptr) {
        printf("Node %3zu Operator Custom Name %s %s\n", node_index,
               reg.custom_name, delegated_status.c_str());
      } else {
        printf("Node %3zu Operator Builtin Code %3d %s %s\n", node_index,
               reg.builtin_code, EnumNamesBuiltinOperator()[reg.builtin_code],
               delegated_status.c_str());
      }
      printf("  %d Input Tensors:",
             node.inputs != nullptr ? node.inputs->size : 0);
      if (node.inputs) {
        PrintTfLiteIntVector(
            node.inputs,
            /*collapse_consecutives=*/(node.delegate != nullptr));
        PrintTotalBytesOfTensors(
            subgraph, is_node_delegated ? TfLiteIntArrayView(&empty_int_array)
                                        : TfLiteIntArrayView(node.inputs));
      }
      printf("  %d Output Tensors:",
             node.outputs != nullptr ? node.outputs->size : 0);
      if (node.outputs) {
        PrintTfLiteIntVector(node.outputs);
        PrintTotalBytesOfTensors(
            subgraph, is_node_delegated ? TfLiteIntArrayView(&empty_int_array)
                                        : TfLiteIntArrayView(node.outputs));
      }
      if (node.intermediates && node.intermediates->size) {
        printf("  %d Intermediate Tensors:", node.intermediates->size);
        PrintTfLiteIntVector(node.intermediates);
        PrintTotalBytesOfTensors(subgraph,
                                 is_node_delegated
                                     ? TfLiteIntArrayView(&empty_int_array)
                                     : TfLiteIntArrayView(node.intermediates));
      }
      if (node.temporaries && node.temporaries->size) {
        printf("  %d Temporary Tensors:", node.temporaries->size);
        PrintTfLiteIntVector(node.temporaries);
        PrintTotalBytesOfTensors(
            subgraph, is_node_delegated ? TfLiteIntArrayView(&empty_int_array)
                                        : TfLiteIntArrayView(node.temporaries));
      }
    }
    printf("\nExecution plan as the list of %zu nodes invoked in-order: ",
           subgraph.execution_plan().size());
    PrintIntVector(subgraph.execution_plan(), /*collapse_consecutives=*/true,
                   /*add_newline=*/true);
    // Expand each delegate node into the list of original nodes it replaced.
    if (delegation_metadata.has_delegate_applied) {
      printf("Among these nodes in the execution plan:\n");
      for (int node_id : subgraph.execution_plan()) {
        const std::pair<TfLiteNode, TfLiteRegistration>* node_and_reg =
            subgraph.node_and_registration(node_id);
        const TfLiteNode& node = node_and_reg->first;
        auto* const delegate = node.delegate;
        if (delegate == nullptr) continue;
        const char* delegate_name = node_and_reg->second.custom_name;
        auto* delegate_params =
            static_cast<TfLiteDelegateParams*>(node.builtin_data);
        printf("  Node %d is a %s node (%p), which has delegated %d nodes: ",
               node_id, delegate_name == nullptr ? "[n/a]" : delegate_name,
               delegate, delegate_params->nodes_to_replace->size);
        PrintTfLiteIntVector(delegate_params->nodes_to_replace,
                             /*collapse_consecutives=*/true,
                             /*add_newline=*/true);
      }
    }
    printf("--------------Subgraph-%d dump has completed--------------\n\n", i);
  }
  printf("--------------Memory Arena Status Start--------------\n");
  // Aggregate memory across subgraphs. Resource memory is shared, so it is
  // only counted once (from the primary subgraph).
  size_t total_arena_memory_bytes = 0;
  size_t total_dynamic_memory_bytes = 0;
  size_t total_resource_bytes = 0;
  for (int i = 0; i < num_subgraphs; ++i) {
    const Subgraph& subgraph = *(interpreter->subgraph(i));
    Subgraph::SubgraphAllocInfo alloc_info;
    subgraph.GetMemoryAllocInfo(&alloc_info);
    total_arena_memory_bytes += alloc_info.arena_size;
    total_arena_memory_bytes += alloc_info.arena_persist_size;
    total_dynamic_memory_bytes += alloc_info.dynamic_size;
    if (i == 0) {
      total_resource_bytes = alloc_info.resource_size;
    }
  }
  size_t total_memory_bytes = total_arena_memory_bytes +
                              total_dynamic_memory_bytes + total_resource_bytes;
  printf("Total memory usage: %zu bytes (%.3f MB)\n", total_memory_bytes,
         static_cast<float>(total_memory_bytes) / (1 << 20));
  printf("- Total arena memory usage: %zu bytes (%.3f MB)\n",
         total_arena_memory_bytes,
         static_cast<float>(total_arena_memory_bytes) / (1 << 20));
  printf("- Total dynamic memory usage: %zu bytes (%.3f MB)\n",
         total_dynamic_memory_bytes,
         static_cast<float>(total_dynamic_memory_bytes) / (1 << 20));
  if (total_resource_bytes) {
    printf("- Total resource memory usage: %zu bytes (%.3f MB)\n",
           total_resource_bytes,
           static_cast<float>(total_resource_bytes) / (1 << 20));
  }
  putchar('\n');
  // Per-subgraph breakdown with each category's share of the total.
  for (int i = 0; i < num_subgraphs; ++i) {
    const Subgraph& subgraph = *(interpreter->subgraph(i));
    Subgraph::SubgraphAllocInfo alloc_info;
    subgraph.GetMemoryAllocInfo(&alloc_info);
    if (alloc_info.arena_size) {
      printf(
          "Subgraph#%-3d %-18s %10zu (%.2f%%)\n", i, "Arena (Normal)",
          alloc_info.arena_size,
          static_cast<float>(alloc_info.arena_size * 100) / total_memory_bytes);
    }
    if (alloc_info.arena_persist_size) {
      printf("Subgraph#%-3d %-18s %10zu (%.2f%%)\n", i, "Arena (Persistent)",
             alloc_info.arena_persist_size,
             static_cast<float>(alloc_info.arena_persist_size * 100) /
                 total_memory_bytes);
    }
    if (alloc_info.dynamic_size) {
      printf("Subgraph#%-3d %-18s %10zu (%.2f%%)\n", i, "Dynamic Tensors",
             alloc_info.dynamic_size,
             static_cast<float>(alloc_info.dynamic_size * 100) /
                 total_memory_bytes);
    }
  }
  printf("--------------Memory Arena Status End--------------\n\n");
}
} | #include "tensorflow/lite/optional_debug_tools.h"
#include <algorithm>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace {
// Fills the interpreter's single float32 input tensor with 1.0f so that a
// subsequent Invoke() runs on well-defined data.
void InitInputTensorData(Interpreter* interpreter) {
  ASSERT_EQ(interpreter->inputs().size(), 1);
  TfLiteTensor* t = interpreter->input_tensor(0);
  ASSERT_EQ(t->type, kTfLiteFloat32);
  float* data = static_cast<float*>(t->data.data);
  // Element count derived from the tensor's byte size.
  int num_elements = t->bytes / sizeof(float);
  std::fill(data, data + num_elements, 1.0f);
}
}
// Smoke test: PrintInterpreterState must not crash when called before tensor
// allocation, after allocation, and after a successful Invoke().
TEST(OptionalDebugTools, PrintInterpreterState) {
  auto model = FlatBufferModel::BuildFromFile(
      "tensorflow/lite/testdata/add.bin");
  ASSERT_TRUE(model);
  std::unique_ptr<Interpreter> interpreter;
  ASSERT_EQ(
      InterpreterBuilder(
          *model, ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
          &interpreter),
      kTfLiteOk);
  // Dump before allocation (tensors have no arena offsets yet).
  PrintInterpreterState(interpreter.get());
  ASSERT_EQ(interpreter->AllocateTensors(), kTfLiteOk);
  // Dump after allocation.
  PrintInterpreterState(interpreter.get());
  InitInputTensorData(interpreter.get());
  ASSERT_EQ(interpreter->Invoke(), kTfLiteOk);
  // Dump after execution.
  PrintInterpreterState(interpreter.get());
}
// Smoke test: PrintInterpreterState must handle a graph whose nodes have
// been replaced by the XNNPACK delegate (delegation status is printed).
TEST(OptionalDebugTools, PrintInterpreterStateWithDelegate) {
  auto model = FlatBufferModel::BuildFromFile(
      "tensorflow/lite/testdata/add.bin");
  ASSERT_TRUE(model);
  std::unique_ptr<Interpreter> interpreter;
  ASSERT_EQ(
      InterpreterBuilder(
          *model, ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
          &interpreter),
      kTfLiteOk);
  ASSERT_EQ(interpreter->AllocateTensors(), kTfLiteOk);
  // Apply XNNPACK; the delegate pointer must outlive the interpreter usage.
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  ASSERT_EQ(interpreter->ModifyGraphWithDelegate(xnnpack_delegate.get()),
            kTfLiteOk);
  InitInputTensorData(interpreter.get());
  ASSERT_EQ(interpreter->Invoke(), kTfLiteOk);
  PrintInterpreterState(interpreter.get());
}
// Verifies GetNodeDelegationMetadata: before a delegate is applied no node
// is marked as delegated; after applying XNNPACK, the replaced nodes report
// the delegate node that absorbed them.
TEST(OptionalDebugTools, GetNodeDelegationMetadata) {
  auto model = FlatBufferModel::BuildFromFile(
      "tensorflow/lite/testdata/add.bin");
  ASSERT_TRUE(model);
  std::unique_ptr<Interpreter> interpreter;
  ASSERT_EQ(
      InterpreterBuilder(
          *model, ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
          &interpreter),
      kTfLiteOk);
  ASSERT_EQ(interpreter->AllocateTensors(), kTfLiteOk);
  // No delegate applied yet: every node must be undelegated (-1 marker).
  auto metadata = GetNodeDelegationMetadata(*interpreter->subgraph(0));
  EXPECT_FALSE(metadata.has_delegate_applied);
  for (int i = 0; i < metadata.is_node_delegated.size(); ++i) {
    EXPECT_FALSE(metadata.is_node_delegated[i]);
    EXPECT_EQ(metadata.replaced_by_node[i], -1);
  }
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  ASSERT_EQ(interpreter->ModifyGraphWithDelegate(xnnpack_delegate.get()),
            kTfLiteOk);
  auto metadata_with_delegate =
      GetNodeDelegationMetadata(*interpreter->subgraph(0));
  EXPECT_TRUE(metadata_with_delegate.has_delegate_applied);
  // Original nodes 0 and 1 were absorbed into delegate node 2; node 2 itself
  // is the delegate kernel and therefore "not delegated".
  EXPECT_EQ(metadata_with_delegate.is_node_delegated[0], true);
  EXPECT_EQ(metadata_with_delegate.replaced_by_node[0], 2);
  EXPECT_EQ(metadata_with_delegate.is_node_delegated[1], true);
  EXPECT_EQ(metadata_with_delegate.replaced_by_node[1], 2);
  EXPECT_EQ(metadata_with_delegate.is_node_delegated[2], false);
  EXPECT_EQ(metadata_with_delegate.replaced_by_node[2], -1);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/optional_debug_tools.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/optional_debug_tools_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b5e79173-7ffb-4558-b0cf-0e4bb23d8675 | cpp | tensorflow/tensorflow | arena_planner | tensorflow/lite/arena_planner.cc | tensorflow/lite/arena_planner_test.cc | #include "tensorflow/lite/arena_planner.h"
#include <stddef.h>
#include <algorithm>
#include <cstdint>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/graph_info.h"
#include "tensorflow/lite/simple_memory_arena.h"
namespace tflite {
// Sentinel: the "last active node" tracked by the planner has not been
// computed yet (forces a full recalculation on the next plan).
constexpr int32_t kLastActiveNodeUndefined =
    std::numeric_limits<int32_t>::max();
// Sentinel: a tensor has no allocation/deallocation node assigned.
constexpr int32_t kNodeNotAssigned = std::numeric_limits<int32_t>::max();
// Tensors of at most this many bytes are never worth sharing in place.
constexpr int32_t kScalarTensorBytes = 4;
// Constructs a planner over `graph_info`. `preserve_all_tensors` disables
// tensor deallocation and buffer sharing (useful for debugging);
// `tensor_alignment` is the byte alignment for arena allocations;
// `subgraph_index` tags both arenas for tracing/diagnostics.
ArenaPlanner::ArenaPlanner(TfLiteContext* context,
                           std::unique_ptr<GraphInfo> graph_info,
                           bool preserve_all_tensors, int tensor_alignment,
                           int subgraph_index)
    : context_(context),
      graph_info_(std::move(graph_info)),
      arena_(kDefaultArenaAlignment, subgraph_index),
      has_nonpersistent_memory_(false),
      persistent_arena_(kDefaultArenaAlignment, subgraph_index),
      preserve_all_tensors_(preserve_all_tensors),
      tensor_alignment_(tensor_alignment),
      last_active_node_(kLastActiveNodeUndefined) {}
// Releases both arena buffers when the planner is destroyed.
ArenaPlanner::~ArenaPlanner() {
  arena_.ReleaseBuffer();
  persistent_arena_.ReleaseBuffer();
}
// Returns the base address of the arena backing allocations of the given
// type, or 0 for allocation types that are not arena-managed.
std::intptr_t ArenaPlanner::BasePointer(TfLiteAllocationType type) {
  switch (type) {
    case kTfLiteArenaRwPersistent:
      return persistent_arena_.BasePointer();
    case kTfLiteArenaRw:
      return arena_.BasePointer();
    default:
      return 0;
  }
}
// Forgets all allocation plans in both arenas and resets the per-tensor
// allocation records. Does not release the underlying buffers.
TfLiteStatus ArenaPlanner::ResetAllocations() {
  TF_LITE_ENSURE_STATUS(arena_.ClearPlan());
  TF_LITE_ENSURE_STATUS(persistent_arena_.ClearPlan());
  allocs_.clear();
  allocs_.resize(graph_info_->num_tensors());
  // No node's allocations are considered active any more.
  last_active_node_ = kLastActiveNodeUndefined;
  return kTfLiteOk;
}
// Invalidates the allocations of all kTfLiteArenaRw tensors first allocated
// strictly after `node`, so subsequent nodes can be re-planned (e.g. after a
// mid-execution tensor resize).
TfLiteStatus ArenaPlanner::ResetAllocationsAfter(int node) {
  TfLiteTensor* tensors = graph_info_->tensors();
  for (int i = 0; i < static_cast<int>(allocs_.size()); ++i) {
    if (allocs_[i].first_node > node && allocs_[i].size > 0) {
      TfLiteTensor& tensor = tensors[i];
      // Only arena-RW tensors are owned by the non-persistent arena.
      if (tensor.allocation_type == kTfLiteArenaRw) {
        allocs_[i].reset();
        tensor.data.raw = nullptr;
      }
    }
  }
  // If we rewound past the previously planned point, recompute the arena's
  // active-allocation set from scratch; otherwise just purge allocs after
  // `node`.
  if (last_active_node_ > node) {
    arena_.CalculateActiveAllocs(allocs_, node);
  } else {
    arena_.PurgeAfter(node);
  }
  last_active_node_ = node;
  return kTfLiteOk;
}
int ArenaPlanner::FindSharedTensor(int tensor_index) {
auto actual_tensor_it = actual_tensor_id_.find(tensor_index);
if (actual_tensor_it != actual_tensor_id_.end()) {
tensor_index = actual_tensor_it->second;
}
return tensor_index;
}
bool ArenaPlanner::InputTensorCanBeShared(const TfLiteTensor& input_tensor,
const TfLiteTensor& output_tensor,
int input_id, int output_id,
bool tensor_changed) {
if (tensor_changed) {
if (input_tensor.bytes != output_tensor.bytes ||
input_tensor.bytes <= kScalarTensorBytes) {
return false;
}
if (refcounts_[input_id] > 1) {
return false;
}
}
for (int input : graph_info_->inputs()) {
if (input == input_id) {
return false;
}
}
for (int output : graph_info_->outputs()) {
if (output == output_id) {
return false;
}
}
TfLiteAllocationType input_allocation_type = input_tensor.allocation_type;
TfLiteAllocationType output_allocation_type = output_tensor.allocation_type;
if (input_allocation_type != output_allocation_type &&
input_allocation_type != kTfLiteArenaRw) {
return false;
}
if (preserve_all_tensors_) {
return false;
}
return true;
}
// Scans all execution nodes and records, in actual_tensor_id_, which output
// tensors may reuse the buffer of one of their inputs (in-place execution).
// A mapping output_id -> root shared tensor id is added only when the op's
// registration declares the input shareable and InputTensorCanBeShared()
// approves the pairing.
// Fix: the inner loop index previously shadowed the outer node index `i`
// (bugprone variable shadowing); renamed to `j`.
void ArenaPlanner::IdentifyInPlaceTensors() {
  actual_tensor_id_.clear();
  const int num_execution_nodes = graph_info_->num_execution_nodes();
  TfLiteTensor* tensors = graph_info_->tensors();
  for (int i = 0; i < num_execution_nodes; ++i) {
    const TfLiteRegistration& registration = graph_info_->registration(i);
    const TfLiteNode& node = graph_info_->node(i);
    if (node.outputs->size < 1) continue;
    // The op modifies the shared buffer unless it declares the input data
    // unmodified; modification imposes stricter sharing rules.
    bool tensor_changed =
        !(registration.inplace_operator & kTfLiteInplaceOpDataUnmodified);
    if (registration.inplace_operator == kTfLiteInplaceOpNone) {
      continue;
    }
    int32_t input_id = -1;
    int32_t output_id = node.outputs->data[0];
    const TfLiteTensor& output_tensor = tensors[output_id];
    const int loop_end =
        std::min(kTfLiteMaxSharableOpInputs, node.inputs->size);
    // Find the first input that the op declares shareable and that passes
    // the sharing constraints.
    for (int j = 0; j < loop_end; ++j) {
      if (node.inputs->data[j] == kTfLiteOptionalTensor) {
        continue;
      }
      const bool input_shareable =
          registration.inplace_operator & (kTfLiteInplaceOpInput0Shared << j);
      if (input_shareable) {
        const TfLiteTensor& input_tensor = tensors[node.inputs->data[j]];
        if (InputTensorCanBeShared(input_tensor, output_tensor,
                                   node.inputs->data[j], output_id,
                                   tensor_changed)) {
          input_id = node.inputs->data[j];
          break;
        }
      }
    }
    if (input_id == -1) {
      continue;
    }
    // Follow an existing sharing chain to its root tensor.
    int32_t actual_output_tensor_id = FindSharedTensor(input_id);
    if (tensor_changed) {
      // A buffer the op mutates may only be shared when no other node still
      // reads the root tensor.
      if (refcounts_[actual_output_tensor_id] > 1) {
        continue;
      }
    }
    actual_tensor_id_[output_id] = actual_output_tensor_id;
  }
}
// Computes, for every tensor, the node at which it must be allocated
// (alloc_node_) and the node after which it may be freed (dealloc_node_),
// using reference counting over the execution plan. Also identifies buffers
// that can be shared in place. Must run before ExecuteAllocations().
TfLiteStatus ArenaPlanner::PlanAllocations() {
  const size_t num_tensors = graph_info_->num_tensors();
  TF_LITE_ENSURE_STATUS(ResetAllocations());
  // kNodeNotAssigned == "not allocated/deallocated by any node yet".
  alloc_node_.assign(num_tensors, kNodeNotAssigned);
  dealloc_node_.assign(num_tensors, kNodeNotAssigned);
  nodes_to_tensors_.clear();
  nodes_to_tensors_.resize(
      std::max(graph_info_->num_execution_nodes(), (size_t)1), {});
  refcounts_.assign(num_tensors, 0);
  // Records the first node that requires `tensor` to be allocated.
  auto allocate = [this](int node, int tensor) -> TfLiteStatus {
    if (alloc_node_[tensor] != kNodeNotAssigned) {
      // Already allocated by an earlier node.
      return kTfLiteOk;
    }
    TF_LITE_ENSURE(context_, dealloc_node_[tensor] == kNodeNotAssigned);
    alloc_node_[tensor] = node;
    return kTfLiteOk;
  };
  // Records the last node after which `tensor` may be freed.
  auto deallocate = [this](int node, int tensor) -> TfLiteStatus {
    if (alloc_node_[tensor] == kNodeNotAssigned) {
      // Never allocated, so nothing to deallocate.
      return kTfLiteOk;
    }
    TF_LITE_ENSURE(context_, dealloc_node_[tensor] == kNodeNotAssigned);
    dealloc_node_[tensor] = node;
    return kTfLiteOk;
  };
  // Graph outputs must stay alive until the end of execution.
  for (int tensor_index : graph_info_->outputs()) {
    if (tensor_index != kTfLiteOptionalTensor) {
      ++refcounts_[tensor_index];
    }
  }
  // Variables persist across invocations; allocate them up front.
  for (int tensor_index : graph_info_->variables()) {
    ++refcounts_[tensor_index];
    TF_LITE_ENSURE(context_, tensor_index != kTfLiteOptionalTensor);
    TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
    nodes_to_tensors_[0].insert(tensor_index);
  }
  // Graph inputs are allocated before the first node runs.
  for (int tensor_index : graph_info_->inputs()) {
    if (tensor_index != kTfLiteOptionalTensor) {
      ++refcounts_[tensor_index];
      TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
      nodes_to_tensors_[0].insert(tensor_index);
    }
  }
  // `refcounts_` (the member) keeps raw per-tensor consumer counts for the
  // sharing analysis; the local `refcounts` copy below is re-counted against
  // shared-buffer roots to drive deallocation.
  std::vector<int> refcounts = refcounts_;
  const int num_execution_nodes = graph_info_->num_execution_nodes();
  for (size_t i = 0; i < num_execution_nodes; ++i) {
    const TfLiteNode& node = graph_info_->node(i);
    TfLiteIntArray* node_inputs = node.inputs;
    for (int j = 0; j < node_inputs->size; ++j) {
      int tensor_index = node_inputs->data[j];
      if (tensor_index != kTfLiteOptionalTensor) {
        ++refcounts_[tensor_index];
      }
    }
  }
  // Decide which outputs alias which inputs before computing lifetimes.
  IdentifyInPlaceTensors();
  // Count consumers against the root of each shared-buffer chain so a shared
  // buffer is freed only once all aliases are dead.
  for (size_t i = 0; i < num_execution_nodes; ++i) {
    const TfLiteNode& node = graph_info_->node(i);
    TfLiteIntArray* node_inputs = node.inputs;
    for (int j = 0; j < node_inputs->size; ++j) {
      int tensor_index = node_inputs->data[j];
      if (tensor_index != kTfLiteOptionalTensor) {
        tensor_index = FindSharedTensor(tensor_index);
        ++refcounts[tensor_index];
      }
    }
  }
  // Walk the execution plan: allocate each node's outputs at that node, and
  // deallocate inputs whose last consumer has just run.
  for (size_t i = 0; i < num_execution_nodes; ++i) {
    const TfLiteNode& node = graph_info_->node(i);
    TfLiteIntArray* node_outputs = node.outputs;
    for (int j = 0; j < node_outputs->size; ++j) {
      int tensor_index = node_outputs->data[j];
      if (tensor_index == kTfLiteOptionalTensor) continue;
      nodes_to_tensors_[i].insert(tensor_index);
      TF_LITE_ENSURE_STATUS(allocate(i, tensor_index));
    }
    if (!preserve_all_tensors_) {
      TfLiteIntArray* node_inputs = node.inputs;
      for (int j = 0; j < node_inputs->size; ++j) {
        int tensor_index = node_inputs->data[j];
        if (tensor_index != kTfLiteOptionalTensor) {
          tensor_index = FindSharedTensor(tensor_index);
          --refcounts[tensor_index];
          if (refcounts[tensor_index] == 0) {
            TF_LITE_ENSURE_STATUS(deallocate(i, tensor_index));
          }
        }
      }
    }
  }
  return kTfLiteOk;
}
// Performs arena allocation for nodes in [first_node, last_node]: registers
// node temporaries, computes arena offsets, commits both arenas, and
// resolves tensor data pointers (all tensors after a buffer reallocation,
// otherwise only the freshly allocated ones).
TfLiteStatus ArenaPlanner::ExecuteAllocations(int first_node, int last_node) {
  const size_t num_tensors = graph_info_->num_tensors();
  // The graph may have grown (ops can add temporaries during Prepare).
  TF_LITE_ENSURE(context_, num_tensors >= allocs_.size());
  alloc_node_.resize(num_tensors, kNodeNotAssigned);
  dealloc_node_.resize(num_tensors, kNodeNotAssigned);
  allocs_.resize(num_tensors);
  const int num_execution_nodes = graph_info_->num_execution_nodes();
  for (size_t i = first_node;
       i <= static_cast<size_t>(last_node) && i < num_execution_nodes; ++i) {
    const TfLiteNode& node = graph_info_->node(i);
    TfLiteIntArray* node_temporaries = node.temporaries;
    for (int j = 0; j < node_temporaries->size; ++j) {
      int tensor_index = node_temporaries->data[j];
      // Temporaries live only for the duration of their own node.
      alloc_node_[tensor_index] = i;
      nodes_to_tensors_[i].insert(tensor_index);
      if (!preserve_all_tensors_) {
        dealloc_node_[tensor_index] = i;
      }
    }
  }
  std::vector<int32_t> tensors_allocated;
  TF_LITE_ENSURE_STATUS(
      CalculateAllocations(first_node, last_node, &tensors_allocated));
  bool arena_reallocated = false;
  TF_LITE_ENSURE_STATUS(Commit(&arena_reallocated));
  TfLiteTensor* tensors = graph_info_->tensors();
  if (arena_reallocated) {
    // The arena buffer moved: every tensor pointer is stale.
    for (int i = 0; i < static_cast<int>(num_tensors); ++i) {
      TF_LITE_ENSURE_STATUS(ResolveTensorAllocation(i, tensors));
    }
  } else {
    // Buffer unchanged: only the new allocations need resolving.
    for (int i = 0; i < static_cast<int>(tensors_allocated.size()); ++i) {
      TF_LITE_ENSURE_STATUS(
          ResolveTensorAllocation(tensors_allocated[i], tensors));
    }
  }
  return kTfLiteOk;
}
// Frees the non-persistent arena buffer and clears the data pointers of all
// tensors that lived in it, so they cannot be dereferenced while released.
TfLiteStatus ArenaPlanner::ReleaseNonPersistentMemory() {
  TF_LITE_ENSURE_STATUS(arena_.ReleaseBuffer());
  has_nonpersistent_memory_ = false;
  TfLiteTensor* tensors = graph_info_->tensors();
  const int tensor_count = static_cast<int>(graph_info_->num_tensors());
  for (int tensor_index = 0; tensor_index < tensor_count; ++tensor_index) {
    TfLiteTensor& tensor = tensors[tensor_index];
    if (tensor.allocation_type == kTfLiteArenaRw) {
      tensor.data.raw = nullptr;
    }
  }
  return kTfLiteOk;
}
// Recreates the non-persistent arena buffer (after a prior release) and
// re-resolves the data pointers of all kTfLiteArenaRw tensors into it.
TfLiteStatus ArenaPlanner::AcquireNonPersistentMemory() {
  bool reallocated;
  TF_LITE_ENSURE_STATUS(arena_.Commit(&reallocated));
  has_nonpersistent_memory_ = true;
  TfLiteTensor* tensors = graph_info_->tensors();
  for (int i = 0; i < static_cast<int>(graph_info_->num_tensors()); ++i) {
    TfLiteTensor& tensor = tensors[i];
    if (tensor.allocation_type == kTfLiteArenaRw) {
      TF_LITE_ENSURE_STATUS(ResolveTensorAllocation(i, tensors));
    }
  }
  return kTfLiteOk;
}
// Returns true while the non-persistent arena buffer is materialized.
bool ArenaPlanner::HasNonPersistentMemory() {
  return has_nonpersistent_memory_;
}
// Dumps both arenas' allocation layouts for the given execution plan.
void ArenaPlanner::DumpDebugInfo(const std::vector<int>& execution_plan) const {
  arena_.DumpDebugInfo("kTfLiteArenaRw Dump:", execution_plan);
  persistent_arena_.DumpDebugInfo("kTfLiteArenaRwPersistent Dump:",
                                  execution_plan);
}
// Reports the current byte sizes of the RW and persistent arena buffers.
void ArenaPlanner::GetAllocInfo(size_t* arena_size,
                                size_t* arena_persist_size) const {
  *arena_size = arena_.GetBufferSize();
  *arena_persist_size = persistent_arena_.GetBufferSize();
}
// Commits both arenas, (re)allocating their underlying buffers as needed.
// *reallocated becomes true when either arena had to move its buffer, in
// which case all tensor data pointers must be re-resolved by the caller.
TfLiteStatus ArenaPlanner::Commit(bool* reallocated) {
  bool rw_moved = false;
  bool persistent_moved = false;
  TF_LITE_ENSURE_STATUS(arena_.Commit(&rw_moved));
  // A committed RW arena means non-persistent memory is available again.
  has_nonpersistent_memory_ = true;
  TF_LITE_ENSURE_STATUS(persistent_arena_.Commit(&persistent_moved));
  *reallocated = rw_moved || persistent_moved;
  return kTfLiteOk;
}
// Sorts `tensors_to_allocate` into the order the greedy arena allocator
// should process them: tensors allocated at node 0 and never freed (whole-
// inference lifetime) come first, in index order; remaining tensors follow
// by decreasing size, with ties broken by earliest allocation node.
void ArenaPlanner::CreateTensorAllocationVector(
    std::vector<int32_t>* tensors_to_allocate) {
  const TfLiteTensor* tensors = this->graph_info_->tensors();
  auto tensor_compare = [&](int idx1, int idx2) {
    // Whole-lifetime tensors sort before everything else.
    if (alloc_node_[idx1] == 0 && dealloc_node_[idx1] == kNodeNotAssigned) {
      if (alloc_node_[idx2] == 0 && dealloc_node_[idx2] == kNodeNotAssigned) {
        return idx1 < idx2;
      }
      return true;
    }
    if (alloc_node_[idx2] == 0 && dealloc_node_[idx2] == kNodeNotAssigned) {
      return false;
    }
    // Placing larger tensors first reduces fragmentation in the greedy
    // allocator.
    auto size1 = tensors[idx1].bytes;
    auto size2 = tensors[idx2].bytes;
    if (size1 != size2) {
      return size1 > size2;
    }
    return alloc_node_[idx1] < alloc_node_[idx2];
  };
  std::sort(tensors_to_allocate->begin(), tensors_to_allocate->end(),
            tensor_compare);
}
std::vector<int32_t> ArenaPlanner::GetTensorsToAllocate(int first_node,
int last_node) {
int num_tensors = static_cast<int>(graph_info_->num_tensors());
std::vector<int32_t> tensors_to_allocate;
tensors_to_allocate.reserve(num_tensors);
for (int i = first_node; i <= last_node; ++i) {
tensors_to_allocate.insert(tensors_to_allocate.end(),
nodes_to_tensors_[i].begin(),
nodes_to_tensors_[i].end());
}
return tensors_to_allocate;
}
// Computes arena offsets for every tensor first used in [first_node,
// last_node] that still needs (more) memory, and fills `tensors_allocated`
// with the indices that received a new allocation.
TfLiteStatus ArenaPlanner::CalculateAllocations(
    int first_node, int last_node, std::vector<int32_t>* tensors_allocated) {
  const std::vector<int32_t> tensors_to_allocate =
      GetTensorsToAllocate(first_node, last_node);
  tensors_allocated->reserve(tensors_to_allocate.size());
  TfLiteTensor* tensors = graph_info_->tensors();
  for (const auto& tensor_index : tensors_to_allocate) {
    TfLiteTensor& tensor = tensors[tensor_index];
    if (tensor.allocation_type == kTfLiteArenaRw) {
      // RW tensors only need a new allocation when they grew.
      if (allocs_[tensor_index].size < tensor.bytes) {
        tensors_allocated->push_back(tensor_index);
      }
    } else if (tensor.allocation_type == kTfLiteArenaRwPersistent) {
      tensors_allocated->push_back(tensor_index);
    }
  }
  if (tensors_allocated->empty()) {
    last_active_node_ = last_node;
    return kTfLiteOk;
  }
  // Rewinding before the last planned node invalidates the incremental arena
  // state, so start over; otherwise just drop allocs that ended earlier.
  if (first_node < last_active_node_) {
    arena_.ResetAllocs();
    last_active_node_ = first_node;
  } else {
    arena_.PurgeActiveAllocs(first_node);
  }
  // Order tensors for the greedy allocator (lifetime, then size).
  CreateTensorAllocationVector(tensors_allocated);
  for (const auto& tensor_index : *tensors_allocated) {
    TfLiteTensor& tensor = tensors[tensor_index];
    // Tensors sharing a buffer reuse the root tensor's allocation, unless
    // the sharing became invalid (type or size mismatch after a resize).
    auto it = actual_tensor_id_.find(tensor_index);
    if (it != actual_tensor_id_.end()) {
      TfLiteAllocationType allocation_type =
          tensors[it->second].allocation_type;
      if (allocation_type != kTfLiteArenaRw ||
          tensors[it->second].bytes != tensors[it->first].bytes) {
        // Stop sharing and allocate this tensor independently.
        actual_tensor_id_.erase(it);
      } else {
        // Still validly shared: no allocation of its own.
        continue;
      }
    }
    if (tensor.allocation_type == kTfLiteArenaRw) {
      TF_LITE_ENSURE_STATUS(
          arena_.Allocate(context_, tensor_alignment_, tensor.bytes,
                          tensor_index, alloc_node_[tensor_index],
                          dealloc_node_[tensor_index], &allocs_[tensor_index]));
    }
    // Persistent tensors are allocated once and never deallocated.
    if (tensor.allocation_type == kTfLiteArenaRwPersistent &&
        allocs_[tensor_index].size == 0) {
      if (allocs_[tensor_index].size < tensor.bytes) {
        TF_LITE_ENSURE_STATUS(persistent_arena_.Allocate(
            context_, tensor_alignment_, tensor.bytes, tensor_index,
            alloc_node_[tensor_index],
            std::numeric_limits<int32_t>::max(),
            &allocs_[tensor_index]));
      }
    }
  }
  last_active_node_ = last_node;
  return kTfLiteOk;
}
bool AreTensorsAllocatedInSameArena(int32_t root_tensor_index,
int32_t tensor_index,
const TfLiteTensor* tensors) {
if (tensors[root_tensor_index].allocation_type == kTfLiteArenaRw &&
tensors[tensor_index].allocation_type == kTfLiteArenaRw) {
return true;
}
if (tensors[root_tensor_index].allocation_type == kTfLiteArenaRwPersistent &&
tensors[tensor_index].allocation_type == kTfLiteArenaRwPersistent) {
return true;
}
return false;
}
// Points tensor `tensor_index`'s data at its arena allocation. For a tensor
// that shares a buffer, resolves the root tensor first and aliases its
// pointer — but only when both live in the same arena kind.
TfLiteStatus ArenaPlanner::ResolveTensorAllocation(int32_t tensor_index,
                                                   TfLiteTensor* tensors) {
  auto actual_tensor_it = actual_tensor_id_.find(tensor_index);
  TfLiteTensor& tensor = tensors[tensor_index];
  int32_t root_tensor_index = actual_tensor_it == actual_tensor_id_.end()
                                  ? tensor_index
                                  : actual_tensor_it->second;
  const TfLiteTensor& root_tensor = tensors[root_tensor_index];
  if (root_tensor_index != tensor_index) {
    if (AreTensorsAllocatedInSameArena(root_tensor_index, tensor_index,
                                       tensors)) {
      // Shared buffer: alias the root tensor's resolved pointer.
      ResolveTensorAllocation(root_tensor_index, tensors);
      tensor.data.data = root_tensor.data.data;
      return kTfLiteOk;
    }
  }
  if (tensor.allocation_type == kTfLiteArenaRw) {
    // Skip tensors that have no allocation (e.g. zero-sized tensors).
    if (allocs_[tensor_index].size != 0) {
      return arena_.ResolveAlloc(context_, allocs_[tensor_index],
                                 &tensor.data.raw);
    }
  }
  if (tensor.allocation_type == kTfLiteArenaRwPersistent) {
    return persistent_arena_.ResolveAlloc(context_, allocs_[tensor_index],
                                          &tensor.data.raw);
  }
  return kTfLiteOk;
}
} | #include "tensorflow/lite/arena_planner.h"
#include <algorithm>
#include <cstdarg>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <initializer_list>
#include <memory>
#include <set>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/graph_info.h"
namespace tflite {
// Counts arena allocation events delivered through the OnTfLiteArenaAlloc
// test hook.
int gNumAlloc = 0;
void OnTfLiteArenaAlloc(int subgraph_index, int arena_id, size_t num_bytes) {
  gNumAlloc++;
}
// Counts arena deallocation events delivered through OnTfLiteArenaDealloc.
int gNumDealloc = 0;
void OnTfLiteArenaDealloc(int subgraph_index, int arena_id, size_t num_bytes) {
  gNumDealloc++;
}
namespace {
constexpr const int kTensorAlignment = 4;
// Lightweight description of a single op for building test graphs: its
// input/output/temporary tensor indices plus a registration that carries the
// builtin code and in-place sharing flags consumed by the planner.
class TestOp {
 public:
  TestOp(std::initializer_list<int> inputs, std::initializer_list<int> outputs,
         std::initializer_list<int> temporaries,
         int builtin_code = kTfLiteBuiltinAdd,
         int inplace_operator = kTfLiteInplaceOpInput0Shared)
      : inputs_(inputs),
        outputs_(outputs),
        temporaries_(temporaries),
        registration_{} {
    registration_.builtin_code = builtin_code;
    registration_.inplace_operator = inplace_operator;
  }
  const std::vector<int>& inputs() const { return inputs_; }
  const std::vector<int>& outputs() const { return outputs_; }
  const std::vector<int>& temporaries() const { return temporaries_; }
  const TfLiteRegistration& registration() const { return registration_; }
 private:
  std::vector<int> inputs_;
  std::vector<int> outputs_;
  std::vector<int> temporaries_;
  TfLiteRegistration registration_;
};
// In-memory test graph: owns TfLiteNode/TfLiteTensor storage built from a
// list of TestOps. Tensor `i` is created with `(i + 1) * 3` bytes and
// kTfLiteArenaRw allocation, which makes expected arena offsets easy to
// compute in the tests.
class TestGraph {
 public:
  TestGraph(std::initializer_list<int> inputs,
            std::initializer_list<TestOp> nodes,
            std::initializer_list<int> outputs)
      : inputs_(inputs), outputs_(outputs) {
    // Track the highest tensor index referenced anywhere so we know how many
    // tensors to create.
    int max_tensor_index = 0;
    for (int t : inputs) {
      max_tensor_index = std::max(max_tensor_index, t);
    }
    for (int t : outputs) {
      max_tensor_index = std::max(max_tensor_index, t);
    }
    for (const auto& node : nodes) {
      // Converts a std::vector<int> into an owned TfLiteIntArray.
      auto int_array = [](const std::vector<int>& x) {
        TfLiteIntArray* lite = TfLiteIntArrayCreate(x.size());
        for (size_t i = 0; i < x.size(); i++) lite->data[i] = x[i];
        return lite;
      };
      registrations_.push_back(node.registration());
      nodes_.push_back(TfLiteNode());
      nodes_.back().inputs = int_array(node.inputs());
      for (int t : node.inputs()) {
        max_tensor_index = std::max(max_tensor_index, t);
      }
      nodes_.back().outputs = int_array(node.outputs());
      for (int t : node.outputs()) {
        max_tensor_index = std::max(max_tensor_index, t);
      }
      nodes_.back().temporaries = int_array(node.temporaries());
      for (int t : node.temporaries()) {
        max_tensor_index = std::max(max_tensor_index, t);
      }
    }
    // One arena-RW tensor per index, sized (i + 1) * 3 bytes.
    for (int i = 0; i <= max_tensor_index; ++i) {
      tensors_.push_back(TfLiteTensor());
      tensors_.back().allocation_type = kTfLiteArenaRw;
      tensors_.back().bytes = (i + 1) * 3;
    }
  }
  ~TestGraph() {
    // Free the TfLiteIntArrays allocated in the constructor.
    for (auto node : nodes_) {
      TfLiteIntArrayFree(node.inputs);
      TfLiteIntArrayFree(node.outputs);
      TfLiteIntArrayFree(node.temporaries);
    }
  }
  const std::vector<TfLiteNode>& nodes() { return nodes_; }
  std::vector<TfLiteTensor>* tensors() { return &tensors_; }
  const std::vector<int>& inputs() { return inputs_; }
  const std::vector<int>& outputs() { return outputs_; }
  const std::vector<int>& variables() { return variables_; }
  const std::vector<TfLiteRegistration>& registrations() {
    return registrations_;
  }
  void SetVariables(const std::vector<int>& variables) {
    variables_ = variables;
  }
  // Exchanges this graph's contents with another (used to simulate graph
  // replacement under an existing planner).
  void Swap(TestGraph* other) {
    std::swap(nodes_, other->nodes_);
    std::swap(tensors_, other->tensors_);
    std::swap(inputs_, other->inputs_);
    std::swap(outputs_, other->outputs_);
    std::swap(variables_, other->variables_);
  }
 private:
  std::vector<TfLiteNode> nodes_;
  std::vector<TfLiteTensor> tensors_;
  std::vector<TfLiteRegistration> registrations_;
  std::vector<int> inputs_;
  std::vector<int> outputs_;
  std::vector<int> variables_;
};
// Adapts a TestGraph to the GraphInfo interface consumed by ArenaPlanner.
// Does not own the graph.
class TestGraphInfo : public GraphInfo {
 public:
  explicit TestGraphInfo(TestGraph* graph) : graph_(graph) {}
  size_t num_tensors() const override { return graph_->tensors()->size(); }
  TfLiteTensor* tensors() override { return graph_->tensors()->data(); }
  TfLiteTensor* tensor(size_t index) override {
    return &graph_->tensors()->at(index);
  }
  size_t num_execution_nodes() const override { return graph_->nodes().size(); }
  size_t num_total_nodes() const override { return graph_->nodes().size(); }
  const TfLiteNode& node(size_t index) const override {
    return graph_->nodes()[index];
  }
  const TfLiteRegistration& registration(size_t index) const override {
    return graph_->registrations()[index];
  }
  // Execution order equals declaration order in these tests.
  size_t node_index(size_t index) const override { return index; }
  const std::vector<int>& inputs() const override { return graph_->inputs(); }
  const std::vector<int>& outputs() const override { return graph_->outputs(); }
  const std::vector<int>& variables() const override {
    return graph_->variables();
  }
 private:
  TestGraph* graph_;
};
// TfLiteContext error reporter for tests: formats the varargs message into a
// fixed-size local buffer and routes it to LOG(INFO) instead of stderr.
void ReportError(TfLiteContext* context, const char* format, ...) {
  const size_t kBufferSize = 1024;
  char temp_buffer[kBufferSize];
  va_list args;
  va_start(args, format);
  // vsnprintf truncates safely if the message exceeds the buffer.
  vsnprintf(temp_buffer, kBufferSize, format, args);
  va_end(args);
  LOG(INFO) << temp_buffer;
}
// Fixture that wires a TestGraph into an ArenaPlanner and exposes thin
// wrappers around the planner API plus helpers for inspecting the resulting
// arena offsets of individual tensors.
class ArenaPlannerTest : public ::testing::Test {
 protected:
  // Installs `graph` and runs ResetAllocations() + PlanAllocations().
  void SetGraph(TestGraph* graph, bool preserve_all_tensors = false) {
    graph_ = graph;
    context_.ReportError = ReportError;
    planner_ = std::make_unique<ArenaPlanner>(
        &context_, std::unique_ptr<GraphInfo>(new TestGraphInfo(graph)),
        preserve_all_tensors, kTensorAlignment);
    CHECK(planner_->ResetAllocations() == kTfLiteOk);
    CHECK(planner_->PlanAllocations() == kTfLiteOk);
  }
  // Replaces the current graph's contents and re-plans (simulates a graph
  // swap under a live planner).
  void SwapGraph(TestGraph* graph) {
    graph_->Swap(graph);
    CHECK(planner_->PlanAllocations() == kTfLiteOk);
  }
  // Runs ExecuteAllocations over nodes [start, end].
  void Execute(int start, int end) {
    CHECK(planner_->ExecuteAllocations(start, end) == kTfLiteOk);
  }
  void ReleaseNonPersistentMemory() {
    CHECK(planner_->ReleaseNonPersistentMemory() == kTfLiteOk);
  }
  void AcquireNonPersistentMemory() {
    CHECK(planner_->AcquireNonPersistentMemory() == kTfLiteOk);
  }
  void ResetAllocations() { CHECK(planner_->ResetAllocations() == kTfLiteOk); }
  void ResetAllocationsAfter(int node) {
    CHECK(planner_->ResetAllocationsAfter(node) == kTfLiteOk);
  }
  bool HasNonPersistentMemory() {
    return planner_ && planner_->HasNonPersistentMemory();
  }
  void Destroy() { planner_.reset(); }
  // Byte offset of a tensor's data pointer from its arena's base address.
  std::ptrdiff_t GetOffset(int tensor_index) {
    const TfLiteTensor& tensor = (*graph_->tensors())[tensor_index];
    return reinterpret_cast<std::intptr_t>(tensor.data.raw) -
           planner_->BasePointer(tensor.allocation_type);
  }
  // Offset just past the tensor, rounded up to kTensorAlignment — i.e. where
  // the next aligned allocation could start.
  std::ptrdiff_t GetOffsetAfter(int tensor_index) {
    const TfLiteTensor& tensor = (*graph_->tensors())[tensor_index];
    std::ptrdiff_t offset = GetOffset(tensor_index) + tensor.bytes;
    if (offset % kTensorAlignment != 0) {
      offset += kTensorAlignment - offset % kTensorAlignment;
    }
    return offset;
  }
  bool IsUnallocated(int tensor_index) {
    return (*graph_->tensors())[tensor_index].data.raw == nullptr;
  }
  TfLiteContext context_;
  TestGraph* graph_;
  std::unique_ptr<ArenaPlanner> planner_;
};
// An empty graph plans and executes without any allocation activity.
TEST_F(ArenaPlannerTest, EmptyGraph) {
  TestGraph graph({}, {}, {});
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
}
// Graph inputs get packed arena offsets; tensors no node consumes or
// produces (5 and 11) stay unallocated.
TEST_F(ArenaPlannerTest, GraphWithOneOp) {
  TestGraph graph({0, 10}, {{{0}, {}, {}}, {{10}, {}, {}}}, {5, 11});
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_EQ(GetOffset(0), 0);
  EXPECT_EQ(GetOffset(10), GetOffsetAfter(0));
  EXPECT_TRUE((*graph.tensors())[5].data.raw == nullptr);
  EXPECT_TRUE((*graph.tensors())[11].data.raw == nullptr);
}
// The single output is placed after the input at the expected aligned
// offsets (tensors default to 12 bytes, alignment is 4).
TEST_F(ArenaPlannerTest, GraphWithOneOp2) {
  TestGraph graph({1}, {{{1}, {2}, {}}}, {2});
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_EQ(GetOffset(2), 8);
  EXPECT_EQ(GetOffsetAfter(2), 20);
}
// Zero-byte tensors must not receive an arena pointer.
TEST_F(ArenaPlannerTest, ZeroSizedTensors) {
  TestGraph graph({1}, {{{1}, {2}, {}}}, {2});
  (*graph.tensors())[1].bytes = 0;
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_EQ((*graph_->tensors())[1].data.raw, nullptr);
}
// Baseline three-node graph: intermediate tensor 2 can reuse space freed by
// later-dead tensors (2 shares the offset after 4 with 3).
TEST_F(ArenaPlannerTest, SimpleGraph) {
  TestGraph graph({0, 1},
                  {
                      {{0, 1}, {2}, {}},
                      {{2, 0}, {4, 5}, {}},
                      {{4, 5}, {3}, {}}
                  },
                  {3});
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_EQ(GetOffset(5), 12);
  EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
  EXPECT_EQ(GetOffset(3), GetOffsetAfter(4));
  EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
  EXPECT_EQ(GetOffset(1), 4);
}
// After ResetAllocations(), replanning with grown tensor sizes still yields
// the same relative layout (offsets are recomputed, not stale).
TEST_F(ArenaPlannerTest, AllocsCorrectlyReset) {
  TestGraph graph({0, 1},
                  {
                      {{0, 1}, {2}, {}},
                      {{2, 0}, {4, 5}, {}},
                      {{4, 5}, {3}, {}}
                  },
                  {3});
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_EQ(GetOffset(5), 12);
  EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
  EXPECT_EQ(GetOffset(3), GetOffsetAfter(4));
  EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
  EXPECT_EQ(GetOffset(1), 4);
  ResetAllocations();
  // Grow every tensor by one byte to force fresh offsets on replan.
  std::vector<TfLiteTensor>& tensors = *graph.tensors();
  tensors[0].bytes += 1;
  tensors[1].bytes += 1;
  tensors[2].bytes += 1;
  tensors[3].bytes += 1;
  tensors[4].bytes += 1;
  tensors[5].bytes += 1;
  Execute(0, graph.nodes().size() - 1);
  EXPECT_EQ(GetOffset(5), 12);
  EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
  EXPECT_EQ(GetOffset(3), GetOffsetAfter(4));
  EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
  EXPECT_EQ(GetOffset(1), 4);
}
// Graph inputs keep their own space at the front of the arena and are never
// overwritten by intermediates.
TEST_F(ArenaPlannerTest, SimpleGraphInputsPreserved) {
  TestGraph graph({0, 1},
                  {
                      {{0, 1}, {2}, {}},
                      {{2, 0}, {4, 5}, {}},
                      {{4, 5}, {3}, {}}
                  },
                  {3});
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_EQ(GetOffset(0), 0);
  EXPECT_EQ(GetOffset(1), GetOffsetAfter(0));
  EXPECT_EQ(GetOffset(5), GetOffsetAfter(1));
  EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
  EXPECT_EQ(GetOffset(3), GetOffsetAfter(4));
  EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
}
// A temporary tensor (5) lives only during its node, so the output (3) can
// reuse its offset.
TEST_F(ArenaPlannerTest, SimpleGraphWithTemporary) {
  TestGraph graph({0, 1},
                  {
                      {{0, 1}, {2}, {}},
                      {{2, 0}, {4}, {5}},
                      {{4}, {3}, {}}
                  },
                  {3});
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_EQ(GetOffset(3), 12);
  EXPECT_EQ(GetOffset(5), 12);
  EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
  EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
  EXPECT_EQ(GetOffset(1), 4);
}
// An in-place-capable Reshape lets its output share the input's buffer.
TEST_F(ArenaPlannerTest, SimpleGraphWithInplaceReshape) {
  TestGraph graph(
      {0, 1},
      {
          {{0}, {2}, {}},
          {{1}, {3}, {}},
          {{2, 3},
           {4},
           {},
           kTfLiteBuiltinReshape,
           kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpDataUnmodified},
          {{4}, {5}, {}}
      },
      {5});
  (*graph.tensors())[2].bytes = 24;
  (*graph.tensors())[4].bytes = 24;
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_EQ(GetOffset(2), GetOffset(4));
}
// A chain of in-place ops (reshape/expand_dims/squeeze/reshape) shares a
// single buffer along the entire chain.
TEST_F(ArenaPlannerTest, SimpleGraphWithChainOfInplaceOps) {
  TestGraph graph(
      {0, 1},
      {
          {{0}, {2}, {}},
          {{2, 3},
           {4},
           {},
           kTfLiteBuiltinReshape,
           kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpDataUnmodified},
          {{4, 3},
           {5},
           {},
           kTfLiteBuiltinExpandDims,
           kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpDataUnmodified},
          {{5, 3},
           {6},
           {},
           kTfLiteBuiltinSqueeze,
           kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpDataUnmodified},
          {{6, 3},
           {7},
           {},
           kTfLiteBuiltinReshape,
           kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpDataUnmodified},
          {{7}, {8}, {}},
      },
      {8});
  (*graph.tensors())[2].bytes = 24;
  (*graph.tensors())[4].bytes = 24;
  (*graph.tensors())[5].bytes = 24;
  (*graph.tensors())[6].bytes = 24;
  (*graph.tensors())[7].bytes = 24;
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  // NOTE(review): the first expectation compares tensor 2 with itself —
  // presumably a harmless artifact; verify it wasn't meant to be GetOffset(3).
  EXPECT_EQ(GetOffset(2), GetOffset(2));
  EXPECT_EQ(GetOffset(2), GetOffset(4));
  EXPECT_EQ(GetOffset(2), GetOffset(5));
  EXPECT_EQ(GetOffset(2), GetOffset(6));
  EXPECT_EQ(GetOffset(2), GetOffset(7));
}
// Reshape with a normal (arena-allocated) input and output still shares the
// buffer.
TEST_F(ArenaPlannerTest, SimpleGraphsWithReshapeInputOutput) {
  TestGraph graph(
      {0, 1},
      {
          {{0}, {2}, {}},
          {{2, 1},
           {3},
           {},
           kTfLiteBuiltinReshape,
           kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpDataUnmodified},
          {{3}, {4}, {}}},
      {4});
  (*graph.tensors())[2].bytes = 24;
  (*graph.tensors())[3].bytes = 24;
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_EQ(GetOffset(2), GetOffset(3));
}
// Reshape whose input is a dynamic graph input must NOT share the buffer.
TEST_F(ArenaPlannerTest, SimpleGraphsWithReshapeInputTensor) {
  TestGraph graph({0, 1},
                  {
                      {{0, 1},
                       {2},
                       {},
                       kTfLiteBuiltinReshape,
                       kTfLiteInplaceOpInput0Shared |
                           kTfLiteInplaceOpDataUnmodified},
                      {{4}, {3}, {}}},
                  {3});
  SetGraph(&graph);
  (*graph.tensors())[0].allocation_type = kTfLiteDynamic;
  Execute(0, graph.nodes().size() - 1);
  EXPECT_NE(GetOffset(0), GetOffset(2));
}
// Reshape whose output is a graph output must NOT share the buffer.
TEST_F(ArenaPlannerTest, SimpleGraphsWithReshapeOutputTensor) {
  TestGraph graph(
      {0, 1},
      {
          {{0}, {2}, {}},
          {{2, 1},
           {3},
           {},
           kTfLiteBuiltinReshape,
           kTfLiteInplaceOpInput0Shared |
               kTfLiteInplaceOpDataUnmodified},
      },
      {3});
  SetGraph(&graph);
  (*graph.tensors())[0].allocation_type = kTfLiteDynamic;
  Execute(0, graph.nodes().size() - 1);
  EXPECT_NE(GetOffset(2), GetOffset(3));
}
// Without kTfLiteInplaceOpInput0Shared, a dynamic input cannot be shared.
TEST_F(ArenaPlannerTest, SimpleGraphsWithReshapeDynamicInput) {
  TestGraph graph({0, 1},
                  {
                      {{0, 1},
                       {2},
                       {},
                       kTfLiteBuiltinReshape,
                       kTfLiteInplaceOpDataUnmodified}
                  },
                  {2});
  SetGraph(&graph);
  (*graph.tensors())[0].allocation_type = kTfLiteDynamic;
  Execute(0, graph.nodes().size() - 1);
  EXPECT_NE(GetOffset(0), GetOffset(2));
}
// For a broadcasting Add, only the input whose size matches the output
// (tensor 4, 16 bytes) may be shared; the smaller one (3) may not.
TEST_F(ArenaPlannerTest, SimpleGraphsWithBroadcastingAddInPlace) {
  TestGraph graph(
      {0, 1},
      {
          {{0, 1}, {3}, {}},
          {{1, 2}, {4}, {}},
          {{3, 4},
           {5},
           {},
           kTfLiteBuiltinAdd,
           kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpInput1Shared},
          {{5}, {6}, {}},
      },
      {6});
  (*graph.tensors())[3].bytes = 8;
  (*graph.tensors())[4].bytes = 16;
  (*graph.tensors())[5].bytes = 16;
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_NE(GetOffset(3), GetOffset(5));
  EXPECT_EQ(GetOffset(4), GetOffset(5));
}
// When neither input matches the output size, no sharing happens even if
// input 0 sharing is allowed.
TEST_F(ArenaPlannerTest, SimpleGraphsWithBroadcastingAddNotInPlace) {
  TestGraph graph(
      {0, 1},
      {
          {{0, 1}, {3}, {}},
          {{1, 2}, {4}, {}},
          {{3, 4}, {5}, {}, kTfLiteBuiltinAdd, kTfLiteInplaceOpInput0Shared},
          {{5}, {6}, {}},
      },
      {6});
  (*graph.tensors())[3].bytes = 8;
  (*graph.tensors())[4].bytes = 8;
  (*graph.tensors())[5].bytes = 64;
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_NE(GetOffset(3), GetOffset(5));
  EXPECT_NE(GetOffset(4), GetOffset(5));
}
// ResetAllocationsAfter(0) keeps tensors allocated by node 0 and frees the
// rest; re-execution from node 1 replans the freed tensors.
TEST_F(ArenaPlannerTest, SimpleGraphWithResetAllocationsAfter) {
  TestGraph graph({0, 1},
                  {
                      {{0, 1}, {2}, {}},
                      {{2, 0}, {4}, {5}},
                      {{4}, {3}, {}}
                  },
                  {3});
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_EQ(GetOffset(3), 12);
  EXPECT_EQ(GetOffset(5), 12);
  EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
  EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
  ResetAllocationsAfter(0);
  EXPECT_FALSE(IsUnallocated(0));
  EXPECT_FALSE(IsUnallocated(1));
  EXPECT_FALSE(IsUnallocated(2));
  EXPECT_TRUE(IsUnallocated(3));
  EXPECT_TRUE(IsUnallocated(4));
  EXPECT_TRUE(IsUnallocated(5));
  // Grow tensor 4 so replanning must give it a new, later offset.
  (*graph.tensors())[4].bytes += 64;
  Execute(1, graph.nodes().size() - 1);
  EXPECT_EQ(GetOffset(3), 12);
  EXPECT_EQ(GetOffset(5), 12);
  EXPECT_EQ(GetOffset(4), GetOffsetAfter(2));
  EXPECT_EQ(GetOffset(2), 48);
}
// Persistent-arena tensors survive ResetAllocationsAfter and keep the exact
// same pointer across re-execution.
TEST_F(ArenaPlannerTest, SimpleGraphWithPersistentResetAllocationsAfter) {
  TestGraph graph({0, 1},
                  {
                      {{0, 1}, {2}, {}},
                      {{2, 0}, {4}, {5}},
                      {{4}, {3}, {}}
                  },
                  {3});
  (*graph.tensors())[5].allocation_type = kTfLiteArenaRwPersistent;
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  void* tensor5_ptr = (*graph.tensors())[5].data.raw;
  ResetAllocationsAfter(0);
  EXPECT_FALSE(IsUnallocated(0));
  EXPECT_FALSE(IsUnallocated(1));
  EXPECT_FALSE(IsUnallocated(2));
  EXPECT_TRUE(IsUnallocated(3));
  EXPECT_TRUE(IsUnallocated(4));
  EXPECT_FALSE(IsUnallocated(5));
  Execute(0, graph.nodes().size() - 1);
  EXPECT_TRUE(tensor5_ptr == (*graph.tensors())[5].data.raw);
}
// Optional (-1) node inputs are ignored by the planner.
TEST_F(ArenaPlannerTest, SimpleGraphWithOptionals) {
  TestGraph graph({0, -1, 1},
                  {
                      {{0, 1}, {2}, {}},
                      {{2, 0}, {4, 5}, {}},
                      {{4, -1, 5}, {3}, {}}
                  },
                  {3});
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_EQ(GetOffset(5), 12);
  EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
  EXPECT_EQ(GetOffset(3), GetOffsetAfter(4));
  EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
}
// Optional (-1) graph outputs are likewise ignored.
TEST_F(ArenaPlannerTest, SimpleGraphWithOptionalOutput) {
  TestGraph graph({0, -1, 1},
                  {
                      {{0, 1}, {2}, {}},
                      {{2, 0}, {4, 5}, {}},
                      {{4, 5}, {3}, {}}
                  },
                  {-1, 3});
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_EQ(GetOffset(5), 12);
  EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
  EXPECT_EQ(GetOffset(3), GetOffsetAfter(4));
  EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
}
// A large early tensor's space is reused by later tensors once it dies.
TEST_F(ArenaPlannerTest, SimpleGraphWithLargeTensor) {
  TestGraph graph({0, -1},
                  {
                      {{0}, {1}, {}},
                      {{1}, {2}, {}},
                      {{2, 0}, {4}, {5}},
                      {{4, -1}, {3}, {}}
                  },
                  {3});
  (*graph.tensors())[1].bytes = 40;
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_EQ(GetOffset(1), 4);
  EXPECT_EQ(GetOffset(2), GetOffsetAfter(1));
  EXPECT_EQ(GetOffset(3), 4);
  EXPECT_EQ(GetOffset(5), 4);
  EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
}
// A persistent variable tensor goes into the persistent arena: it may share
// offset 0 with tensor 0 because the two arenas have different bases.
TEST_F(ArenaPlannerTest, SimpleGraphWithPersistentTensor) {
  TestGraph graph({0, -1, 1},
                  {
                      {{0, 1}, {2}, {}},
                      {{2, 0}, {4}, {5}},
                      {{4, -1}, {3}, {}}
                  },
                  {3});
  (*graph.tensors())[1].allocation_type = kTfLiteArenaRwPersistent;
  graph.SetVariables({1});
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_NE((*graph.tensors())[0].data.raw, (*graph.tensors())[1].data.raw);
  EXPECT_EQ(GetOffset(5), 4);
  EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
  EXPECT_EQ(GetOffset(3), 4);
  EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
  EXPECT_EQ(GetOffset(0), 0);
  EXPECT_EQ(GetOffset(1), 0);
}
// Dynamic tensors are never arena-allocated.
TEST_F(ArenaPlannerTest, SimpleGraphWithDynamicTensor) {
  TestGraph graph({0, -1, 1},
                  {
                      {{0, 1}, {2}, {}},
                      {{2, 0}, {4}, {5}},
                      {{4, -1}, {3}, {}}
                  },
                  {3});
  (*graph.tensors())[1].allocation_type = kTfLiteDynamic;
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_EQ((*graph.tensors())[1].data.raw, nullptr);
  EXPECT_EQ(GetOffset(5), 4);
  EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
  EXPECT_EQ(GetOffset(3), 4);
  EXPECT_EQ(GetOffset(2), GetOffsetAfter(4));
}
// Executing node-by-node allocates tensors exactly at their first use;
// earlier allocations remain stable across steps.
TEST_F(ArenaPlannerTest, LargerGraphAndStepwiseAllocation) {
  TestGraph graph({0, 1},
                  {
                      {{0, 1}, {2, 3}, {}},
                      {{2, 0}, {4, 5}, {6}},
                      {{1, -1}, {7}, {}},
                      {{7, 3}, {8}, {9}},
                      {{4, 5, 8}, {10}, {}},
                  },
                  {10});
  SetGraph(&graph);
  Execute(0, 0);
  EXPECT_EQ(GetOffset(3), 12);
  EXPECT_EQ(GetOffset(2), GetOffsetAfter(3));
  EXPECT_TRUE(IsUnallocated(6));
  EXPECT_TRUE(IsUnallocated(4));
  EXPECT_TRUE(IsUnallocated(5));
  EXPECT_TRUE(IsUnallocated(7));
  EXPECT_TRUE(IsUnallocated(9));
  EXPECT_TRUE(IsUnallocated(8));
  EXPECT_TRUE(IsUnallocated(10));
  Execute(1, 1);
  EXPECT_EQ(GetOffset(3), 12);
  EXPECT_EQ(GetOffset(2), GetOffsetAfter(3));
  EXPECT_EQ(GetOffset(6), GetOffsetAfter(2));
  EXPECT_EQ(GetOffset(5), GetOffsetAfter(6));
  EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
  EXPECT_TRUE(IsUnallocated(7));
  EXPECT_TRUE(IsUnallocated(9));
  EXPECT_TRUE(IsUnallocated(8));
  EXPECT_TRUE(IsUnallocated(10));
  Execute(2, 2);
  EXPECT_EQ(GetOffset(3), 12);
  EXPECT_EQ(GetOffset(2), GetOffsetAfter(3));
  EXPECT_EQ(GetOffset(6), GetOffsetAfter(2));
  EXPECT_EQ(GetOffset(5), GetOffsetAfter(6));
  EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
  EXPECT_EQ(GetOffset(7), GetOffsetAfter(3));
  EXPECT_TRUE(IsUnallocated(9));
  EXPECT_TRUE(IsUnallocated(8));
  EXPECT_TRUE(IsUnallocated(10));
  Execute(3, 3);
  EXPECT_EQ(GetOffset(3), 12);
  EXPECT_EQ(GetOffset(2), GetOffsetAfter(3));
  EXPECT_EQ(GetOffset(6), GetOffsetAfter(2));
  EXPECT_EQ(GetOffset(5), GetOffsetAfter(6));
  EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
  EXPECT_EQ(GetOffset(7), GetOffsetAfter(3));
  EXPECT_EQ(GetOffset(9), GetOffsetAfter(4));
  EXPECT_EQ(GetOffset(8), GetOffsetAfter(9));
  EXPECT_TRUE(IsUnallocated(10));
  Execute(4, 4);
  EXPECT_EQ(GetOffset(3), 12);
  EXPECT_EQ(GetOffset(2), GetOffsetAfter(3));
  EXPECT_EQ(GetOffset(6), GetOffsetAfter(2));
  EXPECT_EQ(GetOffset(5), GetOffsetAfter(6));
  EXPECT_EQ(GetOffset(4), GetOffsetAfter(5));
  EXPECT_EQ(GetOffset(7), GetOffsetAfter(3));
  EXPECT_EQ(GetOffset(9), GetOffsetAfter(4));
  EXPECT_EQ(GetOffset(8), GetOffsetAfter(9));
  EXPECT_EQ(GetOffset(10), 12);
}
// Swapping in a pruned graph and replanning yields a fresh, compact layout.
TEST_F(ArenaPlannerTest, ModifiedGraph) {
  TestGraph graph({0, 1},
                  {
                      {{0, 1}, {2}, {}},
                      {{2, 0}, {4, 5}, {}},
                      {{4, 5}, {3}, {}}
                  },
                  {3});
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  TestGraph pruned_graph({0, 1},
                         {
                             {{0, 1}, {3}, {}},
                         },
                         {3});
  SwapGraph(&pruned_graph);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_EQ(GetOffset(0), 0);
  EXPECT_EQ(GetOffset(1), GetOffsetAfter(0));
  EXPECT_EQ(GetOffset(3), GetOffsetAfter(1));
}
// Release/acquire of the non-persistent arena round-trips, and replanning
// after a graph swap works with the arena released in between.
TEST_F(ArenaPlannerTest, ModifiedGraph_DeallocateNonPersistentArena) {
  TestGraph graph({0, 1},
                  {
                      {{0, 1}, {2}, {}},
                      {{2, 0}, {4, 5}, {}},
                      {{4, 5}, {3}, {}}
                  },
                  {3});
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  // Acquire twice to verify the call is idempotent.
  AcquireNonPersistentMemory();
  AcquireNonPersistentMemory();
  EXPECT_TRUE(HasNonPersistentMemory());
  ReleaseNonPersistentMemory();
  EXPECT_FALSE(HasNonPersistentMemory());
  // With the arena released all offsets read as 0 (null base).
  EXPECT_EQ(GetOffset(0), 0);
  EXPECT_EQ(GetOffset(1), 0);
  EXPECT_EQ(GetOffset(3), 0);
  TestGraph pruned_graph({0, 1},
                         {
                             {{0, 1}, {3}, {}},
                         },
                         {3});
  SwapGraph(&pruned_graph);
  Execute(0, graph.nodes().size() - 1);
  AcquireNonPersistentMemory();
  EXPECT_TRUE(HasNonPersistentMemory());
  ReleaseNonPersistentMemory();
  AcquireNonPersistentMemory();
  EXPECT_EQ(GetOffset(0), 0);
  EXPECT_EQ(GetOffset(1), GetOffsetAfter(0));
  EXPECT_EQ(GetOffset(3), GetOffsetAfter(1));
}
// Diamond-shaped graph with varied tensor sizes: checks greedy packing and
// reuse of dead tensors' space across the whole topology.
TEST_F(ArenaPlannerTest, ComplexGraph) {
  TestGraph graph({0},
                  {
                      {{0}, {1}, {}},
                      {{1}, {2}, {}},
                      {{1}, {3}, {}},
                      {{1}, {4}, {}},
                      {{2, 3, 4}, {5}, {}},
                      {{5}, {6}, {}},
                      {{5}, {7}, {}},
                      {{6, 7}, {8}, {}},
                  },
                  {8});
  (*graph.tensors())[0].bytes = 32;
  (*graph.tensors())[1].bytes = 28;
  (*graph.tensors())[2].bytes = 36;
  (*graph.tensors())[3].bytes = 16;
  (*graph.tensors())[4].bytes = 8;
  (*graph.tensors())[5].bytes = 64;
  (*graph.tensors())[6].bytes = 10;
  (*graph.tensors())[7].bytes = 40;
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_EQ(GetOffset(5), 32);
  EXPECT_EQ(GetOffset(7), GetOffsetAfter(5));
  EXPECT_EQ(GetOffset(6), GetOffsetAfter(7));
  EXPECT_EQ(GetOffset(2), GetOffsetAfter(5));
  EXPECT_EQ(GetOffset(3), GetOffsetAfter(2));
  EXPECT_EQ(GetOffset(4), GetOffsetAfter(3));
  EXPECT_EQ(GetOffset(0), 0);
  EXPECT_EQ(GetOffset(1), GetOffsetAfter(0));
  EXPECT_EQ(GetOffset(8), 32);
}
// Temporaries interleaved with outputs across many nodes are all placed and
// reused consistently.
TEST_F(ArenaPlannerTest, GraphWithIntermediates) {
  TestGraph graph({0, 1},
                  {
                      {{0}, {2}, {3}},
                      {{1, 2}, {4, 5}, {}},
                      {{5}, {6, 7}, {8, 9, 10}},
                      {{4, 6}, {11}, {12}},
                      {{11}, {13}, {}},
                      {{7, 13}, {14}, {15}},
                  },
                  {11, 14});
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_EQ(GetOffset(0), 0);
  EXPECT_EQ(GetOffset(1), GetOffsetAfter(0));
  EXPECT_EQ(GetOffset(15), GetOffsetAfter(1));
  EXPECT_EQ(GetOffset(14), GetOffsetAfter(15));
  EXPECT_EQ(GetOffset(13), GetOffsetAfter(14));
  EXPECT_EQ(GetOffset(12), GetOffsetAfter(1));
  EXPECT_EQ(GetOffset(11), GetOffsetAfter(13));
  EXPECT_EQ(GetOffset(10), GetOffsetAfter(1));
  EXPECT_EQ(GetOffset(9), GetOffsetAfter(10));
  EXPECT_EQ(GetOffset(8), GetOffsetAfter(9));
  EXPECT_EQ(GetOffset(7), GetOffsetAfter(11));
  EXPECT_EQ(GetOffset(6), GetOffsetAfter(8));
  EXPECT_EQ(GetOffset(5), GetOffsetAfter(6));
  EXPECT_EQ(GetOffset(4), GetOffsetAfter(7));
  EXPECT_EQ(GetOffset(3), GetOffsetAfter(1));
  EXPECT_EQ(GetOffset(2), GetOffsetAfter(5));
}
// preserve_all_tensors=true forces every tensor into a distinct offset
// (useful for debugging), whereas the default allows sharing.
TEST_F(ArenaPlannerTest, DebugTensors) {
  TestGraph graph({0, 1},
                  {
                      {{0, 1}, {2}, {5}},
                      {{2, 0}, {4}, {6}},
                      {{4}, {3}, {7}}
                  },
                  {3});
  SetGraph(&graph, false);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_EQ(GetOffset(5), GetOffset(6));
  EXPECT_EQ(GetOffset(6), GetOffset(7));
  SetGraph(&graph, true);
  Execute(0, graph.nodes().size() - 1);
  std::set<std::ptrdiff_t> tensorOffsets;
  for (int i = 0; i < 8; i++) {
    tensorOffsets.insert(GetOffset(i));
  }
  EXPECT_EQ(tensorOffsets.size(), 8);
}
// preserve_all_tensors also disables input-buffer reuse between ops.
TEST_F(ArenaPlannerTest, DebugTensorsInputReuse) {
  TestGraph graph({0, 1},
                  {
                      {{0, 1}, {2, 3}, {}},
                      {{2, 3}, {4}, {}, kTfLiteBuiltinMul},
                      {{4, 2}, {5}, {}, kTfLiteBuiltinSub},
                      {{5}, {6}, {}},
                  },
                  {6});
  (*graph.tensors())[4].bytes = 200;
  (*graph.tensors())[5].bytes = 200;
  SetGraph(&graph, false);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_EQ(GetOffset(4), GetOffset(5));
  SetGraph(&graph, true);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_NE(GetOffset(4), GetOffset(5));
}
// The planner performs exactly one arena alloc and frees it on destruction
// (counters are maintained by the test's instrumented allocator).
TEST_F(ArenaPlannerTest, SimpleProfilerTest) {
  gNumAlloc = 0;
  gNumDealloc = 0;
  TestGraph graph({1}, {{{1}, {2}, {}}}, {2});
  SetGraph(&graph);
  Execute(0, graph.nodes().size() - 1);
  EXPECT_EQ(gNumAlloc, 1);
  EXPECT_EQ(gNumDealloc, 0);
  Destroy();
  EXPECT_EQ(gNumDealloc, 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/arena_planner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/arena_planner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
#include "tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles.h"
#include <algorithm>
#include <functional>
#include "absl/algorithm/container.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/core/platform/stream_executor.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
namespace tensorflow {
namespace tensorrt {
// Converts a vector of TF shapes (TensorShape or PartialTensorShape) into
// TensorRT dims. A shape that cannot be represented as TRT dims is a
// programming error: TF_CHECK_OK aborts.
//
// Takes the vector by const reference (the original signature copied the
// whole vector on every call); all call sites pass lvalues and are
// unaffected.
template <typename TensorShapeType>
std::vector<nvinfer1::Dims> GetDimVec(
    const std::vector<TensorShapeType>& shape_vec) {
  std::vector<nvinfer1::Dims> dimvec(shape_vec.size());
  absl::c_transform(shape_vec, dimvec.begin(),
                    [](const TensorShapeType& shape) {
                      auto adap = DimsAdapter::Create(shape);
                      TF_CHECK_OK(adap.status());
                      return adap->AsTrtDims();
                    });
  return dimvec;
}
// Pins profile dimensions to the values that are statically known in the
// network input shape; only truly dynamic (-1) dims remain free.
void EnforceCompatibility(nvinfer1::Dims* prof_dims,
                          const PartialTensorShape& input_shape) {
  const int rank = input_shape.dims();
  for (int d = 0; d < rank; d++) {
    const auto static_size = input_shape.dim_size(d);
    if (static_size != -1) {
      prof_dims->d[d] = static_size;
    }
  }
}
// Builds an implicit-batch-compatible profile from one observed shape
// vector: opt and max are the observed shapes, while min relaxes the batch
// (first) dimension to 1. A dynamic (-1) batch dimension is left untouched.
void SetImplicitBatchModeCompatibleProfile(
    const std::vector<nvinfer1::Dims>& dimvec, std::vector<nvinfer1::Dims>* min,
    std::vector<nvinfer1::Dims>* opt, std::vector<nvinfer1::Dims>* max) {
  *opt = dimvec;
  *max = dimvec;
  *min = dimvec;
  for (nvinfer1::Dims& dims : *min) {
    if (dims.d[0] != -1) {
      dims.d[0] = 1;
    }
  }
}
// Creates one implicit-batch-compatible optimization profile per distinct
// collected shape vector and appends them to profiles_.
void TrtShapeOptimizationProfile::ImplicitBatchModeCompatibleStrategy(
    const std::vector<std::vector<nvinfer1::Dims>>& collected_shapes) {
  for (const std::vector<nvinfer1::Dims>& shape_vec : collected_shapes) {
    std::vector<nvinfer1::Dims> min;
    std::vector<nvinfer1::Dims> opt;
    std::vector<nvinfer1::Dims> max;
    SetImplicitBatchModeCompatibleProfile(shape_vec, &min, &opt, &max);
    VLOG(2) << "Initializing optimization profile config with min="
            << DebugString(min) << ", opt=max=" << DebugString(max);
    profiles_.push_back(OptimizationProfileConfig{min, opt, max});
  }
}
// Applies `op` elementwise over the dimensions of two shape vectors, writing
// the result back into `x`. Used by RangeStrategy to accumulate the min/max
// envelope of all observed shapes.
//
// Returns InvalidArgument if the vectors disagree on the number of tensors
// or on the rank of any tensor.
template <typename BinaryOperation>
Status ShapeProfileBinaryOp(std::vector<nvinfer1::Dims>* x,
                            const std::vector<nvinfer1::Dims>& y,
                            BinaryOperation op) {
  if (x->size() != y.size())
    return errors::InvalidArgument(
        "Number of input tensors differ during profile creation");
  for (int i = 0; i < x->size(); i++) {
    if (x->at(i).nbDims != y[i].nbDims)
      // Separate the two rank values in the message; the original
      // concatenated them ("values 34" was ambiguous).
      return errors::InvalidArgument(
          "Number of input dimensions differ during profile creation at dim ",
          i, ", values ", x->at(i).nbDims, " vs ", y[i].nbDims);
    for (int j = 0; j < x->at(i).nbDims; j++) {
      x->at(i).d[j] = op(x->at(i).d[j], y[i].d[j]);
    }
  }
  return OkStatus();
}
// Folds all collected shapes into a single [min, max] envelope and appends
// one profile whose opt equals max. No-op for an empty collection.
Status TrtShapeOptimizationProfile::RangeStrategy(
    const std::vector<std::vector<nvinfer1::Dims>>& collected_shapes) {
  if (collected_shapes.empty()) return OkStatus();
  std::vector<nvinfer1::Dims> min = collected_shapes.front();
  std::vector<nvinfer1::Dims> max = min;
  const auto take_min = [](int a, int b) { return std::min(a, b); };
  const auto take_max = [](int a, int b) { return std::max(a, b); };
  for (int i = 1; i < collected_shapes.size(); i++) {
    TF_RETURN_IF_ERROR(
        ShapeProfileBinaryOp(&min, collected_shapes[i], take_min));
    TF_RETURN_IF_ERROR(
        ShapeProfileBinaryOp(&max, collected_shapes[i], take_max));
  }
  VLOG(2) << "Initializing optimization profile config with min="
          << DebugString(min) << ", opt=max=" << DebugString(max);
  profiles_.push_back(OptimizationProfileConfig{min, max, max});
  return OkStatus();
}
// Creates one fully pinned profile (min == opt == max) per observed shape
// vector and appends them to profiles_.
void TrtShapeOptimizationProfile::OptimalStrategy(
    const std::vector<std::vector<nvinfer1::Dims>>& collected_shapes) {
  for (const auto& shape_vec : collected_shapes) {
    VLOG(2) << "Initializing optimization profile config with min=opt=max="
            << DebugString(shape_vec);
    profiles_.push_back(
        OptimizationProfileConfig{shape_vec, shape_vec, shape_vec});
  }
}
// Reads the values of all candidate shape tensors among the op's inputs into
// actual_shape_values_. Values live on device, so they are copied to host
// with cudaMemcpyAsync on the op's stream, with a single synchronize at the
// end. Inputs that turn out not to be valid shape tensors are demoted in
// is_shape_tensor_.
Status TrtShapeOptimizationProfile::CollectShapeValues(OpKernelContext* ctx) {
  tensorflow::profiler::TraceMe activity(
      "TrtShapeOptimizationProfile::CollectShapeValues",
      tensorflow::profiler::TraceMeLevel::kInfo);
  cudaStream_t stream = reinterpret_cast<cudaStream_t>(CHECK_NOTNULL(
      ctx->op_device_context()->stream()->platform_specific_handle().stream));
  actual_shape_values_.resize(ctx->num_inputs());
  // Lazily derive the shape-tensor mask from the actual inputs on first use.
  if (is_shape_tensor_.empty()) {
    is_shape_tensor_.resize(ctx->num_inputs());
    for (int i = 0; i < ctx->num_inputs(); i++) {
      is_shape_tensor_[i] = IsTrtShapeTensorCompatible(ctx->input(i));
    }
  }
  int n_shape_val = 0;
  for (int i = 0; i < ctx->num_inputs(); i++) {
    if (is_shape_tensor_[i]) {
      // Only int32 tensors can be TRT shape tensors; demote otherwise.
      if (ctx->input_dtype(i) != DT_INT32) {
        is_shape_tensor_[i] = false;
        continue;
      }
      // Element count must agree with what was recorded during profile
      // generation; demote the input if it does not.
      if (input_shape_values_.size() > 0 &&
          input_shape_values_[0][i].nbDims != ctx->input(i).NumElements()) {
        is_shape_tensor_[i] = false;
        continue;
      }
      n_shape_val++;
      const Tensor& input = ctx->input(i);
      actual_shape_values_[i].nbDims = input.NumElements();
      auto ret = cudaMemcpyAsync(
          actual_shape_values_[i].d, input.flat<int32>().data(),
          input.NumElements() * sizeof(int32), cudaMemcpyDeviceToHost, stream);
      if (ret != 0) {
        return errors::Internal("Could not copy shape tensor values");
      }
      VLOG(2) << "Input " << i << " is (probably) a shape tensor, n_values="
              << input.NumElements();
    } else {
      actual_shape_values_[i] = {0, {}};
    }
  }
  if (n_shape_val > 0) {
    // Wait for all async copies so the host-side values are valid.
    cudaStreamSynchronize(stream);
  }
  return OkStatus();
}
// Host-side overload: copies the values of shape tensors out of `input` into
// actual_shape_values_. Fails if a tensor flagged as a shape tensor no
// longer qualifies as one.
Status TrtShapeOptimizationProfile::CollectShapeValues(const DataVec& input) {
  actual_shape_values_.resize(input.size());
  for (int i = 0; i < input.size(); i++) {
    if (!is_shape_tensor_[i]) {
      actual_shape_values_[i] = {0, {}};
      continue;
    }
    if (!IsTrtShapeTensorCompatible(input[i].tensor)) {
      return errors::Internal("Inconsistent shape tensor ", input[i].name,
                              ", ", i);
    }
    const int n_elements = input[i].tensor.NumElements();
    actual_shape_values_[i].nbDims = n_elements;
    const auto* src = input[i].tensor.flat<int32>().data();
    std::copy(src, src + n_elements, actual_shape_values_[i].d);
    VLOG(2) << "Collected tensor shape values "
            << DebugString(actual_shape_values_[i]);
  }
  return OkStatus();
}
// Adjusts a profile so that shape-value entries do not have min == max:
// when they are equal, max's first value is bumped by one to open the
// interval. Only used for TRT < 8.0 (see the #if in InitProfiles). The
// shape-value entries are stored after the dimension entries, hence the
// offset of is_shape_tensor.size().
void FixShapeValueProfile(OptimizationProfileConfig* prof,
                          const std::vector<bool>& is_shape_tensor) {
  int shape_value_offset = is_shape_tensor.size();
  for (int i = 0; i < is_shape_tensor.size(); i++) {
    if (is_shape_tensor[i] &&
        std::equal(prof->min[shape_value_offset + i].d,
                   prof->min[shape_value_offset + i].d +
                       prof->min[shape_value_offset + i].nbDims,
                   prof->max[shape_value_offset + i].d)) {
      prof->max[shape_value_offset + i].d[0]++;
      VLOG(2) << "Adjusted profile for shape value tensor " << i << " "
              << DebugString(prof->max[shape_value_offset + i]);
    } else {
      VLOG(2) << i << " is not a shape tensor." << is_shape_tensor[i];
    }
  }
}
// Returns true if `rhs` (dims followed by shape-tensor values) is already
// present, element for element, in `values`. Used to de-duplicate collected
// input signatures before profile creation.
bool AlreadyCollected(const std::vector<std::vector<nvinfer1::Dims>>& values,
                      const std::vector<nvinfer1::Dims>& rhs) {
  for (const auto& lhs : values) {
    if (lhs.size() != rhs.size()) continue;
    bool matches = true;
    for (int i = 0; matches && i < lhs.size(); i++) {
      if (lhs[i].nbDims != rhs[i].nbDims) {
        matches = false;
        break;
      }
      for (int j = 0; j < lhs[i].nbDims; j++) {
        if (lhs[i].d[j] != rhs[i].d[j]) {
          matches = false;
          break;
        }
      }
    }
    if (matches) return true;
  }
  return false;
}
// Builds the optimization profiles from the shapes (and shape-tensor
// values) collected during profile-generation runs, according to
// `strategy`. Also derives the shape-tensor mask and pins profile dims that
// are statically known in the network inputs.
void TrtShapeOptimizationProfile::InitProfiles(
    const std::vector<PartialTensorShape>& input_partial_shapes,
    ProfileStrategy strategy) {
  strategy_ = strategy;
  if (input_shapes_.size() == 0) {
    VLOG(1) << "Not creating profiles without input_shapes. "
               "You have to enable profile generation mode first (build).";
    return;
  }
  // De-duplicated input signatures: each entry is the input dims followed by
  // the recorded shape-tensor values.
  std::vector<std::vector<nvinfer1::Dims>> collected_shapes;
  for (int i = 0; i < input_shapes_.size(); i++) {
    auto shape_vec = input_shapes_[i];
    VLOG(2) << "Initprofiles, processing shape " << i;
    if (!shape_vec.empty()) {
      // Zero out recorded values of inputs that turned out not to be shape
      // tensors so they do not affect de-duplication.
      for (int k = 0; k < input_shape_values_[i].size(); k++) {
        if (!is_shape_tensor_[k])
          input_shape_values_[i][k] = nvinfer1::Dims{0, {}};
      }
      std::vector<nvinfer1::Dims> dimvec = GetDimVec(shape_vec);
      dimvec.insert(dimvec.end(), input_shape_values_[i].begin(),
                    input_shape_values_[i].end());
      if (!AlreadyCollected(collected_shapes, dimvec)) {
        collected_shapes.push_back(dimvec);
      }
    }
  }
  switch (strategy_) {
    case ProfileStrategy::kImplicitBatchModeCompatible:
      VLOG(1) << "Creating profiles with ImplicitBatchModeCompatible strategy";
      ImplicitBatchModeCompatibleStrategy(collected_shapes);
      break;
    case ProfileStrategy::kRange:
      VLOG(1) << "Creating profiles with Range strategy";
      TF_CHECK_OK(RangeStrategy(collected_shapes));
      break;
    case ProfileStrategy::kRangeOptimal:
      // Optimal profiles first, then one range profile as a catch-all.
      VLOG(1) << "Creating profiles with RangeOptimal strategy";
      OptimalStrategy(collected_shapes);
      TF_CHECK_OK(RangeStrategy(collected_shapes));
      break;
    case ProfileStrategy::kOptimal:
      VLOG(1) << "Creating profiles with Optimal strategy";
      OptimalStrategy(collected_shapes);
      break;
  }
  SetShapeTensorMask(input_partial_shapes);
  if (input_partial_shapes.size() > 0) {
    for (OptimizationProfileConfig& prof : profiles_) {
// TRT < 8.0 rejects shape-value profiles where min == max.
#if !IS_TRT_VERSION_GE(8, 0, 0, 0)
      FixShapeValueProfile(&prof, is_shape_tensor_);
#endif
      for (int i = 0; i < input_partial_shapes.size(); i++) {
        auto network_input = input_partial_shapes[i];
        // Dims that are static in the network must match exactly in the
        // profile.
        EnforceCompatibility(&prof.min[i], network_input);
        EnforceCompatibility(&prof.opt[i], network_input);
        EnforceCompatibility(&prof.max[i], network_input);
      }
    }
  }
}
void TrtShapeOptimizationProfile::InitCalibProfile(
const std::vector<TensorShape>& shapes) {
VLOG(1) << "Collected shape(s) " << DebugString(shapes) << " for "
<< " calibration profile.";
auto shape_vec = shapes;
if (!shape_vec.empty()) {
std::vector<nvinfer1::Dims> dimvec = GetDimVec(shape_vec);
dimvec.insert(dimvec.end(), actual_shape_values_.begin(),
actual_shape_values_.end());
VLOG(2) << "Initializing calibration optimization profile config with "
<< "min=opt=max " << DebugString(dimvec);
OptimizationProfileConfig profConfig{dimvec, dimvec, dimvec};
calib_profiles_ = std::move(profConfig);
} else {
VLOG(2) << "Failed to initialize calibration optimization profile.";
}
}
// Registers the calibration profile (if any) and every optimization profile
// with the TRT builder config, then derives the shape-tensor mask and
// resets the pruned-input mask. Fails if TRT assigns a different profile
// index than ours, or if no profile at all was accepted.
Status TrtShapeOptimizationProfile::AddProfiles(
    nvinfer1::IBuilder* builder, nvinfer1::IBuilderConfig* config,
    const nvinfer1::INetworkDefinition* network) {
  if (!calib_profiles_.min.empty()) {
    VLOG(2) << "Setting up calibration profiles";
    auto* calibProfile = builder->createOptimizationProfile();
    Status status =
        calib_profiles_.SetDimensions(network, calibProfile, input_mask_);
    if (!status.ok()) {
      return status;
    }
    bool result = false;
    if (calibProfile->isValid()) {
      result = config->setCalibrationProfile(calibProfile);
    } else {
      VLOG(2) << "Calibration profile is not valid";
    }
    if (result) {
      VLOG(2) << "Added calibration optimization profile "
              << calib_profiles_.DebugString() << " to builder config.";
    } else {
      // A rejected calibration profile is logged but not fatal.
      VLOG(2) << "FAILED TO ADD PROFILE";
      LOG(ERROR) << "Failed to add calibration optimization profile "
                 << calib_profiles_.DebugString()
                 << ". This usually happens when profile is invalid.";
    }
  }
  for (int i = 0; i < profiles_.size(); i++) {
    auto* optProfile = builder->createOptimizationProfile();
    Status status =
        profiles_[i].SetDimensions(network, optProfile, input_mask_);
    if (!status.ok()) {
      return status;
    }
    int idx = -1;
    if (optProfile->isValid()) {
      idx = config->addOptimizationProfile(optProfile);
    }
    if (idx >= 0) {
      // Profile lookup relies on a 1:1 index correspondence between
      // profiles_ and the engine config.
      if (i != idx) {
        return errors::Internal(
            "Profile index of engine config is different from source profile "
            "index: ",
            i, " != ", idx);
      }
      VLOG(1) << "Added optimization profile " << profiles_[i].DebugString()
              << " with idx " << idx << " to builder config.";
    } else {
      // Invalid profiles are skipped; we only fail below if none were added.
      LOG(ERROR) << "Failed to add optimization profile "
                 << profiles_[i].DebugString()
                 << ". This usually happens when profile is invalid.";
    }
  }
  if (!profiles_.empty() && config->getNbOptimizationProfiles() == 0) {
    return errors::Internal("Failure in adding an optimization profile.");
  }
  need_profiles_ = config->getNbOptimizationProfiles() > 0;
  // Derive which network inputs are shape tensors; reset pruned-input mask.
  SetShapeTensorMask(network);
  is_pruned_input_.resize(network->getNbInputs());
  absl::c_fill(is_pruned_input_, false);
  return OkStatus();
}
// Configures the TRT builder for this set of profiles. Profile registration
// is currently the only configuration step, so this simply forwards.
Status TrtShapeOptimizationProfile::ConfigureBuilder(
    nvinfer1::IBuilder* builder, nvinfer1::IBuilderConfig* config,
    const nvinfer1::INetworkDefinition* network) {
  return AddProfiles(builder, config, network);
}
// Derives the shape-tensor mask by querying the built TRT engine's bindings
// for the first `n_inputs` network inputs. Inputs whose binding cannot be
// resolved (e.g. pruned inputs) are left marked as non-shape tensors.
void TrtShapeOptimizationProfile::SetShapeTensorMask(
    const nvinfer1::ICudaEngine* engine, int n_inputs) {
  is_shape_tensor_.resize(n_inputs, false);
  for (int i = 0; i < n_inputs; i++) {
    int binding_index;
    Status status = GetTrtBindingIndex(i, 0, engine, &binding_index);
    if (!status.ok()) {
      continue;
    }
    is_shape_tensor_[i] = engine->isShapeBinding(binding_index);
    if (is_shape_tensor_[i]) {
      VLOG(2) << "Found shape tensor at " << i;
    }
  }
  has_shape_tensor_ =
      absl::c_any_of(is_shape_tensor_, [](bool b) { return b; });
}
// Derives the shape-tensor mask directly from the network definition and
// updates has_shape_tensor_ accordingly.
void TrtShapeOptimizationProfile::SetShapeTensorMask(
    const nvinfer1::INetworkDefinition* network) {
  const int n_inputs = network->getNbInputs();
  is_shape_tensor_.resize(n_inputs, false);
  has_shape_tensor_ = false;
  for (int i = 0; i < n_inputs; i++) {
    const ITensorProxyPtr input = network->getInput(i);
    const bool is_shape = input->isShapeTensor();
    is_shape_tensor_[i] = is_shape;
    if (is_shape) {
      VLOG(2) << "Found shape tensor " << input->getName() << " at " << i;
      has_shape_tensor_ = true;
    }
  }
}
// Derives the shape-tensor mask heuristically from the input shapes (an
// input is a shape-tensor candidate if its shape is TRT shape-tensor
// compatible). Keeps an existing mask of matching size untouched.
void TrtShapeOptimizationProfile::SetShapeTensorMask(
    const std::vector<PartialTensorShape>& input_partial_shapes) {
  if (is_shape_tensor_.size() == input_partial_shapes.size()) {
    return;
  }
  const int n_inputs = input_partial_shapes.size();
  is_shape_tensor_.assign(n_inputs, false);
  has_shape_tensor_ = false;
  for (int i = 0; i < n_inputs; i++) {
    if (IsTrtShapeTensorCompatible(input_partial_shapes[i])) {
      is_shape_tensor_[i] = true;
      has_shape_tensor_ = true;
      VLOG(2) << "Found shape compatible tensor at " << i;
    }
  }
}
// Returns the index of the first profile that covers `shapes` together with
// the currently collected shape-tensor values, or -1 if none matches.
// Returns 0 unconditionally when the engine needs no profiles.
int TrtShapeOptimizationProfile::GetProfileNumber(
    const std::vector<TensorShape>& shapes) {
  tensorflow::profiler::TraceMe activity(
      "TrtShapeOptimizationProfile::GetProfileNumber",
      tensorflow::profiler::TraceMeLevel::kInfo);
  if (!need_profiles_) return 0;
  for (int i = 0; i < profiles_.size(); i++) {
    if (profiles_[i].IncludesShapes(shapes, HasShapeTensor(),
                                    actual_shape_values_, is_pruned_input_,
                                    is_shape_tensor_)) {
      return i;
    }
  }
  VLOG(1) << "Profile not found for input shapes " << DebugString(shapes);
  VLOG(2) << " and shape values " << DebugString(actual_shape_values_);
  return -1;
}
// Creates one TRT execution context per optimization profile. Note the
// do-while: at least one context is created even when profiles_ is empty
// (engines without profiles still need a context).
Status TrtShapeOptimizationProfile::CreateExecutionContexts(
    nvinfer1::ICudaEngine* engine,
    std::vector<ExecutionContext>* exec_contexts) {
  int i = 0;
  do {
    VLOG(1) << "Creating execution context " << i;
    ExecutionContext context = ExecutionContext::Create(engine);
    if (i > 0) {
      // Context 0 is bound to profile 0 by default; the others must be
      // assigned to their profile explicitly.
      if (!context->setOptimizationProfile(i)) {
        return errors::Internal("Could not set TRT optimization profile.");
      }
    }
    exec_contexts->push_back(std::move(context));
    i++;
  } while (i < profiles_.size());
  return OkStatus();
}
// Feeds the collected shape-tensor values for `input_index` into the
// execution context's shape binding. Non-shape bindings are left untouched.
Status TrtShapeOptimizationProfile::SetInputShapeBinding(
    int input_index, int binding_index, nvinfer1::ICudaEngine* cuda_engine,
    nvinfer1::IExecutionContext* exec_context) const {
  tensorflow::profiler::TraceMe activity(
      "TrtShapeOptimizationProfile::SetInputShapeBinding",
      tensorflow::profiler::TraceMeLevel::kInfo);
  if (!cuda_engine->isShapeBinding(binding_index)) {
    return OkStatus();
  }
  // .at() intentionally bounds-checks: a missing entry is a programming error.
  const auto& shape_values = actual_shape_values_.at(input_index);
  VLOG(2) << "Setting input shape binding for idx " << binding_index
          << ", with values " << DebugString(shape_values);
  if (!exec_context->setInputShapeBinding(binding_index, shape_values.d)) {
    return errors::Internal("Could not set input shape binding for idx ",
                            binding_index);
  }
  return OkStatus();
}
// Reads the shape values recorded in profile `prof_idx` for the given shape
// binding and packs them into a Dims struct (nbDims = number of values).
// Returns an empty Dims when the binding is not a shape binding or no values
// were recorded.
nvinfer1::Dims GetDimsFromShapeVal(int prof_idx, int binding_idx,
                                   nvinfer1::OptProfileSelector selector,
                                   const nvinfer1::ICudaEngine* engine) {
  if (!engine->isShapeBinding(binding_idx)) {
    return {0, {0}};
  }
  const int32* shape_val_ptr =
      engine->getProfileShapeValues(binding_idx, prof_idx, selector);
  if (shape_val_ptr == nullptr) {
    return {0, {0}};
  }
  VLOG(2) << "Found shape value in prof " << prof_idx << ", binding "
          << binding_idx;
  nvinfer1::Dims dims = engine->getBindingDimensions(binding_idx);
  // A rank-0 shape tensor carries exactly one value.
  const int n_values = (dims.nbDims == 0) ? 1 : dims.d[0];
  if (n_values > 0) {
    dims.nbDims = n_values;
    std::copy(shape_val_ptr, shape_val_ptr + n_values, dims.d);
  }
  return dims;
}
// Marks network inputs that have no corresponding engine binding (i.e. they
// were pruned away during engine construction).
Status TrtShapeOptimizationProfile::SetPrunedMask(
    const nvinfer1::ICudaEngine* engine, int n_network_inputs) {
  is_pruned_input_.resize(n_network_inputs);
  absl::c_fill(is_pruned_input_, false);
  for (int input_id = 0; input_id < n_network_inputs; ++input_id) {
    int binding_idx;
    // A failed binding lookup identifies a pruned input.
    if (!GetTrtBindingIndex(input_id, 0, engine, &binding_idx).ok()) {
      is_pruned_input_[input_id] = true;
      VLOG(2) << "Skipping pruned input " << input_id;
    }
  }
  return OkStatus();
}
// Reconstructs `profiles_` from a deserialized engine, so profile selection
// keeps working after engine reload when the original network metadata is
// gone. Returns OkStatus (and leaves need_profiles_ false) for null or
// implicit-batch engines.
Status TrtShapeOptimizationProfile::RestoreProfiles(
    const nvinfer1::ICudaEngine* engine, int n_network_inputs) {
  need_profiles_ = false;
  if (!engine) {
    // No engine to restore from.
    return OkStatus();
  }
  if (engine->hasImplicitBatchDimension()) {
    // Implicit-batch engines do not carry optimization profiles.
    return OkStatus();
  }
  int n_profiles = engine->getNbOptimizationProfiles();
  need_profiles_ = n_profiles > 0;
  int n_inputs = GetNumberOfEngineInputs(engine);
  if (n_inputs > n_network_inputs) {
    // More engine inputs than network inputs cannot happen for a valid
    // engine/network pair.
    return errors::Internal("Incorrect number of engine inputs");
  }
  VLOG(2) << "Attempting to restore " << n_profiles << " profiles, each with "
          << n_inputs << " inputs";
  // Both masks must be populated before the per-input queries below.
  SetShapeTensorMask(engine, n_network_inputs);
  TF_RETURN_IF_ERROR(SetPrunedMask(engine, n_network_inputs));
  for (int prof_idx = 0; prof_idx < n_profiles; prof_idx++) {
    OptimizationProfileConfig cfg;
    // First half of each vector holds dimensions, second half holds
    // shape-tensor values.
    cfg.min.resize(n_network_inputs * 2);
    cfg.max.resize(n_network_inputs * 2);
    cfg.opt.resize(n_network_inputs * 2);
    for (int j = 0; j < n_network_inputs; j++) {
      if (is_pruned_input_[j]) continue;
      int binding_idx;
      TF_RETURN_IF_ERROR(GetTrtBindingIndex(j, 0, engine, &binding_idx));
      nvinfer1::Dims min = engine->getProfileDimensions(
          binding_idx, prof_idx, nvinfer1::OptProfileSelector::kMIN);
      nvinfer1::Dims max = engine->getProfileDimensions(
          binding_idx, prof_idx, nvinfer1::OptProfileSelector::kMAX);
      nvinfer1::Dims opt = engine->getProfileDimensions(
          binding_idx, prof_idx, nvinfer1::OptProfileSelector::kOPT);
      cfg.min[j] = min;
      cfg.max[j] = max;
      cfg.opt[j] = opt;
      // NOTE(review): the vectors are sized n_network_inputs * 2, yet the
      // shape-value slots are addressed at offset n_inputs. If
      // n_inputs < n_network_inputs the dims and shape-value regions overlap
      // — confirm the intended offset against OptimizationProfileConfig's
      // documented layout.
      cfg.min[j + n_inputs] = GetDimsFromShapeVal(
          prof_idx, binding_idx, nvinfer1::OptProfileSelector::kMIN, engine);
      cfg.max[j + n_inputs] = GetDimsFromShapeVal(
          prof_idx, binding_idx, nvinfer1::OptProfileSelector::kMAX, engine);
      cfg.opt[j + n_inputs] = GetDimsFromShapeVal(
          prof_idx, binding_idx, nvinfer1::OptProfileSelector::kOPT, engine);
    }
    VLOG(2) << "Restored profile " << cfg.DebugString();
    profiles_.push_back(std::move(cfg));
  }
  return OkStatus();
}
// Number of optimization profiles currently stored.
int TrtShapeOptimizationProfile::GetNumProfiles() const {
  return static_cast<int>(profiles_.size());
}
}
}
#endif
// ----- file boundary (dataset row separator removed); next file is the
// corresponding unit test -----
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include <string.h>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_logger.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/test.h"
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
// Converts TensorRT Dims3 entries into TensorShapes. When
// `expand_with_empty_shape_values` is set, the result is padded with
// default-constructed shapes to twice the input length.
std::vector<TensorShape> DimVecToShapeVec(
    std::vector<nvinfer1::Dims3> dimvec,
    bool expand_with_empty_shape_values = false) {
  std::vector<TensorShape> result(dimvec.size());
  for (int idx = 0; idx < dimvec.size(); ++idx) {
    TensorShape converted;
    TF_CHECK_OK(TensorShapeUtils::MakeShape(dimvec[idx].d, dimvec[idx].nbDims,
                                            &converted));
    result[idx] = converted;
  }
  if (expand_with_empty_shape_values) {
    // Placeholder slots for (absent) shape values.
    result.resize(2 * dimvec.size());
  }
  return result;
}
// True iff `dim` has the same rank as `min`/`max` and each extent lies in the
// inclusive range [min, max].
bool DimsContained(const nvinfer1::Dims& dim, const nvinfer1::Dims& min,
                   const nvinfer1::Dims& max) {
  const bool same_rank = dim.nbDims == min.nbDims && dim.nbDims == max.nbDims;
  if (!same_rank) {
    return false;
  }
  for (int axis = 0; axis < dim.nbDims; ++axis) {
    const bool within =
        min.d[axis] <= dim.d[axis] && dim.d[axis] <= max.d[axis];
    if (!within) {
      return false;
    }
  }
  return true;
}
// True iff the two Dims have identical rank and identical extents.
bool DimsEqual(const nvinfer1::Dims& a, const nvinfer1::Dims& b) {
  if (a.nbDims != b.nbDims) {
    return false;
  }
  for (int axis = 0; axis < a.nbDims; ++axis) {
    if (a.d[axis] != b.d[axis]) {
      return false;
    }
  }
  return true;
}
// Parameterized fixture: each test runs once per ProfileStrategy. The
// constructor sets up a TRT builder, an explicit-batch network definition and
// a builder config (creation order matters for the TRT API).
class TrtShapeOptimizationProfileTest
    : public ::testing::TestWithParam<ProfileStrategy> {
 protected:
  TrtShapeOptimizationProfileTest() {
    strategy_ = GetParam();
    builder_ = TrtUniquePtrType<nvinfer1::IBuilder>(
        nvinfer1::createInferBuilder(logger_));
    network_ = TrtUniquePtrType<nvinfer1::INetworkDefinition>(
        builder_->createNetworkV2(flags_));
    builder_config_ = TrtUniquePtrType<nvinfer1::IBuilderConfig>(
        builder_->createBuilderConfig());
    builder_config_->setMaxWorkspaceSize(1 << 10);
  }
  // Defines a simple network with two inputs of shape `dims` and one output
  // computing their elementwise sum.
  void DefineNetwork(nvinfer1::INetworkDefinition* network,
                     nvinfer1::Dims3& dims) {
    ITensorProxyPtr input1 =
        network->addInput("input1", nvinfer1::DataType::kFLOAT, dims);
    EXPECT_NE(nullptr, input1->trt_tensor());
    ITensorProxyPtr input2 =
        network->addInput("input2", nvinfer1::DataType::kFLOAT, dims);
    EXPECT_NE(nullptr, input2->trt_tensor());
    auto layer =
        network->addElementWise(*input1->trt_tensor(), *input2->trt_tensor(),
                                nvinfer1::ElementWiseOperation::kSUM);
    EXPECT_NE(nullptr, layer);
    ITensorProxyPtr output = layer->getOutput(0);
    output->setName("output");
    network->markOutput(*output->trt_tensor());
  }
  // Checks that `dimvec` selects a profile iff `has_prof`, that the selected
  // profile's min/max range contains the shapes, and (optionally) that the
  // opt shapes match exactly.
  void CheckProfile(const std::vector<nvinfer1::Dims3>& dimvec,
                    TrtShapeOptimizationProfile* profile, bool has_prof,
                    bool test_optimality) {
    std::vector<TensorShape> shape_vec = DimVecToShapeVec(dimvec);
    int idx = profile->GetProfileNumber(shape_vec);
    // GetProfileNumber returns -1 when no stored profile covers the shapes.
    ASSERT_EQ(idx >= 0, has_prof);
    if (idx < 0) return;
    int prof_idx = exec_contexts_[idx]->getOptimizationProfile();
    ASSERT_GE(prof_idx, 0);
    for (int j = 0; j < dimvec.size(); j++) {
      nvinfer1::Dims min = engine->getProfileDimensions(
          j, prof_idx, nvinfer1::OptProfileSelector::kMIN);
      nvinfer1::Dims max = engine->getProfileDimensions(
          j, prof_idx, nvinfer1::OptProfileSelector::kMAX);
      nvinfer1::Dims opt = engine->getProfileDimensions(
          j, prof_idx, nvinfer1::OptProfileSelector::kOPT);
      EXPECT_TRUE(DimsContained(dimvec[j], min, max));
      if (test_optimality) {
        EXPECT_TRUE(DimsEqual(dimvec[j], opt));
      }
    }
  }
  Logger& logger_ = *Logger::GetLogger();
  TrtUniquePtrType<nvinfer1::IBuilder> builder_;
  TrtUniquePtrType<nvinfer1::INetworkDefinition> network_;
  TrtUniquePtrType<nvinfer1::IBuilderConfig> builder_config_;
  TrtUniquePtrType<nvinfer1::ICudaEngine> engine;
  std::vector<ExecutionContext> exec_contexts_;
  // Explicit-batch network creation flag required for dynamic shapes.
  const uint32_t flags_ =
      1U << static_cast<int>(
          nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
  ProfileStrategy strategy_;
};
// INSTANTIATE_TEST_SUITE_P replaces the deprecated INSTANTIATE_TEST_CASE_P
// googletest macro; the generated parameterized tests are identical.
INSTANTIATE_TEST_SUITE_P(
    OptProfilesTestInstantiation, TrtShapeOptimizationProfileTest,
    ::testing::Values(ProfileStrategy::kRange, ProfileStrategy::kOptimal,
                      ProfileStrategy::kRangeOptimal,
                      ProfileStrategy::kImplicitBatchModeCompatible));
// With fully static input shapes, a single execution context must be created
// and the (trivial) profile 0 must cover the only possible input shapes.
TEST_P(TrtShapeOptimizationProfileTest, Static) {
  // Only exercised for the kRange strategy; other strategies are skipped.
  if (strategy_ != ProfileStrategy::kRange) return;
  nvinfer1::Dims3 dims(8, 8, 10);
  DefineNetwork(network_.get(), dims);
  TrtShapeOptimizationProfile profile;
  TF_CHECK_OK(profile.ConfigureBuilder(builder_.get(), builder_config_.get(),
                                       network_.get()));
  engine = TrtUniquePtrType<nvinfer1::ICudaEngine>(
      builder_->buildEngineWithConfig(*network_, *builder_config_));
  EXPECT_NE(nullptr, engine);
  TF_CHECK_OK(profile.CreateExecutionContexts(engine.get(), &exec_contexts_));
  // A static engine still gets exactly one execution context.
  ASSERT_EQ(exec_contexts_.size(), 1);
  EXPECT_NE(nullptr, exec_contexts_[0]);
  std::vector<nvinfer1::Dims3> dim_vec(2, dims);
  std::vector<TensorShape> shape_vec = DimVecToShapeVec(dim_vec);
  EXPECT_EQ(0, profile.GetProfileNumber(shape_vec));
}
// Dynamic-shape path: registers three input-shape collections, builds an
// engine with profiles per the parameterized strategy, and verifies both the
// expected number of execution contexts and profile coverage for seen and
// unseen shapes.
TEST_P(TrtShapeOptimizationProfileTest, Dynamic) {
  nvinfer1::Dims3 dims(-1, -1, 10);
  DefineNetwork(network_.get(), dims);
  TrtShapeOptimizationProfile profile;
  std::vector<bool> input_mask(2, true);
  profile.SetInputMask(input_mask);
  std::vector<std::vector<nvinfer1::Dims3>> input_profiles{
      {nvinfer1::Dims3(2, 2, 10), nvinfer1::Dims3(2, 2, 10)},
      {nvinfer1::Dims3(3, 3, 10), nvinfer1::Dims3(3, 3, 10)},
      {nvinfer1::Dims3(16, 16, 10), nvinfer1::Dims3(16, 16, 10)},
  };
  std::vector<nvinfer1::Dims3> unseen_shapes{nvinfer1::Dims3(5, 5, 10),
                                             nvinfer1::Dims3(9, 9, 10)};
  for (auto dim_vec : input_profiles) {
    std::vector<TensorShape> shape_vec = DimVecToShapeVec(dim_vec, true);
    profile.AddShape(shape_vec);
  }
  std::vector<PartialTensorShape> input_partial_shapes;
  TF_CHECK_OK(GetNetworkInputShapes(network_.get(), &input_partial_shapes));
  profile.InitProfiles(input_partial_shapes, strategy_);
  TF_CHECK_OK(profile.ConfigureBuilder(builder_.get(), builder_config_.get(),
                                       network_.get()));
  engine = TrtUniquePtrType<nvinfer1::ICudaEngine>(
      builder_->buildEngineWithConfig(*network_.get(), *builder_config_.get()));
  ASSERT_NE(nullptr, engine);
  TF_CHECK_OK(profile.CreateExecutionContexts(engine.get(), &exec_contexts_));
  // Initialized to 0 so the comparison below never reads an uninitialized
  // value if a new ProfileStrategy is added without extending this switch.
  int n_profiles_exp = 0;
  switch (strategy_) {
    case (ProfileStrategy::kImplicitBatchModeCompatible):
    case (ProfileStrategy::kOptimal):
      n_profiles_exp = input_profiles.size();
      break;
    case (ProfileStrategy::kRange):
      n_profiles_exp = 1;
      break;
    case (ProfileStrategy::kRangeOptimal):
      n_profiles_exp = 1 + input_profiles.size();
      break;
  }
  EXPECT_EQ(exec_contexts_.size(), n_profiles_exp);
  profile.SetShapeTensorMask(network_.get());
  // This network has no shape tensors.
  EXPECT_EQ(profile.HasShapeTensor(), false);
  for (auto dimvec : input_profiles) {
    // Optimal-style strategies must reproduce the registered shapes exactly.
    bool test_optimal_prof = strategy_ == ProfileStrategy::kOptimal ||
                             strategy_ == ProfileStrategy::kRangeOptimal;
    CheckProfile(dimvec, &profile, true, test_optimal_prof);
  }
  // Range-style strategies must also cover shapes never registered, as long
  // as they fall inside the min/max envelope.
  bool has_prof = (strategy_ == ProfileStrategy::kRange ||
                   strategy_ == ProfileStrategy::kRangeOptimal);
  CheckProfile(unseen_shapes, &profile, has_prof, false);
}
}
}
#endif
// Source: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles.cc
// Test:   https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles_test.cc
// Commit: 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
// Dataset row: 53279176-e6bf-43d8-9e11-0769e087a2d8 | cpp | tensorflow/tensorflow | literal_test_util
// File: third_party/xla/xla/tests/literal_test_util.cc
// Test: third_party/xla/xla/tests/literal_test_util_test.cc
#include "xla/tests/literal_test_util.h"
#include "absl/strings/str_format.h"
#include "xla/literal_comparison.h"
#include "tsl/platform/env.h"
#include "tsl/platform/path.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
// Dumps `literal` both as a binary LiteralProto (.pb) and as text (.txt)
// under the undeclared-outputs dir (falling back to the test temp dir), and
// logs where the files were written.
void WriteLiteralToTempFile(const LiteralSlice& literal,
                            const std::string& name) {
  auto* env = tsl::Env::Default();
  std::string outdir;
  if (!tsl::io::GetTestUndeclaredOutputsDir(&outdir)) {
    outdir = tsl::testing::TmpDir();
  }
  // NowMicros() makes the file name unique across invocations.
  const std::string basename =
      absl::StrFormat("tempfile-%d-%s", env->NowMicros(), name);
  const std::string filename = tsl::io::JoinPath(outdir, basename);
  TF_CHECK_OK(tsl::WriteBinaryProto(env, absl::StrCat(filename, ".pb"),
                                    literal.ToProto()));
  TF_CHECK_OK(tsl::WriteStringToFile(env, absl::StrCat(filename, ".txt"),
                                     literal.ToString()));
  LOG(ERROR) << "wrote Literal to " << name << " file: " << filename
             << ".{pb,txt}";
}
// Miscompare callback for literal_comparison::Near: logs truncated views of
// both literals and dumps expected/actual/mismatch literals to temp files so
// failures can be inspected offline.
void OnMiscompare(const LiteralSlice& expected, const LiteralSlice& actual,
                  const LiteralSlice& mismatches,
                  const ShapeIndex& /*shape_index*/,
                  const literal_comparison::ErrorBuckets& /*error_buckets*/) {
  LOG(INFO) << "expected: " << ShapeUtil::HumanString(expected.shape()) << " "
            << literal_comparison::ToStringTruncated(expected);
  LOG(INFO) << "actual: " << ShapeUtil::HumanString(actual.shape()) << " "
            << literal_comparison::ToStringTruncated(actual);
  LOG(INFO) << "Dumping literals to temp files...";
  WriteLiteralToTempFile(expected, "expected");
  WriteLiteralToTempFile(actual, "actual");
  WriteLiteralToTempFile(mismatches, "mismatches");
}
// Maps an absl::Status onto a googletest assertion result, forwarding the
// status message on failure.
::testing::AssertionResult StatusToAssertion(const absl::Status& s) {
  if (!s.ok()) {
    return ::testing::AssertionFailure() << s.message();
  }
  return ::testing::AssertionSuccess();
}
}
// Shape equality (layout-insensitive), adapted to an assertion result.
::testing::AssertionResult LiteralTestUtil::EqualShapes(
    const Shape& expected, const Shape& actual) {
  const auto status = literal_comparison::EqualShapes(expected, actual);
  return StatusToAssertion(status);
}
// Strict shape comparison including layout, via the protos' short debug
// string representation.
::testing::AssertionResult LiteralTestUtil::EqualShapesAndLayouts(
    const Shape& expected, const Shape& actual) {
  const std::string expected_str = expected.ShortDebugString();
  const std::string actual_str = actual.ShortDebugString();
  if (expected_str == actual_str) {
    return ::testing::AssertionSuccess();
  }
  return ::testing::AssertionFailure()
         << "want: " << expected_str << " got: " << actual_str;
}
// Exact literal equality, adapted to an assertion result.
::testing::AssertionResult LiteralTestUtil::Equal(
    const LiteralSlice& expected, const LiteralSlice& actual) {
  const absl::Status comparison = literal_comparison::Equal(expected, actual);
  return StatusToAssertion(comparison);
}
// Approximate literal comparison within `error_spec`; miscompares are logged
// and dumped via OnMiscompare.
::testing::AssertionResult LiteralTestUtil::Near(
    const LiteralSlice& expected, const LiteralSlice& actual,
    const ErrorSpec& error_spec, std::optional<bool> detailed_message) {
  const absl::Status status = literal_comparison::Near(
      expected, actual, error_spec, detailed_message, &OnMiscompare);
  return StatusToAssertion(status);
}
// Dispatches to Near when an ErrorSpec is supplied, otherwise to exact Equal.
::testing::AssertionResult LiteralTestUtil::NearOrEqual(
    const LiteralSlice& expected, const LiteralSlice& actual,
    const std::optional<ErrorSpec>& error) {
  if (!error.has_value()) {
    VLOG(1) << "Expects equal";
    return StatusToAssertion(literal_comparison::Equal(expected, actual));
  }
  VLOG(1) << "Expects near";
  return StatusToAssertion(literal_comparison::Near(
      expected, actual, *error, /*detailed_message=*/std::nullopt,
      &OnMiscompare));
}
}
// ----- file boundary (dataset column separator removed); next file is
// third_party/xla/xla/tests/literal_test_util_test.cc -----
#include "xla/tests/literal_test_util.h"
#include <vector>
#include "absl/strings/str_join.h"
#include "xla/literal.h"
#include "xla/test_helpers.h"
#include "tsl/platform/env.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/path.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
// A tuple literal must compare equal to itself (int32, complex64, complex128).
TEST(LiteralTestUtilTest, ComparesEqualTuplesEqual) {
  Literal literal = LiteralUtil::MakeTupleFromSlices({
      LiteralUtil::CreateR0<int32_t>(42),
      LiteralUtil::CreateR0<int32_t>(64),
  });
  EXPECT_TRUE(LiteralTestUtil::Equal(literal, literal));
}
TEST(LiteralTestUtilTest, ComparesEqualComplex64TuplesEqual) {
  Literal literal = LiteralUtil::MakeTupleFromSlices({
      LiteralUtil::CreateR0<complex64>({42.0, 64.0}),
      LiteralUtil::CreateR0<complex64>({64.0, 42.0}),
  });
  EXPECT_TRUE(LiteralTestUtil::Equal(literal, literal));
}
TEST(LiteralTestUtilTest, ComparesEqualComplex128TuplesEqual) {
  Literal literal = LiteralUtil::MakeTupleFromSlices({
      LiteralUtil::CreateR0<complex128>({42.0, 64.0}),
      LiteralUtil::CreateR0<complex128>({64.0, 42.0}),
  });
  EXPECT_TRUE(LiteralTestUtil::Equal(literal, literal));
}
// Complex tuples differing in element order, real part, or imaginary part
// must all compare unequal (complex64 and complex128 variants).
TEST(LiteralTestUtilTest, ComparesUnequalComplex64TuplesUnequal) {
  Literal literal0 = LiteralUtil::MakeTupleFromSlices({
      LiteralUtil::CreateR0<complex64>({42.0, 64.0}),
      LiteralUtil::CreateR0<complex64>({64.0, 42.0}),
  });
  Literal literal1 = LiteralUtil::MakeTupleFromSlices({
      LiteralUtil::CreateR0<complex64>({64.0, 42.0}),
      LiteralUtil::CreateR0<complex64>({42.0, 64.0}),
  });
  Literal literal2 = LiteralUtil::MakeTupleFromSlices({
      LiteralUtil::CreateR0<complex64>({42.42, 64.0}),
      LiteralUtil::CreateR0<complex64>({64.0, 42.0}),
  });
  Literal literal3 = LiteralUtil::MakeTupleFromSlices({
      LiteralUtil::CreateR0<complex64>({42.0, 64.0}),
      LiteralUtil::CreateR0<complex64>({64.0, 42.42}),
  });
  EXPECT_FALSE(LiteralTestUtil::Equal(literal0, literal1));
  EXPECT_FALSE(LiteralTestUtil::Equal(literal0, literal2));
  EXPECT_FALSE(LiteralTestUtil::Equal(literal0, literal3));
  EXPECT_FALSE(LiteralTestUtil::Equal(literal2, literal3));
}
TEST(LiteralTestUtilTest, ComparesUnequalComplex128TuplesUnequal) {
  Literal literal0 = LiteralUtil::MakeTupleFromSlices({
      LiteralUtil::CreateR0<complex128>({42.0, 64.0}),
      LiteralUtil::CreateR0<complex128>({64.0, 42.0}),
  });
  Literal literal1 = LiteralUtil::MakeTupleFromSlices({
      LiteralUtil::CreateR0<complex128>({64.0, 42.0}),
      LiteralUtil::CreateR0<complex128>({42.0, 64.0}),
  });
  Literal literal2 = LiteralUtil::MakeTupleFromSlices({
      LiteralUtil::CreateR0<complex128>({42.42, 64.0}),
      LiteralUtil::CreateR0<complex128>({64.0, 42.0}),
  });
  Literal literal3 = LiteralUtil::MakeTupleFromSlices({
      LiteralUtil::CreateR0<complex128>({42.0, 64.0}),
      LiteralUtil::CreateR0<complex128>({64.0, 42.42}),
  });
  EXPECT_FALSE(LiteralTestUtil::Equal(literal0, literal1));
  EXPECT_FALSE(LiteralTestUtil::Equal(literal0, literal2));
  EXPECT_FALSE(LiteralTestUtil::Equal(literal0, literal3));
  EXPECT_FALSE(LiteralTestUtil::Equal(literal2, literal3));
}
// Equal() on tuples with swapped elements must fail; verified via
// ASSERT_DEATH on a CHECK that (incorrectly) expects the literals to match.
TEST(LiteralTestUtilTest, ComparesUnequalTuplesUnequal) {
  auto unequal_things_are_equal = [] {
    Literal lhs = LiteralUtil::MakeTupleFromSlices({
        LiteralUtil::CreateR0<int32_t>(42),
        LiteralUtil::CreateR0<int32_t>(64),
    });
    Literal rhs = LiteralUtil::MakeTupleFromSlices({
        LiteralUtil::CreateR0<int32_t>(64),
        LiteralUtil::CreateR0<int32_t>(42),
    });
    CHECK(LiteralTestUtil::Equal(lhs, rhs)) << "LHS and RHS are unequal";
  };
  ASSERT_DEATH(unequal_things_are_equal(), "LHS and RHS are unequal");
}
// A failing Near() comparison must dump three literals (expected, actual,
// mismatch mask) as .pb files into the temp/output directory. The directory
// is cleaned first so exactly three new files are expected afterwards.
TEST(LiteralTestUtilTest, ExpectNearFailurePlacesResultsInTemporaryDirectory) {
  auto dummy_lambda = [] {
    auto two = LiteralUtil::CreateR0<float>(2);
    auto four = LiteralUtil::CreateR0<float>(4);
    ErrorSpec error(0.001);
    CHECK(LiteralTestUtil::Near(two, four, error)) << "two is not near four";
  };
  tsl::Env* env = tsl::Env::Default();
  std::string outdir;
  if (!tsl::io::GetTestUndeclaredOutputsDir(&outdir)) {
    outdir = tsl::testing::TmpDir();
  }
  std::string pattern = tsl::io::JoinPath(outdir, "tempfile-*.pb");
  std::vector<std::string> files;
  TF_CHECK_OK(env->GetMatchingPaths(pattern, &files));
  // Remove leftovers from previous runs so the count below is meaningful.
  for (const auto& f : files) {
    TF_CHECK_OK(env->DeleteFile(f)) << f;
  }
  ASSERT_DEATH(dummy_lambda(), "two is not near four");
  std::vector<std::string> results;
  TF_CHECK_OK(env->GetMatchingPaths(pattern, &results));
  LOG(INFO) << "results: [" << absl::StrJoin(results, ", ") << "]";
  EXPECT_EQ(3, results.size());
  // Each dumped proto must round-trip back to the literal it was written for.
  for (const std::string& result : results) {
    LiteralProto literal_proto;
    TF_CHECK_OK(
        tsl::ReadBinaryProto(tsl::Env::Default(), result, &literal_proto));
    Literal literal = Literal::CreateFromProto(literal_proto).value();
    if (result.find("expected") != std::string::npos) {
      EXPECT_EQ("f32[] 2", literal.ToString());
    } else if (result.find("actual") != std::string::npos) {
      EXPECT_EQ("f32[] 4", literal.ToString());
    } else if (result.find("mismatches") != std::string::npos) {
      EXPECT_EQ("pred[] true", literal.ToString());
    } else {
      FAIL() << "unknown file in temporary directory: " << result;
    }
  }
}
// A failed Equal() must include both literal values in its failure message.
TEST(LiteralTestUtilTest, NotEqualHasValuesInMessage) {
  auto expected = LiteralUtil::CreateR1<int32_t>({1, 2, 3});
  auto actual = LiteralUtil::CreateR1<int32_t>({4, 5, 6});
  ::testing::AssertionResult result = LiteralTestUtil::Equal(expected, actual);
  EXPECT_THAT(result.message(),
              ::testing::HasSubstr("Expected literal:\ns32[3] {1, 2, 3}"));
  EXPECT_THAT(result.message(),
              ::testing::HasSubstr("Actual literal:\ns32[3] {4, 5, 6}"));
}
// Near-comparator behavior on rank-1 literals: identical values compare near;
// a difference in either the real or the imaginary component (complex types)
// breaks the comparison; NaN equals NaN under Near; mismatched lengths and
// out-of-float-range doubles are handled.
TEST(LiteralTestUtilTest, NearComparatorR1) {
  auto a = LiteralUtil::CreateR1<float>(
      {0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8});
  auto b = LiteralUtil::CreateR1<float>(
      {0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8});
  EXPECT_TRUE(LiteralTestUtil::Near(a, b, ErrorSpec{0.0001}));
}
// c differs from a in the last real part; d differs in the last imaginary
// part — both must fail against a, and against each other.
TEST(LiteralTestUtilTest, NearComparatorR1Complex64) {
  auto a = LiteralUtil::CreateR1<complex64>({{0.0, 1.0},
                                             {0.1, 1.1},
                                             {0.2, 1.2},
                                             {0.3, 1.3},
                                             {0.4, 1.4},
                                             {0.5, 1.5},
                                             {0.6, 1.6},
                                             {0.7, 1.7},
                                             {0.8, 1.8}});
  auto b = LiteralUtil::CreateR1<complex64>({{0.0, 1.0},
                                             {0.1, 1.1},
                                             {0.2, 1.2},
                                             {0.3, 1.3},
                                             {0.4, 1.4},
                                             {0.5, 1.5},
                                             {0.6, 1.6},
                                             {0.7, 1.7},
                                             {0.8, 1.8}});
  auto c = LiteralUtil::CreateR1<complex64>({{0.0, 1.0},
                                             {0.1, 1.1},
                                             {0.2, 1.2},
                                             {0.3, 1.3},
                                             {0.4, 1.4},
                                             {0.5, 1.5},
                                             {0.6, 1.6},
                                             {0.7, 1.7},
                                             {0.9, 1.8}});
  auto d = LiteralUtil::CreateR1<complex64>({{0.0, 1.0},
                                             {0.1, 1.1},
                                             {0.2, 1.2},
                                             {0.3, 1.3},
                                             {0.4, 1.4},
                                             {0.5, 1.5},
                                             {0.6, 1.6},
                                             {0.7, 1.7},
                                             {0.8, 1.9}});
  EXPECT_TRUE(LiteralTestUtil::Near(a, b, ErrorSpec{0.0001}));
  EXPECT_FALSE(LiteralTestUtil::Near(a, c, ErrorSpec{0.0001}));
  EXPECT_FALSE(LiteralTestUtil::Near(a, d, ErrorSpec{0.0001}));
  EXPECT_FALSE(LiteralTestUtil::Near(c, d, ErrorSpec{0.0001}));
}
TEST(LiteralTestUtilTest, NearComparatorR1Complex128) {
  auto a = LiteralUtil::CreateR1<complex128>({{0.0, 1.0},
                                              {0.1, 1.1},
                                              {0.2, 1.2},
                                              {0.3, 1.3},
                                              {0.4, 1.4},
                                              {0.5, 1.5},
                                              {0.6, 1.6},
                                              {0.7, 1.7},
                                              {0.8, 1.8}});
  auto b = LiteralUtil::CreateR1<complex128>({{0.0, 1.0},
                                              {0.1, 1.1},
                                              {0.2, 1.2},
                                              {0.3, 1.3},
                                              {0.4, 1.4},
                                              {0.5, 1.5},
                                              {0.6, 1.6},
                                              {0.7, 1.7},
                                              {0.8, 1.8}});
  auto c = LiteralUtil::CreateR1<complex128>({{0.0, 1.0},
                                              {0.1, 1.1},
                                              {0.2, 1.2},
                                              {0.3, 1.3},
                                              {0.4, 1.4},
                                              {0.5, 1.5},
                                              {0.6, 1.6},
                                              {0.7, 1.7},
                                              {0.9, 1.8}});
  auto d = LiteralUtil::CreateR1<complex128>({{0.0, 1.0},
                                              {0.1, 1.1},
                                              {0.2, 1.2},
                                              {0.3, 1.3},
                                              {0.4, 1.4},
                                              {0.5, 1.5},
                                              {0.6, 1.6},
                                              {0.7, 1.7},
                                              {0.8, 1.9}});
  EXPECT_TRUE(LiteralTestUtil::Near(a, b, ErrorSpec{0.0001}));
  EXPECT_FALSE(LiteralTestUtil::Near(a, c, ErrorSpec{0.0001}));
  EXPECT_FALSE(LiteralTestUtil::Near(a, d, ErrorSpec{0.0001}));
  EXPECT_FALSE(LiteralTestUtil::Near(c, d, ErrorSpec{0.0001}));
}
// NaN in the same position on both sides is accepted by Near.
TEST(LiteralTestUtilTest, NearComparatorR1Nan) {
  auto a = LiteralUtil::CreateR1<float>(
      {0.0, 0.1, 0.2, 0.3, NAN, 0.5, 0.6, 0.7, 0.8});
  auto b = LiteralUtil::CreateR1<float>(
      {0.0, 0.1, 0.2, 0.3, NAN, 0.5, 0.6, 0.7, 0.8});
  EXPECT_TRUE(LiteralTestUtil::Near(a, b, ErrorSpec{0.0001}));
}
// Different lengths must fail in both comparison directions.
TEST(LiteralTestUtil, NearComparatorDifferentLengths) {
  auto a = LiteralUtil::CreateR1<float>(
      {0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8});
  auto b =
      LiteralUtil::CreateR1<float>({0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7});
  EXPECT_FALSE(LiteralTestUtil::Near(a, b, ErrorSpec{0.0001}));
  EXPECT_FALSE(LiteralTestUtil::Near(b, a, ErrorSpec{0.0001}));
}
// Values beyond float range must still compare near to themselves as doubles.
TEST(LiteralTestUtilTest, ExpectNearDoubleOutsideFloatValueRange) {
  auto two_times_float_max =
      LiteralUtil::CreateR0<double>(2.0 * std::numeric_limits<float>::max());
  ErrorSpec error(0.001);
  EXPECT_TRUE(
      LiteralTestUtil::Near(two_times_float_max, two_times_float_max, error));
}
// Dynamic-size literals: elements beyond the dynamic bound are padding and
// must be ignored by Equal (integer literals) and Near (float literals).
TEST(LiteralTestUtilTest, DynamicEqualityR1) {
  auto literal1 = Literal(ShapeUtil::MakeShape(U32, {10}));
  literal1.PopulateR1<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
  literal1.SetDynamicSize(0, 5);
  auto literal2 = Literal(ShapeUtil::MakeShape(U32, {10}));
  literal2.PopulateR1<uint32_t>({1, 2, 3, 4, 5, 99, 99, 99, 99, 99});
  literal2.SetDynamicSize(0, 5);
  EXPECT_TRUE(LiteralTestUtil::Equal(literal1, literal2));
}
TEST(LiteralTestUtilTest, DynamicEqualityR2Dim) {
  auto literal1 = Literal(ShapeUtil::MakeShape(U32, {3, 3}));
  literal1.PopulateR2<uint32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
  literal1.SetDynamicSize(0, 2);
  auto literal2 = Literal(ShapeUtil::MakeShape(U32, {3, 3}));
  literal2.PopulateR2<uint32_t>({{1, 2, 3}, {4, 5, 6}, {99, 99, 99}});
  literal2.SetDynamicSize(0, 2);
  EXPECT_TRUE(LiteralTestUtil::Equal(literal1, literal2));
}
TEST(LiteralTestUtilTest, DynamicEqualityR2Dim1) {
  auto literal1 = Literal(ShapeUtil::MakeShape(U32, {3, 3}));
  literal1.PopulateR2<uint32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
  literal1.SetDynamicSize(1, 2);
  auto literal2 = Literal(ShapeUtil::MakeShape(U32, {3, 3}));
  literal2.PopulateR2<uint32_t>({{1, 2, 99}, {4, 5, 99}, {7, 8, 99}});
  literal2.SetDynamicSize(1, 2);
  EXPECT_TRUE(LiteralTestUtil::Equal(literal1, literal2));
}
TEST(LiteralTestUtilTest, DynamicNearEqualityR1) {
  auto literal1 = Literal(ShapeUtil::MakeShape(F32, {10}));
  literal1.PopulateR1<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
  literal1.SetDynamicSize(0, 5);
  auto literal2 = Literal(ShapeUtil::MakeShape(F32, {10}));
  literal2.PopulateR1<float>({1, 2, 3, 4, 5, 99, 99, 99, 99, 99});
  literal2.SetDynamicSize(0, 5);
  ErrorSpec error(0.001);
  EXPECT_TRUE(LiteralTestUtil::Near(literal1, literal2, error));
}
TEST(LiteralTestUtilTest, DynamicNearEqualityR2Dim) {
  auto literal1 = Literal(ShapeUtil::MakeShape(F32, {3, 3}));
  literal1.PopulateR2<float>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
  literal1.SetDynamicSize(0, 2);
  auto literal2 = Literal(ShapeUtil::MakeShape(F32, {3, 3}));
  literal2.PopulateR2<float>({{1, 2, 3}, {4, 5, 6}, {99, 99, 99}});
  literal2.SetDynamicSize(0, 2);
  ErrorSpec error(0.001);
  EXPECT_TRUE(LiteralTestUtil::Near(literal1, literal2, error));
}
TEST(LiteralTestUtilTest, DynamicNearEqualityR2Dim1) {
  auto literal1 = Literal(ShapeUtil::MakeShape(F32, {3, 3}));
  literal1.PopulateR2<float>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
  literal1.SetDynamicSize(1, 2);
  auto literal2 = Literal(ShapeUtil::MakeShape(F32, {3, 3}));
  literal2.PopulateR2<float>({{1, 2, 99}, {4, 5, 99}, {7, 8, 99}});
  literal2.SetDynamicSize(1, 2);
  ErrorSpec error(0.001);
  EXPECT_TRUE(LiteralTestUtil::Near(literal1, literal2, error));
}
// Mismatched dynamic metadata must make otherwise-identical literals compare
// unequal: different dynamic bounds, dynamic-vs-static on either side, and
// the same bound applied to different dimensions — each checked for both
// Equal (U32) and Near (F32).
TEST(LiteralTestUtilTest, UnequalDynamicDimensionsR1) {
  auto literal1 = Literal(ShapeUtil::MakeShape(U32, {10}));
  literal1.PopulateR1<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
  literal1.SetDynamicSize(0, 5);
  auto literal2 = Literal(ShapeUtil::MakeShape(U32, {10}));
  literal2.PopulateR1<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
  literal2.SetDynamicSize(0, 6);
  EXPECT_FALSE(LiteralTestUtil::Equal(literal1, literal2));
}
TEST(LiteralTestUtilTest, UnequalDynamicDimensionsR1_F32) {
  auto literal1 = Literal(ShapeUtil::MakeShape(F32, {10}));
  literal1.PopulateR1<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
  literal1.SetDynamicSize(0, 5);
  auto literal2 = Literal(ShapeUtil::MakeShape(F32, {10}));
  literal2.PopulateR1<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
  literal2.SetDynamicSize(0, 6);
  EXPECT_FALSE(LiteralTestUtil::Near(literal1, literal2, ErrorSpec{0.0001}));
}
TEST(LiteralTestUtilTest, ExpectedIsDynamicActualIsNotR1) {
  auto literal1 = Literal(ShapeUtil::MakeShape(U32, {10}));
  literal1.PopulateR1<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
  literal1.SetDynamicSize(0, 5);
  auto literal2 = Literal(ShapeUtil::MakeShape(U32, {10}));
  literal2.PopulateR1<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
  EXPECT_FALSE(LiteralTestUtil::Equal(literal1, literal2));
}
TEST(LiteralTestUtilTest, ExpectedIsDynamicActualIsNotR1_F32) {
  auto literal1 = Literal(ShapeUtil::MakeShape(F32, {10}));
  literal1.PopulateR1<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
  literal1.SetDynamicSize(0, 5);
  auto literal2 = Literal(ShapeUtil::MakeShape(F32, {10}));
  literal2.PopulateR1<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
  EXPECT_FALSE(LiteralTestUtil::Near(literal1, literal2, ErrorSpec{0.0001}));
}
TEST(LiteralTestUtilTest, ActualIsDynamicExpectedIsNotR1) {
  auto literal1 = Literal(ShapeUtil::MakeShape(U32, {10}));
  literal1.PopulateR1<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
  auto literal2 = Literal(ShapeUtil::MakeShape(U32, {10}));
  literal2.PopulateR1<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
  literal2.SetDynamicSize(0, 5);
  EXPECT_FALSE(LiteralTestUtil::Equal(literal1, literal2));
}
TEST(LiteralTestUtilTest, ActualIsDynamicExpectedIsNotR1_F32) {
  auto literal1 = Literal(ShapeUtil::MakeShape(F32, {10}));
  literal1.PopulateR1<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
  auto literal2 = Literal(ShapeUtil::MakeShape(F32, {10}));
  literal2.PopulateR1<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
  literal2.SetDynamicSize(0, 5);
  EXPECT_FALSE(LiteralTestUtil::Near(literal1, literal2, ErrorSpec{0.0001}));
}
TEST(LiteralTestUtilTest, UnequalDynamicDimensionsR2Dim0) {
  auto literal1 = Literal(ShapeUtil::MakeShape(U32, {3, 3}));
  literal1.PopulateR2<uint32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
  literal1.SetDynamicSize(0, 2);
  auto literal2 = Literal(ShapeUtil::MakeShape(U32, {3, 3}));
  literal2.PopulateR2<uint32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
  literal2.SetDynamicSize(0, 3);
  EXPECT_FALSE(LiteralTestUtil::Equal(literal1, literal2));
}
TEST(LiteralTestUtilTest, UnequalDynamicDimensionsR2Dim0_F32) {
  auto literal1 = Literal(ShapeUtil::MakeShape(F32, {3, 3}));
  literal1.PopulateR2<float>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
  literal1.SetDynamicSize(0, 2);
  auto literal2 = Literal(ShapeUtil::MakeShape(F32, {3, 3}));
  literal2.PopulateR2<float>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
  literal2.SetDynamicSize(0, 3);
  EXPECT_FALSE(LiteralTestUtil::Near(literal1, literal2, ErrorSpec{0.0001}));
}
TEST(LiteralTestUtilTest, UnequalDynamicDimensionsR2Dim1) {
  auto literal1 = Literal(ShapeUtil::MakeShape(U32, {3, 3}));
  literal1.PopulateR2<uint32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
  literal1.SetDynamicSize(1, 2);
  auto literal2 = Literal(ShapeUtil::MakeShape(U32, {3, 3}));
  literal2.PopulateR2<uint32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
  literal2.SetDynamicSize(1, 3);
  EXPECT_FALSE(LiteralTestUtil::Equal(literal1, literal2));
}
TEST(LiteralTestUtilTest, UnequalDynamicDimensionsR2Dim1_F32) {
  auto literal1 = Literal(ShapeUtil::MakeShape(F32, {3, 3}));
  literal1.PopulateR2<float>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
  literal1.SetDynamicSize(1, 2);
  auto literal2 = Literal(ShapeUtil::MakeShape(F32, {3, 3}));
  literal2.PopulateR2<float>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
  literal2.SetDynamicSize(1, 3);
  EXPECT_FALSE(LiteralTestUtil::Near(literal1, literal2, ErrorSpec{0.0001}));
}
TEST(LiteralTestUtilTest, UnequalDynamicDimensionsR2DifferentDimensions) {
  auto literal1 = Literal(ShapeUtil::MakeShape(U32, {3, 3}));
  literal1.PopulateR2<uint32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
  literal1.SetDynamicSize(1, 2);
  auto literal2 = Literal(ShapeUtil::MakeShape(U32, {3, 3}));
  literal2.PopulateR2<uint32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
  literal2.SetDynamicSize(0, 2);
  EXPECT_FALSE(LiteralTestUtil::Equal(literal1, literal2));
}
TEST(LiteralTestUtilTest, UnequalDynamicDimensionsR2DifferentDimensions_F32) {
  auto literal1 = Literal(ShapeUtil::MakeShape(F32, {3, 3}));
  literal1.PopulateR2<float>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
  literal1.SetDynamicSize(1, 2);
  auto literal2 = Literal(ShapeUtil::MakeShape(F32, {3, 3}));
  literal2.PopulateR2<float>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
  literal2.SetDynamicSize(0, 2);
  EXPECT_FALSE(LiteralTestUtil::Near(literal1, literal2, ErrorSpec{0.0001}));
}
TEST(LiteralTestUtilTest, DynamicTuplesAreEqual) {
auto literal1 = Literal(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(U32, {5}), ShapeUtil::MakeShape(U32, {5})}));
auto literal2 = Literal(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(U32, {5}), ShapeUtil::MakeShape(U32, {5})}));
MutableBorrowingLiteral(&literal1, {0})
.PopulateR1<uint32_t>({1, 2, 3, 4, 5});
MutableBorrowingLiteral(&literal1, {1})
.PopulateR1<uint32_t>({1, 2, 3, 4, 5});
literal1.SetDynamicSize(0, {0}, 5);
MutableBorrowingLiteral(&literal2, {0})
.PopulateR1<uint32_t>({1, 2, 3, 4, 5});
MutableBorrowingLiteral(&literal2, {1})
.PopulateR1<uint32_t>({1, 2, 3, 4, 5});
literal2.SetDynamicSize(0, {0}, 5);
EXPECT_TRUE(LiteralTestUtil::Equal(literal1, literal2));
}
// F32 variant of DynamicTuplesAreEqual using the approximate comparator.
TEST(LiteralTestUtilTest, DynamicTuplesAreNear) {
  const auto tuple_shape = ShapeUtil::MakeTupleShape(
      {ShapeUtil::MakeShape(F32, {5}), ShapeUtil::MakeShape(F32, {5})});
  auto lhs = Literal(tuple_shape);
  auto rhs = Literal(tuple_shape);
  for (Literal* literal : {&lhs, &rhs}) {
    MutableBorrowingLiteral(literal, {0}).PopulateR1<float>({1, 2, 3, 4, 5});
    MutableBorrowingLiteral(literal, {1}).PopulateR1<float>({1, 2, 3, 4, 5});
    literal->SetDynamicSize(0, {0}, 5);
  }
  EXPECT_TRUE(LiteralTestUtil::Near(lhs, rhs, ErrorSpec{0.0001}));
}
// Elements beyond the dynamic bound must be ignored by the comparison:
// the trailing 99s on one side fall outside the bound of 3.
TEST(LiteralTestUtilTest, DynamicTuplesAreEqualWithinDynamicBounds) {
  const auto tuple_shape = ShapeUtil::MakeTupleShape(
      {ShapeUtil::MakeShape(U32, {5}), ShapeUtil::MakeShape(U32, {5})});
  auto lhs = Literal(tuple_shape);
  auto rhs = Literal(tuple_shape);
  MutableBorrowingLiteral(&lhs, {0}).PopulateR1<uint32_t>({1, 2, 3, 4, 5});
  MutableBorrowingLiteral(&lhs, {1}).PopulateR1<uint32_t>({1, 2, 3, 4, 5});
  MutableBorrowingLiteral(&rhs, {0}).PopulateR1<uint32_t>({1, 2, 3, 99, 99});
  MutableBorrowingLiteral(&rhs, {1}).PopulateR1<uint32_t>({1, 2, 3, 4, 5});
  lhs.SetDynamicSize(0, {0}, 3);
  rhs.SetDynamicSize(0, {0}, 3);
  EXPECT_TRUE(LiteralTestUtil::Equal(lhs, rhs));
}
// F32 variant: data outside the dynamic bound of 3 (the 99s) must not
// affect the approximate comparison.
TEST(LiteralTestUtilTest, DynamicTuplesAreNearWithinDynamicBounds) {
  const auto tuple_shape = ShapeUtil::MakeTupleShape(
      {ShapeUtil::MakeShape(F32, {5}), ShapeUtil::MakeShape(F32, {5})});
  auto lhs = Literal(tuple_shape);
  auto rhs = Literal(tuple_shape);
  MutableBorrowingLiteral(&lhs, {0}).PopulateR1<float>({1, 2, 3, 4, 5});
  MutableBorrowingLiteral(&lhs, {1}).PopulateR1<float>({1, 2, 3, 4, 5});
  MutableBorrowingLiteral(&rhs, {0}).PopulateR1<float>({1, 2, 3, 99, 99});
  MutableBorrowingLiteral(&rhs, {1}).PopulateR1<float>({1, 2, 3, 4, 5});
  lhs.SetDynamicSize(0, {0}, 3);
  rhs.SetDynamicSize(0, {0}, 3);
  EXPECT_TRUE(LiteralTestUtil::Near(lhs, rhs, ErrorSpec{0.0001}));
}
// Same element data but different dynamic sizes (5 vs. 4) must be unequal.
TEST(LiteralTestUtilTest, DynamicTuplesHaveDifferentDynamicSizes) {
  const auto tuple_shape = ShapeUtil::MakeTupleShape(
      {ShapeUtil::MakeShape(U32, {5}), ShapeUtil::MakeShape(U32, {5})});
  auto lhs = Literal(tuple_shape);
  auto rhs = Literal(tuple_shape);
  for (Literal* literal : {&lhs, &rhs}) {
    MutableBorrowingLiteral(literal, {0}).PopulateR1<uint32_t>({1, 2, 3, 4, 5});
    MutableBorrowingLiteral(literal, {1}).PopulateR1<uint32_t>({1, 2, 3, 4, 5});
  }
  lhs.SetDynamicSize(0, {0}, 5);
  rhs.SetDynamicSize(0, {0}, 4);
  EXPECT_FALSE(LiteralTestUtil::Equal(lhs, rhs));
}
// F32 variant: differing dynamic sizes (5 vs. 4) fail the Near comparison.
TEST(LiteralTestUtilTest, DynamicTuplesHaveDifferentDynamicSizes_F32) {
  const auto tuple_shape = ShapeUtil::MakeTupleShape(
      {ShapeUtil::MakeShape(F32, {5}), ShapeUtil::MakeShape(F32, {5})});
  auto lhs = Literal(tuple_shape);
  auto rhs = Literal(tuple_shape);
  for (Literal* literal : {&lhs, &rhs}) {
    MutableBorrowingLiteral(literal, {0}).PopulateR1<float>({1, 2, 3, 4, 5});
    MutableBorrowingLiteral(literal, {1}).PopulateR1<float>({1, 2, 3, 4, 5});
  }
  lhs.SetDynamicSize(0, {0}, 5);
  rhs.SetDynamicSize(0, {0}, 4);
  EXPECT_FALSE(LiteralTestUtil::Near(lhs, rhs, ErrorSpec{0.0001}));
}
// A tuple with a dynamic element must not compare equal to an otherwise
// identical tuple that is fully static.
TEST(LiteralTestUtilTest, OneTupleDynamicOneIsNot) {
  const auto tuple_shape = ShapeUtil::MakeTupleShape(
      {ShapeUtil::MakeShape(U32, {5}), ShapeUtil::MakeShape(U32, {5})});
  auto lhs = Literal(tuple_shape);
  auto rhs = Literal(tuple_shape);
  for (Literal* literal : {&lhs, &rhs}) {
    MutableBorrowingLiteral(literal, {0}).PopulateR1<uint32_t>({1, 2, 3, 4, 5});
    MutableBorrowingLiteral(literal, {1}).PopulateR1<uint32_t>({1, 2, 3, 4, 5});
  }
  lhs.SetDynamicSize(0, {0}, 5);  // only lhs is marked dynamic
  EXPECT_FALSE(LiteralTestUtil::Equal(lhs, rhs));
}
// F32 variant: dynamic-vs-static mismatch also fails the Near comparison.
TEST(LiteralTestUtilTest, OneTupleDynamicOneIsNot_F32) {
  const auto tuple_shape = ShapeUtil::MakeTupleShape(
      {ShapeUtil::MakeShape(F32, {5}), ShapeUtil::MakeShape(F32, {5})});
  auto lhs = Literal(tuple_shape);
  auto rhs = Literal(tuple_shape);
  for (Literal* literal : {&lhs, &rhs}) {
    MutableBorrowingLiteral(literal, {0}).PopulateR1<float>({1, 2, 3, 4, 5});
    MutableBorrowingLiteral(literal, {1}).PopulateR1<float>({1, 2, 3, 4, 5});
  }
  lhs.SetDynamicSize(0, {0}, 5);  // only lhs is marked dynamic
  EXPECT_FALSE(LiteralTestUtil::Near(lhs, rhs, ErrorSpec{0.0001}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/literal_test_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/literal_test_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
44a5ad0c-6a54-4da0-90bf-7244d1e5c275 | cpp | tensorflow/tensorflow | audio_spectrogram | tensorflow/lite/kernels/audio_spectrogram.cc | tensorflow/lite/kernels/audio_spectrogram_test.cc | #include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <vector>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/spectrogram.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace audio_spectrogram {
// Indices of the op's single input and single output tensor.
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;

// Only a reference implementation of this op exists.
enum KernelType {
  kReference,
};

// Per-node state, allocated in Init and released in Free.
typedef struct {
  int window_size;          // samples per analysis window (flexbuffer attr)
  int stride;               // samples stepped between windows (flexbuffer attr)
  bool magnitude_squared;   // emit squared magnitudes instead of magnitudes
  int output_height;        // number of output frames; computed in Prepare
  internal::Spectrogram* spectrogram;  // owned helper, deleted in Free
} TfLiteAudioSpectrogramParams;
// Allocates per-node state and decodes the custom-op attributes from the
// FlexBuffer map serialized into `buffer`.
// Returned pointer is owned by the runtime and released via Free().
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  // Value-initialize so fields not assigned here (notably output_height,
  // which is only computed later in Prepare) start at a deterministic zero
  // instead of indeterminate memory.
  auto* data = new TfLiteAudioSpectrogramParams();
  const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
  const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap();
  data->window_size = m["window_size"].AsInt64();
  data->stride = m["stride"].AsInt64();
  data->magnitude_squared = m["magnitude_squared"].AsBool();
  data->spectrogram = new internal::Spectrogram;
  return data;
}
// Releases the per-node state created by Init, including the owned
// Spectrogram helper.
void Free(TfLiteContext* context, void* buffer) {
  // `buffer` is the TfLiteAudioSpectrogramParams returned by Init.
  auto* state = static_cast<TfLiteAudioSpectrogramParams*>(buffer);
  delete state->spectrogram;
  delete state;
}
// Validates the node's tensors and resizes the output to
// [channels, frames, frequency_bins] based on the window/stride attributes.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  auto* params =
      reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data);
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  // Input is expected to be a [samples, channels] matrix of float32.
  TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2);
  TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32);
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
  TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size,
                                                          params->stride));
  const int64_t sample_count = input->dims->data[0];
  const int64_t length_minus_window = (sample_count - params->window_size);
  if (length_minus_window < 0) {
    // Fewer samples than one window: the spectrogram has zero frames.
    params->output_height = 0;
  } else {
    // One frame for the first full window, plus one per additional stride.
    params->output_height = 1 + (length_minus_window / params->stride);
  }
  TfLiteIntArray* output_size = TfLiteIntArrayCreate(3);
  output_size->data[0] = input->dims->data[1];  // channel count
  output_size->data[1] = params->output_height;
  output_size->data[2] = params->spectrogram->output_frequency_channels();
  return context->ResizeTensor(context, output, output_size);
}
// Computes a spectrogram independently for each channel of the
// [samples, channels] input and writes one [output_height, output_width]
// slice per channel into the flat output buffer.
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  auto* params =
      reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  const float* input_data = GetTensorData<float>(input);
  const int64_t sample_count = input->dims->data[0];
  const int64_t channel_count = input->dims->data[1];
  const int64_t output_width = params->spectrogram->output_frequency_channels();
  float* output_flat = GetTensorData<float>(output);
  // Scratch buffer reused across channels.
  std::vector<float> input_for_channel(sample_count);
  for (int64_t channel = 0; channel < channel_count; ++channel) {
    float* output_slice =
        output_flat + (channel * params->output_height * output_width);
    // De-interleave this channel's samples into a contiguous buffer.
    for (int i = 0; i < sample_count; ++i) {
      input_for_channel[i] = input_data[i * channel_count + channel];
    }
    std::vector<std::vector<float>> spectrogram_output;
    // Re-initialize per channel so internal state from the previous channel
    // does not carry over.
    TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size,
                                                            params->stride));
    TF_LITE_ENSURE(context,
                   params->spectrogram->ComputeSquaredMagnitudeSpectrogram(
                       input_for_channel, &spectrogram_output));
    // Shape checks against the sizes computed in Prepare.
    TF_LITE_ENSURE_EQ(context, spectrogram_output.size(),
                      params->output_height);
    TF_LITE_ENSURE(context, spectrogram_output.empty() ||
                                (spectrogram_output[0].size() == output_width));
    for (int row_index = 0; row_index < params->output_height; ++row_index) {
      const std::vector<float>& spectrogram_row = spectrogram_output[row_index];
      TF_LITE_ENSURE_EQ(context, spectrogram_row.size(), output_width);
      float* output_row = output_slice + (row_index * output_width);
      if (params->magnitude_squared) {
        for (int i = 0; i < output_width; ++i) {
          output_row[i] = spectrogram_row[i];
        }
      } else {
        // The helper computes squared magnitudes; take the square root when
        // plain magnitudes were requested.
        for (int i = 0; i < output_width; ++i) {
          output_row[i] = sqrtf(spectrogram_row[i]);
        }
      }
    }
  }
  return kTfLiteOk;
}
}
// Returns the registration record wiring the custom op's lifecycle
// callbacks; the record lives for the lifetime of the process.
TfLiteRegistration* Register_AUDIO_SPECTROGRAM() {
  static TfLiteRegistration registration = {
      audio_spectrogram::Init,
      audio_spectrogram::Free,
      audio_spectrogram::Prepare,
      audio_spectrogram::Eval<audio_spectrogram::kReference>,
  };
  return &registration;
}
}
}
} | #include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace custom {
TfLiteRegistration* Register_AUDIO_SPECTROGRAM();
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
// Test harness that builds a single-op model around the custom
// AudioSpectrogram op, encoding its attributes as a FlexBuffer map.
class BaseAudioSpectrogramOpModel : public SingleOpModel {
 public:
  BaseAudioSpectrogramOpModel(const TensorData& input1,
                              const TensorData& output, int window_size,
                              int stride, bool magnitude_squared) {
    input1_ = AddInput(input1);
    output_ = AddOutput(output);
    // Custom ops receive their attributes serialized as a FlexBuffer map.
    flexbuffers::Builder fbb;
    fbb.Map([&]() {
      fbb.Int("window_size", window_size);
      fbb.Int("stride", stride);
      fbb.Bool("magnitude_squared", magnitude_squared);
    });
    fbb.Finish();
    SetCustomOp("AudioSpectrogram", fbb.GetBuffer(),
                Register_AUDIO_SPECTROGRAM);
    BuildInterpreter({GetShape(input1_)});
  }

  // Index of the audio input tensor.
  int input1() { return input1_; }

  std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
  std::vector<int> GetOutputShape() { return GetTensorShape(output_); }

 protected:
  int input1_;
  int output_;
};
// window_size=8, stride=1, magnitude_squared=false: expect one frame of
// plain (non-squared) magnitudes.
TEST(BaseAudioSpectrogramOpModel, NonSquaredTest) {
  BaseAudioSpectrogramOpModel model({TensorType_FLOAT32, {8, 1}},
                                    {TensorType_FLOAT32, {}}, 8, 1, false);
  const std::vector<float> samples = {-1.0f, 0.0f, 1.0f, 0.0f,
                                      -1.0f, 0.0f, 1.0f, 0.0f};
  model.PopulateTensor<float>(model.input1(), samples);
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  const std::vector<int> shape = model.GetOutputShape();
  EXPECT_EQ(3, shape.size());
  EXPECT_THAT(shape, ElementsAre(1, 1, 5));
  EXPECT_THAT(model.GetOutput(), ElementsAreArray(ArrayFloatNear(
                                     {0.0f, 1.0f, 2.0f, 1.0f, 0.0f}, 1e-3)));
}
// Same input as NonSquaredTest but magnitude_squared=true: expect each
// magnitude squared (2 -> 4).
TEST(SpectrogramOpTest, SquaredTest) {
  BaseAudioSpectrogramOpModel model({TensorType_FLOAT32, {8, 1}},
                                    {TensorType_FLOAT32, {}}, 8, 1, true);
  const std::vector<float> samples = {-1.0f, 0.0f, 1.0f, 0.0f,
                                      -1.0f, 0.0f, 1.0f, 0.0f};
  model.PopulateTensor<float>(model.input1(), samples);
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  const std::vector<int> shape = model.GetOutputShape();
  EXPECT_EQ(3, shape.size());
  EXPECT_THAT(shape, ElementsAre(1, 1, 5));
  EXPECT_THAT(model.GetOutput(),
              ElementsAreArray(ArrayFloatNear({0.f, 1.f, 4.f, 1.f, 0.f}, 1e-3)));
}
// 10 samples with window_size=8 and stride=2 yield two frames.
TEST(SpectrogramOpTest, StrideTest) {
  BaseAudioSpectrogramOpModel model({TensorType_FLOAT32, {10, 1}},
                                    {TensorType_FLOAT32, {}}, 8, 2, true);
  const std::vector<float> samples = {-1.0f, 0.0f, 1.0f, 0.0f, -1.0f,
                                      0.0f,  1.0f, 0.0f, 1.0f, 0.0f};
  model.PopulateTensor<float>(model.input1(), samples);
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 5));
  EXPECT_THAT(model.GetOutput(), ElementsAreArray(ArrayFloatNear(
                                     {0, 1, 4, 1, 0, 1, 2, 1, 2, 1}, 1e-3)));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/audio_spectrogram.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/audio_spectrogram_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fe64423d-2070-423e-8f2c-0338e81ff7d7 | cpp | google/cel-cpp | bindings_ext | extensions/bindings_ext.cc | extensions/bindings_ext_test.cc | #include "extensions/bindings_ext.h"
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "common/ast.h"
#include "parser/macro.h"
#include "parser/macro_expr_factory.h"
namespace cel::extensions {
namespace {
static constexpr char kCelNamespace[] = "cel";
static constexpr char kBind[] = "bind";
static constexpr char kUnusedIterVar[] = "#unused";
// True iff `target` is the bare identifier `cel` — the only receiver on
// which the bind() macro is allowed to expand.
bool IsTargetNamespace(const Expr& target) {
  if (!target.has_ident_expr()) {
    return false;
  }
  return target.ident_expr().name() == kCelNamespace;
}
}
// Returns the macro implementing `cel.bind(var, init, result)`.
// The macro expands into a comprehension that encodes a local binding:
//   - iterates over an empty list with an unused iteration variable,
//   - uses `var` as the accumulator, initialized to `init`,
//   - has a constant-false loop condition, so the loop step (the variable
//     itself) is never evaluated,
//   - yields `result`, within whose scope `var` is bound.
std::vector<Macro> bindings_macros() {
  absl::StatusOr<Macro> cel_bind = Macro::Receiver(
      kBind, 3,
      [](MacroExprFactory& factory, Expr& target,
         absl::Span<Expr> args) -> absl::optional<Expr> {
        // Only expand `cel.bind(...)`; any other receiver is left alone.
        if (!IsTargetNamespace(target)) {
          return absl::nullopt;
        }
        // The bound variable must be a simple identifier.
        if (!args[0].has_ident_expr()) {
          return factory.ReportErrorAt(
              args[0], "cel.bind() variable name must be a simple identifier");
        }
        auto var_name = args[0].ident_expr().name();
        return factory.NewComprehension(kUnusedIterVar, factory.NewList(),
                                        std::move(var_name), std::move(args[1]),
                                        factory.NewBoolConst(false),
                                        std::move(args[0]), std::move(args[2]));
      });
  // NOTE(review): assumes Macro::Receiver succeeds for these fixed
  // arguments; *cel_bind would be UB if it ever returned an error.
  return {*cel_bind};
}
} | #include "extensions/bindings_ext.h"
#include <cstdint>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "base/attribute.h"
#include "eval/public/activation.h"
#include "eval/public/builtin_func_registrar.h"
#include "eval/public/cel_expr_builder_factory.h"
#include "eval/public/cel_expression.h"
#include "eval/public/cel_function.h"
#include "eval/public/cel_function_adapter.h"
#include "eval/public/cel_options.h"
#include "eval/public/cel_value.h"
#include "eval/public/structs/cel_proto_wrapper.h"
#include "eval/public/testing/matchers.h"
#include "internal/testing.h"
#include "parser/macro.h"
#include "parser/parser.h"
#include "proto/test/v1/proto2/test_all_types.pb.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/text_format.h"
namespace cel::extensions {
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::StatusIs;
using ::google::api::expr::v1alpha1::CheckedExpr;
using ::google::api::expr::v1alpha1::Expr;
using ::google::api::expr::v1alpha1::ParsedExpr;
using ::google::api::expr::v1alpha1::SourceInfo;
using ::google::api::expr::parser::ParseWithMacros;
using ::google::api::expr::runtime::Activation;
using ::google::api::expr::runtime::CelExpressionBuilder;
using ::google::api::expr::runtime::CelFunction;
using ::google::api::expr::runtime::CelFunctionDescriptor;
using ::google::api::expr::runtime::CelProtoWrapper;
using ::google::api::expr::runtime::CelValue;
using ::google::api::expr::runtime::CreateCelExpressionBuilder;
using ::google::api::expr::runtime::FunctionAdapter;
using ::google::api::expr::runtime::InterpreterOptions;
using ::google::api::expr::runtime::RegisterBuiltinFunctions;
using ::google::api::expr::runtime::UnknownProcessingOptions;
using ::google::api::expr::runtime::test::IsCelInt64;
using ::google::api::expr::test::v1::proto2::NestedTestAllTypes;
using ::google::protobuf::Arena;
using ::google::protobuf::TextFormat;
using ::testing::Contains;
using ::testing::HasSubstr;
using ::testing::Pair;
// One parameterized test case: a CEL expression to parse and evaluate,
// and optionally a substring expected in the parse error.
struct TestInfo {
  // CEL source text for the case.
  std::string expr;
  // Expected parse-error substring; empty means parsing must succeed.
  std::string err = "";
};
// Stub CEL function whose descriptor declares four bool parameters and
// that unconditionally evaluates to `true`; used so expressions like
// `false.bind(false, false, false)` resolve to a real function.
class TestFunction : public CelFunction {
 public:
  explicit TestFunction(absl::string_view name)
      : CelFunction(CelFunctionDescriptor(
            name, true,
            {CelValue::Type::kBool, CelValue::Type::kBool,
             CelValue::Type::kBool, CelValue::Type::kBool})) {}

  // Ignores all arguments and always produces `true`.
  absl::Status Evaluate(absl::Span<const CelValue> args, CelValue* result,
                        Arena* arena) const override {
    *result = CelValue::CreateBool(true);
    return absl::OkStatus();
  }
};
// Function name deliberately matching the bind() macro name.
constexpr absl::string_view kBind = "bind";

// Builds the stub `bind` function used by the tests' registries.
std::unique_ptr<CelFunction> CreateBindFunction() {
  return std::make_unique<TestFunction>(kBind);
}
// Fixture parameterized over (test case, constant folding enabled,
// recursive planning enabled).
class BindingsExtTest
    : public testing::TestWithParam<std::tuple<TestInfo, bool, bool>> {
 protected:
  const TestInfo& GetTestInfo() { return std::get<0>(GetParam()); }
  bool GetEnableConstantFolding() { return std::get<1>(GetParam()); }
  bool GetEnableRecursivePlan() { return std::get<2>(GetParam()); }
};
// Parses the case's expression with the bindings macro enabled, builds an
// evaluator under the parameterized options, and expects evaluation to
// yield `true` (or parsing to fail with the expected error substring).
TEST_P(BindingsExtTest, Default) {
  const TestInfo& test_info = GetTestInfo();
  Arena arena;
  // Combine the standard macros with cel.bind().
  std::vector<Macro> all_macros = Macro::AllMacros();
  std::vector<Macro> bindings_macros = cel::extensions::bindings_macros();
  all_macros.insert(all_macros.end(), bindings_macros.begin(),
                    bindings_macros.end());
  auto result = ParseWithMacros(test_info.expr, all_macros, "<input>");
  if (!test_info.err.empty()) {
    // Negative case: parsing must fail with the expected message.
    EXPECT_THAT(result.status(), StatusIs(absl::StatusCode::kInvalidArgument,
                                          HasSubstr(test_info.err)));
    return;
  }
  EXPECT_THAT(result, IsOk());
  ParsedExpr parsed_expr = *result;
  Expr expr = parsed_expr.expr();
  SourceInfo source_info = parsed_expr.source_info();
  InterpreterOptions options;
  options.enable_heterogeneous_equality = true;
  options.enable_empty_wrapper_null_unboxing = true;
  options.constant_folding = GetEnableConstantFolding();
  options.constant_arena = &arena;
  // -1 enables unbounded recursive planning; 0 disables it.
  options.max_recursion_depth = GetEnableRecursivePlan() ? -1 : 0;
  std::unique_ptr<CelExpressionBuilder> builder =
      CreateCelExpressionBuilder(options);
  ASSERT_OK(builder->GetRegistry()->Register(CreateBindFunction()));
  ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry()));
  ASSERT_OK_AND_ASSIGN(auto cel_expr,
                       builder->CreateExpression(&expr, &source_info));
  Activation activation;
  ASSERT_OK_AND_ASSIGN(CelValue out, cel_expr->Evaluate(activation, &arena));
  // Every positive test expression is written to evaluate to `true`.
  ASSERT_TRUE(out.IsBool()) << out.DebugString();
  EXPECT_EQ(out.BoolOrDie(), true);
}
// Same as the Default test, but evaluates through Trace() with a no-op
// callback to verify the tracing path handles bind expansions.
TEST_P(BindingsExtTest, Tracing) {
  const TestInfo& test_info = GetTestInfo();
  Arena arena;
  std::vector<Macro> all_macros = Macro::AllMacros();
  std::vector<Macro> bindings_macros = cel::extensions::bindings_macros();
  all_macros.insert(all_macros.end(), bindings_macros.begin(),
                    bindings_macros.end());
  auto result = ParseWithMacros(test_info.expr, all_macros, "<input>");
  if (!test_info.err.empty()) {
    // Negative case: parsing must fail with the expected message.
    EXPECT_THAT(result.status(), StatusIs(absl::StatusCode::kInvalidArgument,
                                          HasSubstr(test_info.err)));
    return;
  }
  EXPECT_THAT(result, IsOk());
  ParsedExpr parsed_expr = *result;
  Expr expr = parsed_expr.expr();
  SourceInfo source_info = parsed_expr.source_info();
  InterpreterOptions options;
  options.enable_heterogeneous_equality = true;
  options.enable_empty_wrapper_null_unboxing = true;
  options.constant_folding = GetEnableConstantFolding();
  options.constant_arena = &arena;
  options.max_recursion_depth = GetEnableRecursivePlan() ? -1 : 0;
  std::unique_ptr<CelExpressionBuilder> builder =
      CreateCelExpressionBuilder(options);
  ASSERT_OK(builder->GetRegistry()->Register(CreateBindFunction()));
  ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry()));
  ASSERT_OK_AND_ASSIGN(auto cel_expr,
                       builder->CreateExpression(&expr, &source_info));
  Activation activation;
  // No-op trace callback: only exercises the tracing code path.
  ASSERT_OK_AND_ASSIGN(
      CelValue out,
      cel_expr->Trace(activation, &arena,
                      [](int64_t, const CelValue&, google::protobuf::Arena*) {
                        return absl::OkStatus();
                      }));
  ASSERT_TRUE(out.IsBool()) << out.DebugString();
  EXPECT_EQ(out.BoolOrDie(), true);
}
INSTANTIATE_TEST_SUITE_P(
CelBindingsExtTest, BindingsExtTest,
testing::Combine(
testing::ValuesIn<TestInfo>(
{{"cel.bind(t, true, t)"},
{"cel.bind(msg, \"hello\", msg + msg + msg) == "
"\"hellohellohello\""},
{"cel.bind(t1, true, cel.bind(t2, true, t1 && t2))"},
{"cel.bind(valid_elems, [1, 2, 3], "
"[3, 4, 5].exists(e, e in valid_elems))"},
{"cel.bind(valid_elems, [1, 2, 3], "
"![4, 5].exists(e, e in valid_elems))"},
{R"(
cel.bind(
my_list,
['a', 'b', 'c'].map(x, x + '_'),
[0, 1, 2].map(y, my_list[y] + string(y))) ==
['a_0', 'b_1', 'c_2'])"},
{"cel.bind(x, 1, "
" cel.bind(x, x + 1, x)) == 2"},
{"false.bind(false, false, false)"},
{"cel.bind(bad.name, true, bad.name)",
"variable name must be a simple identifier"}}),
testing::Bool(),
testing::Bool()));
constexpr absl::string_view kTraceExpr = R"pb(
expr: {
id: 11
comprehension_expr: {
iter_var: "#unused"
iter_range: {
id: 8
list_expr: {}
}
accu_var: "x"
accu_init: {
id: 4
const_expr: { int64_value: 20 }
}
loop_condition: {
id: 9
const_expr: { bool_value: false }
}
loop_step: {
id: 10
ident_expr: { name: "x" }
}
result: {
id: 6
call_expr: {
function: "_*_"
args: {
id: 5
ident_expr: { name: "x" }
}
args: {
id: 7
ident_expr: { name: "x" }
}
}
}
}
})pb";
// Evaluates the hand-written bind comprehension in kTraceExpr (x = 20,
// result x * x) with a recording trace callback and checks that the bound
// value 20 is observed both at its initializer (id 4) and at a use (id 7).
TEST(BindingsExtTest, TraceSupport) {
  ParsedExpr expr;
  ASSERT_TRUE(TextFormat::ParseFromString(kTraceExpr, &expr));
  InterpreterOptions options;
  options.enable_heterogeneous_equality = true;
  options.enable_empty_wrapper_null_unboxing = true;
  std::unique_ptr<CelExpressionBuilder> builder =
      CreateCelExpressionBuilder(options);
  ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry()));
  ASSERT_OK_AND_ASSIGN(
      auto plan, builder->CreateExpression(&expr.expr(), &expr.source_info()));
  Activation activation;
  google::protobuf::Arena arena;
  // Records every traced (expr id -> value) pair.
  absl::flat_hash_map<int64_t, CelValue> ids;
  ASSERT_OK_AND_ASSIGN(
      auto result,
      plan->Trace(activation, &arena,
                  [&](int64_t id, const CelValue& value, google::protobuf::Arena* arena) {
                    ids[id] = value;
                    return absl::OkStatus();
                  }));
  // 20 * 20 == 400.
  EXPECT_TRUE(result.IsInt64() && result.Int64OrDie() == 400)
      << result.DebugString();
  EXPECT_THAT(ids, Contains(Pair(4, IsCelInt64(20))));
  EXPECT_THAT(ids, Contains(Pair(7, IsCelInt64(20))));
}
constexpr absl::string_view kFieldSelectTestExpr = R"pb(
reference_map: {
key: 4
value: { name: "msg" }
}
reference_map: {
key: 8
value: { overload_id: "conditional" }
}
reference_map: {
key: 9
value: { name: "google.api.expr.test.v1.proto2.TestAllTypes" }
}
reference_map: {
key: 13
value: { name: "submsg" }
}
reference_map: {
key: 18
value: { name: "submsg" }
}
type_map: {
key: 4
value: { message_type: "google.api.expr.test.v1.proto2.NestedTestAllTypes" }
}
type_map: {
key: 5
value: { message_type: "google.api.expr.test.v1.proto2.NestedTestAllTypes" }
}
type_map: {
key: 6
value: { message_type: "google.api.expr.test.v1.proto2.NestedTestAllTypes" }
}
type_map: {
key: 7
value: { primitive: BOOL }
}
type_map: {
key: 8
value: { primitive: INT64 }
}
type_map: {
key: 9
value: { message_type: "google.api.expr.test.v1.proto2.TestAllTypes" }
}
type_map: {
key: 11
value: { primitive: INT64 }
}
type_map: {
key: 12
value: { primitive: INT64 }
}
type_map: {
key: 13
value: { message_type: "google.api.expr.test.v1.proto2.NestedTestAllTypes" }
}
type_map: {
key: 14
value: { message_type: "google.api.expr.test.v1.proto2.TestAllTypes" }
}
type_map: {
key: 15
value: { primitive: INT64 }
}
type_map: {
key: 16
value: { list_type: { elem_type: { dyn: {} } } }
}
type_map: {
key: 17
value: { primitive: BOOL }
}
type_map: {
key: 18
value: { message_type: "google.api.expr.test.v1.proto2.NestedTestAllTypes" }
}
type_map: {
key: 19
value: { primitive: INT64 }
}
source_info: {
location: "<input>"
line_offsets: 120
positions: { key: 1 value: 0 }
positions: { key: 2 value: 8 }
positions: { key: 3 value: 9 }
positions: { key: 4 value: 17 }
positions: { key: 5 value: 20 }
positions: { key: 6 value: 26 }
positions: { key: 7 value: 35 }
positions: { key: 8 value: 42 }
positions: { key: 9 value: 56 }
positions: { key: 10 value: 69 }
positions: { key: 11 value: 71 }
positions: { key: 12 value: 75 }
positions: { key: 13 value: 91 }
positions: { key: 14 value: 97 }
positions: { key: 15 value: 105 }
positions: { key: 16 value: 8 }
positions: { key: 17 value: 8 }
positions: { key: 18 value: 8 }
positions: { key: 19 value: 8 }
macro_calls: {
key: 19
value: {
call_expr: {
target: {
id: 1
ident_expr: { name: "cel" }
}
function: "bind"
args: {
id: 3
ident_expr: { name: "submsg" }
}
args: {
id: 6
select_expr: {
operand: {
id: 5
select_expr: {
operand: {
id: 4
ident_expr: { name: "msg" }
}
field: "child"
}
}
field: "child"
}
}
args: {
id: 8
call_expr: {
function: "_?_:_"
args: {
id: 7
const_expr: { bool_value: false }
}
args: {
id: 12
select_expr: {
operand: {
id: 9
struct_expr: {
message_name: "google.api.expr.test.v1.proto2.TestAllTypes"
entries: {
id: 10
field_key: "single_int64"
value: {
id: 11
const_expr: { int64_value: -42 }
}
}
}
}
field: "single_int64"
}
}
args: {
id: 15
select_expr: {
operand: {
id: 14
select_expr: {
operand: {
id: 13
ident_expr: { name: "submsg" }
}
field: "payload"
}
}
field: "single_int64"
}
}
}
}
}
}
}
}
expr: {
id: 19
comprehension_expr: {
iter_var: "#unused"
iter_range: {
id: 16
list_expr: {}
}
accu_var: "submsg"
accu_init: {
id: 6
select_expr: {
operand: {
id: 5
select_expr: {
operand: {
id: 4
ident_expr: { name: "msg" }
}
field: "child"
}
}
field: "child"
}
}
loop_condition: {
id: 17
const_expr: { bool_value: false }
}
loop_step: {
id: 18
ident_expr: { name: "submsg" }
}
result: {
id: 8
call_expr: {
function: "_?_:_"
args: {
id: 7
const_expr: { bool_value: false }
}
args: {
id: 12
select_expr: {
operand: {
id: 9
struct_expr: {
message_name: "google.api.expr.test.v1.proto2.TestAllTypes"
entries: {
id: 10
field_key: "single_int64"
value: {
id: 11
const_expr: { int64_value: -42 }
}
}
}
}
field: "single_int64"
}
}
args: {
id: 15
select_expr: {
operand: {
id: 14
select_expr: {
operand: {
id: 13
ident_expr: { name: "submsg" }
}
field: "payload"
}
}
field: "single_int64"
}
}
}
}
}
})pb";
// Fixture parameterized on whether the select optimization is enabled.
class BindingsExtInteractionsTest : public testing::TestWithParam<bool> {
 protected:
  bool GetEnableSelectOptimization() { return GetParam(); }
};
TEST_P(BindingsExtInteractionsTest, SelectOptimization) {
CheckedExpr expr;
ASSERT_TRUE(TextFormat::ParseFromString(kFieldSelectTestExpr, &expr));
InterpreterOptions options;
options.enable_empty_wrapper_null_unboxing = true;
options.enable_select_optimization = GetEnableSelectOptimization();
std::unique_ptr<CelExpressionBuilder> builder =
CreateCelExpressionBuilder(options);
ASSERT_OK(builder->GetRegistry()->Register(CreateBindFunction()));
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry()));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder->CreateExpression(&expr));
Arena arena;
Activation activation;
NestedTestAllTypes msg;
msg.mutable_child()->mutable_child()->mutable_payload()->set_single_int64(42);
activation.InsertValue("msg", CelProtoWrapper::CreateMessage(&msg, &arena));
ASSERT_OK_AND_ASSIGN(CelValue out, cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(out.IsInt64());
EXPECT_EQ(out.Int64OrDie(), 42);
}
TEST_P(BindingsExtInteractionsTest, UnknownAttributesSelectOptimization) {
CheckedExpr expr;
ASSERT_TRUE(TextFormat::ParseFromString(kFieldSelectTestExpr, &expr));
InterpreterOptions options;
options.enable_empty_wrapper_null_unboxing = true;
options.unknown_processing = UnknownProcessingOptions::kAttributeOnly;
options.enable_select_optimization = GetEnableSelectOptimization();
std::unique_ptr<CelExpressionBuilder> builder =
CreateCelExpressionBuilder(options);
ASSERT_OK(builder->GetRegistry()->Register(CreateBindFunction()));
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry()));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder->CreateExpression(&expr));
Arena arena;
Activation activation;
activation.set_unknown_attribute_patterns({AttributePattern(
"msg", {AttributeQualifierPattern::OfString("child"),
AttributeQualifierPattern::OfString("child")})});
NestedTestAllTypes msg;
msg.mutable_child()->mutable_child()->mutable_payload()->set_single_int64(42);
activation.InsertValue("msg", CelProtoWrapper::CreateMessage(&msg, &arena));
ASSERT_OK_AND_ASSIGN(CelValue out, cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(out.IsUnknownSet());
EXPECT_THAT(out.UnknownSetOrDie()->unknown_attributes(),
testing::ElementsAre(
Attribute("msg", {AttributeQualifier::OfString("child"),
AttributeQualifier::OfString("child")})));
}
TEST_P(BindingsExtInteractionsTest,
UnknownAttributeSelectOptimizationReturnValue) {
CheckedExpr expr;
ASSERT_TRUE(TextFormat::ParseFromString(kFieldSelectTestExpr, &expr));
InterpreterOptions options;
options.enable_empty_wrapper_null_unboxing = true;
options.unknown_processing = UnknownProcessingOptions::kAttributeOnly;
options.enable_select_optimization = GetEnableSelectOptimization();
std::unique_ptr<CelExpressionBuilder> builder =
CreateCelExpressionBuilder(options);
ASSERT_OK(builder->GetRegistry()->Register(CreateBindFunction()));
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry()));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder->CreateExpression(&expr));
Arena arena;
Activation activation;
activation.set_unknown_attribute_patterns({AttributePattern(
"msg", {AttributeQualifierPattern::OfString("child"),
AttributeQualifierPattern::OfString("child"),
AttributeQualifierPattern::OfString("payload"),
AttributeQualifierPattern::OfString("single_int64")})});
NestedTestAllTypes msg;
msg.mutable_child()->mutable_child()->mutable_payload()->set_single_int64(42);
activation.InsertValue("msg", CelProtoWrapper::CreateMessage(&msg, &arena));
ASSERT_OK_AND_ASSIGN(CelValue out, cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(out.IsUnknownSet()) << out.DebugString();
EXPECT_THAT(out.UnknownSetOrDie()->unknown_attributes(),
testing::ElementsAre(Attribute(
"msg", {AttributeQualifier::OfString("child"),
AttributeQualifier::OfString("child"),
AttributeQualifier::OfString("payload"),
AttributeQualifier::OfString("single_int64")})));
}
TEST_P(BindingsExtInteractionsTest, MissingAttributesSelectOptimization) {
CheckedExpr expr;
ASSERT_TRUE(TextFormat::ParseFromString(kFieldSelectTestExpr, &expr));
InterpreterOptions options;
options.enable_empty_wrapper_null_unboxing = true;
options.enable_missing_attribute_errors = true;
options.enable_select_optimization = GetEnableSelectOptimization();
std::unique_ptr<CelExpressionBuilder> builder =
CreateCelExpressionBuilder(options);
ASSERT_OK(builder->GetRegistry()->Register(CreateBindFunction()));
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry()));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder->CreateExpression(&expr));
Arena arena;
Activation activation;
activation.set_missing_attribute_patterns({AttributePattern(
"msg", {AttributeQualifierPattern::OfString("child"),
AttributeQualifierPattern::OfString("child"),
AttributeQualifierPattern::OfString("payload"),
AttributeQualifierPattern::OfString("single_int64")})});
NestedTestAllTypes msg;
msg.mutable_child()->mutable_child()->mutable_payload()->set_single_int64(42);
activation.InsertValue("msg", CelProtoWrapper::CreateMessage(&msg, &arena));
ASSERT_OK_AND_ASSIGN(CelValue out, cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(out.IsError()) << out.DebugString();
EXPECT_THAT(out.ErrorOrDie()->ToString(),
HasSubstr("msg.child.child.payload.single_int64"));
}
TEST_P(BindingsExtInteractionsTest, UnknownAttribute) {
std::vector<Macro> all_macros = Macro::AllMacros();
std::vector<Macro> bindings_macros = cel::extensions::bindings_macros();
all_macros.insert(all_macros.end(), bindings_macros.begin(),
bindings_macros.end());
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, ParseWithMacros(
R"(
cel.bind(
x,
msg.child.payload.single_int64,
x < 42 || 1 == 1))",
all_macros));
InterpreterOptions options;
options.enable_empty_wrapper_null_unboxing = true;
options.unknown_processing = UnknownProcessingOptions::kAttributeOnly;
options.enable_select_optimization = GetEnableSelectOptimization();
std::unique_ptr<CelExpressionBuilder> builder =
CreateCelExpressionBuilder(options);
ASSERT_OK(builder->GetRegistry()->Register(CreateBindFunction()));
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry()));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder->CreateExpression(
&expr.expr(), &expr.source_info()));
Arena arena;
Activation activation;
activation.set_unknown_attribute_patterns({AttributePattern(
"msg", {AttributeQualifierPattern::OfString("child"),
AttributeQualifierPattern::OfString("payload"),
AttributeQualifierPattern::OfString("single_int64")})});
NestedTestAllTypes msg;
msg.mutable_child()->mutable_child()->mutable_payload()->set_single_int64(42);
activation.InsertValue("msg", CelProtoWrapper::CreateMessage(&msg, &arena));
ASSERT_OK_AND_ASSIGN(CelValue out, cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(out.IsBool()) << out.DebugString();
EXPECT_TRUE(out.BoolOrDie());
}
TEST_P(BindingsExtInteractionsTest, UnknownAttributeReturnValue) {
std::vector<Macro> all_macros = Macro::AllMacros();
std::vector<Macro> bindings_macros = cel::extensions::bindings_macros();
all_macros.insert(all_macros.end(), bindings_macros.begin(),
bindings_macros.end());
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, ParseWithMacros(
R"(
cel.bind(
x,
msg.child.payload.single_int64,
x))",
all_macros));
InterpreterOptions options;
options.enable_empty_wrapper_null_unboxing = true;
options.unknown_processing = UnknownProcessingOptions::kAttributeOnly;
options.enable_select_optimization = GetEnableSelectOptimization();
std::unique_ptr<CelExpressionBuilder> builder =
CreateCelExpressionBuilder(options);
ASSERT_OK(builder->GetRegistry()->Register(CreateBindFunction()));
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry()));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder->CreateExpression(
&expr.expr(), &expr.source_info()));
Arena arena;
Activation activation;
activation.set_unknown_attribute_patterns({AttributePattern(
"msg", {AttributeQualifierPattern::OfString("child"),
AttributeQualifierPattern::OfString("payload"),
AttributeQualifierPattern::OfString("single_int64")})});
NestedTestAllTypes msg;
msg.mutable_child()->mutable_child()->mutable_payload()->set_single_int64(42);
activation.InsertValue("msg", CelProtoWrapper::CreateMessage(&msg, &arena));
ASSERT_OK_AND_ASSIGN(CelValue out, cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(out.IsUnknownSet()) << out.DebugString();
EXPECT_THAT(out.UnknownSetOrDie()->unknown_attributes(),
testing::ElementsAre(Attribute(
"msg", {AttributeQualifier::OfString("child"),
AttributeQualifier::OfString("payload"),
AttributeQualifier::OfString("single_int64")})));
}
TEST_P(BindingsExtInteractionsTest, MissingAttribute) {
std::vector<Macro> all_macros = Macro::AllMacros();
std::vector<Macro> bindings_macros = cel::extensions::bindings_macros();
all_macros.insert(all_macros.end(), bindings_macros.begin(),
bindings_macros.end());
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, ParseWithMacros(
R"(
cel.bind(
x,
msg.child.payload.single_int64,
x < 42 || 1 == 2))",
all_macros));
InterpreterOptions options;
options.enable_empty_wrapper_null_unboxing = true;
options.enable_missing_attribute_errors = true;
options.enable_select_optimization = GetEnableSelectOptimization();
std::unique_ptr<CelExpressionBuilder> builder =
CreateCelExpressionBuilder(options);
ASSERT_OK(builder->GetRegistry()->Register(CreateBindFunction()));
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry()));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder->CreateExpression(
&expr.expr(), &expr.source_info()));
Arena arena;
Activation activation;
activation.set_missing_attribute_patterns({AttributePattern(
"msg", {AttributeQualifierPattern::OfString("child"),
AttributeQualifierPattern::OfString("payload"),
AttributeQualifierPattern::OfString("single_int64")})});
NestedTestAllTypes msg;
msg.mutable_child()->mutable_child()->mutable_payload()->set_single_int64(42);
activation.InsertValue("msg", CelProtoWrapper::CreateMessage(&msg, &arena));
ASSERT_OK_AND_ASSIGN(CelValue out, cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(out.IsError()) << out.DebugString();
EXPECT_THAT(out.ErrorOrDie()->ToString(),
HasSubstr("msg.child.payload.single_int64"));
}
INSTANTIATE_TEST_SUITE_P(BindingsExtInteractionsTest,
BindingsExtInteractionsTest,
testing::Bool());
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/bindings_ext.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/bindings_ext_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
def39d7c-185b-4df3-ab96-0c863ee2acb0 | cpp | abseil/abseil-cpp | hash_policy_traits | absl/container/internal/hash_policy_traits.h | absl/container/internal/hash_policy_traits_test.cc | #ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
#include <cstddef>
#include <memory>
#include <new>
#include <type_traits>
#include <utility>
#include "absl/container/internal/common_policy_traits.h"
#include "absl/meta/type_traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
template <class Policy, class = void>
struct hash_policy_traits : common_policy_traits<Policy> {
using key_type = typename Policy::key_type;
private:
struct ReturnKey {
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
template <class Key,
absl::enable_if_t<std::is_lvalue_reference<Key>::value, int> = 0>
static key_type& Impl(Key&& k, int) {
return *std::launder(
const_cast<key_type*>(std::addressof(std::forward<Key>(k))));
}
#endif
template <class Key>
static Key Impl(Key&& k, char) {
return std::forward<Key>(k);
}
template <class Key, class... Args>
auto operator()(Key&& k, const Args&...) const
-> decltype(Impl(std::forward<Key>(k), 0)) {
return Impl(std::forward<Key>(k), 0);
}
};
template <class P = Policy, class = void>
struct ConstantIteratorsImpl : std::false_type {};
template <class P>
struct ConstantIteratorsImpl<P, absl::void_t<typename P::constant_iterators>>
: P::constant_iterators {};
public:
using slot_type = typename Policy::slot_type;
using init_type = typename Policy::init_type;
using reference = decltype(Policy::element(std::declval<slot_type*>()));
using pointer = typename std::remove_reference<reference>::type*;
using value_type = typename std::remove_reference<reference>::type;
using constant_iterators = ConstantIteratorsImpl<>;
template <class P = Policy>
static size_t space_used(const slot_type* slot) {
return P::space_used(slot);
}
template <class F, class... Ts, class P = Policy>
static auto apply(F&& f, Ts&&... ts)
-> decltype(P::apply(std::forward<F>(f), std::forward<Ts>(ts)...)) {
return P::apply(std::forward<F>(f), std::forward<Ts>(ts)...);
}
template <class P = Policy>
static auto mutable_key(slot_type* slot)
-> decltype(P::apply(ReturnKey(), hash_policy_traits::element(slot))) {
return P::apply(ReturnKey(), hash_policy_traits::element(slot));
}
template <class T, class P = Policy>
static auto value(T* elem) -> decltype(P::value(elem)) {
return P::value(elem);
}
using HashSlotFn = size_t (*)(const void* hash_fn, void* slot);
template <class Hash>
static constexpr HashSlotFn get_hash_slot_fn() {
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Waddress"
#endif
return Policy::template get_hash_slot_fn<Hash>() == nullptr
? &hash_slot_fn_non_type_erased<Hash>
: Policy::template get_hash_slot_fn<Hash>();
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic pop
#endif
}
static constexpr bool soo_enabled() { return soo_enabled_impl(Rank1{}); }
private:
template <class Hash>
struct HashElement {
template <class K, class... Args>
size_t operator()(const K& key, Args&&...) const {
return h(key);
}
const Hash& h;
};
template <class Hash>
static size_t hash_slot_fn_non_type_erased(const void* hash_fn, void* slot) {
return Policy::apply(HashElement<Hash>{*static_cast<const Hash*>(hash_fn)},
Policy::element(static_cast<slot_type*>(slot)));
}
struct Rank0 {};
struct Rank1 : Rank0 {};
template <class P = Policy>
static constexpr auto soo_enabled_impl(Rank1) -> decltype(P::soo_enabled()) {
return P::soo_enabled();
}
static constexpr bool soo_enabled_impl(Rank0) { return true; }
};
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/container/internal/hash_policy_traits.h"
#include <cstddef>
#include <functional>
#include <memory>
#include <new>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/internal/container_memory.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace {
using ::testing::MockFunction;
using ::testing::Return;
using ::testing::ReturnRef;
using Alloc = std::allocator<int>;
using Slot = int;
struct PolicyWithoutOptionalOps {
using slot_type = Slot;
using key_type = Slot;
using init_type = Slot;
static std::function<Slot&(Slot*)> element;
static int apply(int v) { return apply_impl(v); }
static std::function<int(int)> apply_impl;
static std::function<Slot&(Slot*)> value;
template <class Hash>
static constexpr HashSlotFn get_hash_slot_fn() {
return nullptr;
}
};
std::function<int(int)> PolicyWithoutOptionalOps::apply_impl;
std::function<Slot&(Slot*)> PolicyWithoutOptionalOps::value;
struct Test : ::testing::Test {
Test() {
PolicyWithoutOptionalOps::apply_impl = [&](int a1) -> int {
return apply.Call(a1);
};
PolicyWithoutOptionalOps::value = [&](Slot* a1) -> Slot& {
return value.Call(a1);
};
}
std::allocator<int> alloc;
int a = 53;
MockFunction<int(int)> apply;
MockFunction<Slot&(Slot*)> value;
};
TEST_F(Test, apply) {
EXPECT_CALL(apply, Call(42)).WillOnce(Return(1337));
EXPECT_EQ(1337, (hash_policy_traits<PolicyWithoutOptionalOps>::apply(42)));
}
TEST_F(Test, value) {
int b = 0;
EXPECT_CALL(value, Call(&a)).WillOnce(ReturnRef(b));
EXPECT_EQ(&b, &hash_policy_traits<PolicyWithoutOptionalOps>::value(&a));
}
struct Hash {
size_t operator()(Slot a) const { return static_cast<size_t>(a) * 5; }
};
struct PolicyNoHashFn {
using slot_type = Slot;
using key_type = Slot;
using init_type = Slot;
static size_t* apply_called_count;
static Slot& element(Slot* slot) { return *slot; }
template <typename Fn>
static size_t apply(const Fn& fn, int v) {
++(*apply_called_count);
return fn(v);
}
template <class Hash>
static constexpr HashSlotFn get_hash_slot_fn() {
return nullptr;
}
};
size_t* PolicyNoHashFn::apply_called_count;
struct PolicyCustomHashFn : PolicyNoHashFn {
template <class Hash>
static constexpr HashSlotFn get_hash_slot_fn() {
return &TypeErasedApplyToSlotFn<Hash, int>;
}
};
TEST(HashTest, PolicyNoHashFn_get_hash_slot_fn) {
size_t apply_called_count = 0;
PolicyNoHashFn::apply_called_count = &apply_called_count;
Hash hasher;
Slot value = 7;
auto* fn = hash_policy_traits<PolicyNoHashFn>::get_hash_slot_fn<Hash>();
EXPECT_NE(fn, nullptr);
EXPECT_EQ(fn(&hasher, &value), hasher(value));
EXPECT_EQ(apply_called_count, 1);
}
TEST(HashTest, PolicyCustomHashFn_get_hash_slot_fn) {
size_t apply_called_count = 0;
PolicyNoHashFn::apply_called_count = &apply_called_count;
Hash hasher;
Slot value = 7;
auto* fn = hash_policy_traits<PolicyCustomHashFn>::get_hash_slot_fn<Hash>();
EXPECT_EQ(fn, PolicyCustomHashFn::get_hash_slot_fn<Hash>());
EXPECT_EQ(fn(&hasher, &value), hasher(value));
EXPECT_EQ(apply_called_count, 0);
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/internal/hash_policy_traits.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/internal/hash_policy_traits_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
8cd340ab-7141-46df-92ad-c49c451a84d9 | cpp | google/arolla | reader | arolla/io/proto/reflection/reader.cc | arolla/io/proto/reflection/reader_test.cc | #include "arolla/io/proto/reflection/reader.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include <variant>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/reflection.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/io/proto_types/types.h"
#include "arolla/memory/buffer.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/bytes.h"
#include "arolla/util/text.h"
#include "arolla/util/status_macros_backport.h"
namespace {
using ::google::protobuf::FieldDescriptor;
using ::google::protobuf::Message;
using ::google::protobuf::Reflection;
using ::absl::StatusOr;
using ::arolla::Bytes;
using ::arolla::FrameLayout;
using ::arolla::FramePtr;
using ::arolla::GetDenseArrayQType;
using ::arolla::GetOptionalQType;
using ::arolla::GetQType;
using ::arolla::OptionalValue;
using ::arolla::QTypePtr;
using ::arolla::Text;
using ::arolla::TypedSlot;
using ::arolla::DenseArrayShape;
using ::arolla::proto::arolla_size_t;
using ::arolla::proto::ProtoFieldAccessInfo;
using ::arolla::proto::ProtoTypeReader;
using ::arolla::proto::RegularFieldAccess;
using ::arolla::proto::RepeatedFieldAccess;
using ::arolla::proto::RepeatedFieldIndexAccess;
using ::arolla::proto::RepeatedFieldSizeAccess;
using ::arolla::proto::StringFieldType;
using ReadValueFn = std::function<void(const Message&, void*)>;
template <class T, class ProtoGetFn>
struct ByIndexReader {
void operator()(const Message& m, void* res_void) const {
auto* res = reinterpret_cast<OptionalValue<T>*>(res_void);
const auto* ref = m.GetReflection();
if (access_info.idx < ref->FieldSize(m, field)) {
*res = static_cast<T>(
getter.GetFromRepeated(ref, m, field, access_info.idx));
} else {
*res = std::nullopt;
}
}
const FieldDescriptor* field;
RepeatedFieldIndexAccess access_info;
ProtoGetFn getter;
};
template <class T, class ProtoGetFn>
struct FieldReader {
void operator()(const Message& m, void* res_void) const {
auto* res = reinterpret_cast<OptionalValue<T>*>(res_void);
const auto* ref = m.GetReflection();
if (ref->HasField(m, field)) {
*res = static_cast<T>(getter.GetSingular(ref, m, field));
} else {
*res = std::nullopt;
}
}
const FieldDescriptor* field;
ProtoGetFn getter;
};
using PushbackFn = std::function<void(const Message&, void*)>;
template <class T, class ProtoGetFn>
struct ManyPushBackFn {
void operator()(const Message& m, void* res_void) const {
auto* res = reinterpret_cast<std::vector<OptionalValue<T>>*>(res_void);
const auto* ref = m.GetReflection();
for (const auto& val : getter.GetRepeated(ref, m, field)) {
res->push_back(static_cast<T>(val));
}
}
const FieldDescriptor* field;
ProtoGetFn getter;
};
struct SizePushBackFn {
void operator()(const Message& m, void* res_void) const {
auto* res = reinterpret_cast<std::vector<arolla_size_t>*>(res_void);
const auto* ref = m.GetReflection();
res->push_back(ref->FieldSize(m, field));
}
const FieldDescriptor* field;
};
struct SizeToShapeFn {
void operator()(const Message& m, void* res_void) const {
auto* res = reinterpret_cast<DenseArrayShape*>(res_void);
const auto* ref = m.GetReflection();
res->size = ref->FieldSize(m, field);
}
const FieldDescriptor* field;
};
template <class ResultT>
struct SinglePushBackFn {
void operator()(const Message& m, void* res_void) const {
auto* res =
reinterpret_cast<std::vector<OptionalValue<ResultT>>*>(res_void);
res->emplace_back();
get_fn(m, &res->back());
}
ReadValueFn get_fn;
};
#define PROTO_GETTER_OBJ(TYPE, CPP_TYPE) \
struct PFG##TYPE { \
auto GetSingular(const Reflection* ref, const Message& m, \
const FieldDescriptor* field) const { \
return ref->Get##TYPE(m, field); \
} \
auto GetFromRepeated(const Reflection* ref, const Message& m, \
const FieldDescriptor* field, int index) const { \
return ref->GetRepeated##TYPE(m, field, index); \
} \
auto GetRepeated(const Reflection* ref, const Message& m, \
const FieldDescriptor* field) const { \
return ref->GetRepeatedFieldRef<CPP_TYPE>(m, field); \
} \
}; \
constexpr auto kProtoGetter##TYPE = PFG##TYPE{};
PROTO_GETTER_OBJ(Int32, int32_t);
PROTO_GETTER_OBJ(Int64, int64_t);
PROTO_GETTER_OBJ(UInt32, uint32_t);
PROTO_GETTER_OBJ(UInt64, uint64_t);
PROTO_GETTER_OBJ(Float, float);
PROTO_GETTER_OBJ(Double, double);
PROTO_GETTER_OBJ(Bool, bool);
PROTO_GETTER_OBJ(String, std::string);
PROTO_GETTER_OBJ(EnumValue, int32_t);
#undef PROTO_GETTER_OBJ
absl::Status CheckAccessInfo(const FieldDescriptor* field,
const ProtoFieldAccessInfo& info,
bool allow_repeated, bool is_last) {
if (field == nullptr) {
return absl::FailedPreconditionError(
"field is nullptr (incorrect name passed into FindFieldByName?)");
}
if (field->is_repeated()) {
if (std::holds_alternative<RepeatedFieldIndexAccess>(info)) {
return absl::OkStatus();
}
if (allow_repeated && std::holds_alternative<RepeatedFieldAccess>(info)) {
return absl::OkStatus();
}
if (allow_repeated && is_last &&
std::holds_alternative<RepeatedFieldSizeAccess>(info)) {
return absl::OkStatus();
}
return absl::FailedPreconditionError(absl::StrCat(
"incorrect access to the repeated field: ", field->full_name()));
} else {
if (!std::holds_alternative<RegularFieldAccess>(info)) {
return absl::FailedPreconditionError(absl::StrCat(
"incorrect access to the regular field: ", field->full_name()));
}
}
return absl::OkStatus();
}
class Traverser {
public:
Traverser(std::vector<const FieldDescriptor*> fields,
std::vector<ProtoFieldAccessInfo> access_infos)
: fields_(std::move(fields)), access_infos_(std::move(access_infos)) {
DCHECK_EQ(fields_.size(), access_infos_.size());
}
const Message* GetLastSubMessage(const Message& m) const {
const Message* current_message = &m;
for (size_t i = 0; i != fields_.size(); ++i) {
current_message = GetSubMessage(*current_message, i);
if (current_message == nullptr) {
return nullptr;
}
}
return current_message;
}
void TraverseSubmessages(const Message& m, PushbackFn callback,
void* res) const {
using IndexedMessage = std::pair<const Message*,
size_t>;
std::vector<IndexedMessage> stack;
stack.emplace_back(&m, 0);
while (!stack.empty()) {
auto [current_message, i] = stack.back();
stack.pop_back();
if (i != fields_.size()) {
size_t end_id = stack.size();
const auto* field = fields_[i];
const auto& access_info = access_infos_[i];
DCHECK(!std::holds_alternative<RepeatedFieldSizeAccess>(access_info));
if (std::holds_alternative<RepeatedFieldAccess>(access_info)) {
const auto* ref = m.GetReflection();
for (const Message& sub_message :
ref->GetRepeatedFieldRef<Message>(m, field)) {
stack.emplace_back(&sub_message, i + 1);
}
} else {
stack.emplace_back(GetSubMessage(m, i), i + 1);
}
std::reverse(stack.begin() + end_id, stack.end());
} else {
callback(*current_message, res);
}
}
}
private:
const Message* GetSubMessage(const Message& m, int i) const {
const auto* field = fields_[i];
const auto& access_info = access_infos_[i];
DCHECK(!std::holds_alternative<RepeatedFieldAccess>(access_info));
const auto* ref = m.GetReflection();
if (field->is_repeated()) {
auto& access = *std::get_if<RepeatedFieldIndexAccess>(&access_info);
if (access.idx < ref->FieldSize(m, field)) {
return &ref->GetRepeatedMessage(m, field, access.idx);
} else {
return nullptr;
}
} else {
if (ref->HasField(m, field)) {
return &ref->GetMessage(m, field);
} else {
return nullptr;
}
}
}
std::vector<const FieldDescriptor*> fields_;
std::vector<ProtoFieldAccessInfo> access_infos_;
};
template <class T>
struct OptionalReader {
void operator()(const Message& m, FramePtr frame) const {
const Message* last_message = traverser.GetLastSubMessage(m);
if (last_message == nullptr) {
frame.Set(slot, {});
} else {
get_fn(*last_message, frame.GetMutable(slot));
}
}
Traverser traverser;
FrameLayout::Slot<OptionalValue<T>> slot;
ReadValueFn get_fn;
};
template <class T>
struct OptionalReaderFactory {
absl::StatusOr<ProtoTypeReader::BoundReadFn> operator()(
TypedSlot typed_slot) const {
ASSIGN_OR_RETURN(auto slot, typed_slot.ToSlot<OptionalValue<T>>());
return OptionalReader<T>{traverser, slot, get_fn};
}
Traverser traverser;
ReadValueFn get_fn;
};
struct ArraySizeReader {
void operator()(const Message& m, FramePtr frame) const {
std::vector<arolla_size_t> res;
traverser.TraverseSubmessages(m, last_push_back_fn, &res);
frame.Set(slot,
::arolla::DenseArray<arolla_size_t>{
::arolla::Buffer<arolla_size_t>::Create(std::move(res))});
}
Traverser traverser;
FrameLayout::Slot<::arolla::DenseArray<arolla_size_t>> slot;
PushbackFn last_push_back_fn;
};
struct ArraySizeReaderFactory {
absl::StatusOr<ProtoTypeReader::BoundReadFn> operator()(
TypedSlot typed_slot) const {
ASSIGN_OR_RETURN(auto slot,
typed_slot.ToSlot<::arolla::DenseArray<arolla_size_t>>());
return ArraySizeReader{traverser, slot, SizePushBackFn{last_field}};
}
Traverser traverser;
const FieldDescriptor* last_field;
};
struct ShapeSizeReader {
void operator()(const Message& m, FramePtr frame) const {
DenseArrayShape res;
traverser.TraverseSubmessages(m, last_push_back_fn, &res);
frame.Set(slot, res);
}
Traverser traverser;
FrameLayout::Slot<::arolla::DenseArrayShape> slot;
PushbackFn last_push_back_fn;
};
struct ShapeSizeReaderFactory {
absl::StatusOr<ProtoTypeReader::BoundReadFn> operator()(
TypedSlot typed_slot) const {
ASSIGN_OR_RETURN(auto slot, typed_slot.ToSlot<::arolla::DenseArrayShape>());
return ShapeSizeReader{traverser, slot, SizeToShapeFn{last_field}};
}
Traverser traverser;
const FieldDescriptor* last_field;
};
template <class T>
struct DenseArrayReader {
void operator()(const Message& m, FramePtr frame) const {
std::vector<OptionalValue<T>> res;
traverser.TraverseSubmessages(m, last_push_back_fn, &res);
frame.Set(slot, ::arolla::CreateDenseArray<T>(res));
}
Traverser traverser;
FrameLayout::Slot<::arolla::DenseArray<T>> slot;
PushbackFn last_push_back_fn;
};
template <class T>
struct DenseArrayReaderFactory {
absl::StatusOr<ProtoTypeReader::BoundReadFn> operator()(
TypedSlot typed_slot) const {
ASSIGN_OR_RETURN(auto slot, typed_slot.ToSlot<::arolla::DenseArray<T>>());
return DenseArrayReader<T>{traverser, slot, last_push_back_fn};
}
Traverser traverser;
PushbackFn last_push_back_fn;
};
template <class CallBackFn>
auto SwitchByProtoType(FieldDescriptor::Type type, CallBackFn callback,
StringFieldType string_type)
-> decltype(std::declval<CallBackFn>()(std::decay<int32_t>(),
kProtoGetterInt32)) {
switch (type) {
case FieldDescriptor::TYPE_INT32:
case FieldDescriptor::TYPE_SINT32:
case FieldDescriptor::TYPE_SFIXED32:
return callback(std::decay<int32_t>{}, kProtoGetterInt32);
case FieldDescriptor::TYPE_INT64:
case FieldDescriptor::TYPE_SINT64:
case FieldDescriptor::TYPE_SFIXED64:
return callback(std::decay<int64_t>{}, kProtoGetterInt64);
case FieldDescriptor::TYPE_UINT32:
case FieldDescriptor::TYPE_FIXED32:
return callback(std::decay<int64_t>{}, kProtoGetterUInt32);
case FieldDescriptor::TYPE_UINT64:
case FieldDescriptor::TYPE_FIXED64:
return callback(std::decay<uint64_t>{}, kProtoGetterUInt64);
case FieldDescriptor::TYPE_DOUBLE:
return callback(std::decay<double>{}, kProtoGetterDouble);
case FieldDescriptor::TYPE_FLOAT:
return callback(std::decay<float>{}, kProtoGetterFloat);
case FieldDescriptor::TYPE_BOOL:
return callback(std::decay<bool>{}, kProtoGetterBool);
case FieldDescriptor::TYPE_STRING: {
switch (string_type) {
case StringFieldType::kText:
return callback(std::decay<Text>{}, kProtoGetterString);
case StringFieldType::kBytes:
return callback(std::decay<Bytes>{}, kProtoGetterString);
}
}
case FieldDescriptor::TYPE_BYTES:
return callback(std::decay<Bytes>{}, kProtoGetterString);
case FieldDescriptor::TYPE_ENUM:
return callback(std::decay<int32_t>{}, kProtoGetterEnumValue);
default:
return absl::FailedPreconditionError(
absl::StrCat("type ", type, " is not supported"));
}
}
absl::Status VerifyFieldsAndAccessInfos(
absl::Span<const FieldDescriptor* const> fields,
const std::vector<ProtoFieldAccessInfo>& access_infos,
bool allow_repeated = false) {
if (fields.empty()) {
return absl::FailedPreconditionError("fields must be non empty");
}
if (fields.size() != access_infos.size()) {
return absl::FailedPreconditionError(
"fields and access_info must be same size if access_info is not empty");
}
for (size_t i = 0; i != fields.size(); ++i) {
RETURN_IF_ERROR(CheckAccessInfo(fields[i], access_infos[i], allow_repeated,
i + 1 == fields.size()));
}
return absl::OkStatus();
}
// Callback for SwitchByProtoType that builds a ProtoTypeReader producing an
// OptionalValue<T> for a chain of proto fields.
//
// The last (field, access-info) pair describes the leaf value to read; all
// preceding pairs are folded into `traverser_`, which walks the intermediate
// submessages. The constructor stores the spans without copying them, so the
// underlying containers must outlive every call made on this object.
class OptionalReaderCallback {
 public:
  OptionalReaderCallback(absl::Span<const FieldDescriptor* const> fields,
                         absl::Span<const ProtoFieldAccessInfo> access_infos)
      : fields_(fields),
        access_infos_(access_infos),
        // The traverser handles all but the leaf field; the leaf is read by
        // the functions constructed in operator() / CreateSizeAccessor().
        traverser_(
            std::vector(fields_.begin(), fields_.end() - 1),
            std::vector(access_infos_.begin(), access_infos_.end() - 1)) {}
  // Invoked by SwitchByProtoType with the result-type meta function and a
  // getter for the leaf proto field; returns a reader with QType
  // OptionalValue<ResultT>.
  template <class ResultMetaFn, class ProtoFieldGetter>
  absl::StatusOr<std::unique_ptr<ProtoTypeReader>> operator()(
      ResultMetaFn, ProtoFieldGetter last_field_getter) const {
    using ResultT = typename ResultMetaFn::type;
    ProtoFieldAccessInfo last_access_info = access_infos_.back();
    const FieldDescriptor* last_field = fields_.back();
    ReadValueFn read_fn;
    if (last_field->is_repeated()) {
      // A repeated leaf can only be read as a single optional value via an
      // explicit element index.
      DCHECK(
          std::holds_alternative<RepeatedFieldIndexAccess>(last_access_info));
      read_fn = ByIndexReader<ResultT, ProtoFieldGetter>{
          last_field, *std::get_if<RepeatedFieldIndexAccess>(&last_access_info),
          last_field_getter};
    } else {
      read_fn =
          FieldReader<ResultT, ProtoFieldGetter>{last_field, last_field_getter};
    }
    return std::make_unique<ProtoTypeReader>(
        GetOptionalQType<ResultT>(),
        OptionalReaderFactory<ResultT>{traverser_, read_fn});
  }
  // Builds a reader that reports the size of the repeated leaf field as a
  // DenseArrayShape. The leaf access info must be RepeatedFieldSizeAccess.
  absl::StatusOr<std::unique_ptr<ProtoTypeReader>> CreateSizeAccessor() const {
    ProtoFieldAccessInfo last_access_info = access_infos_.back();
    if (!std::holds_alternative<RepeatedFieldSizeAccess>(last_access_info)) {
      return absl::InternalError("size accessor creation expected");
    }
    const FieldDescriptor* last_field = fields_.back();
    return std::make_unique<ProtoTypeReader>(
        GetQType<DenseArrayShape>(),
        ShapeSizeReaderFactory{traverser_, last_field});
  }
 private:
  // Non-owning views over the caller's field chain; see class comment.
  absl::Span<const FieldDescriptor* const> fields_;
  absl::Span<const ProtoFieldAccessInfo> access_infos_;
  Traverser traverser_;
};
// Callback for SwitchByProtoType that builds a ProtoTypeReader producing a
// DenseArray<T> for a chain of proto fields.
//
// Mirrors OptionalReaderCallback, but the leaf may be fully expanded
// (RepeatedFieldAccess pushes every element) instead of being limited to a
// single value. Spans are stored without copying; the underlying containers
// must outlive every call made on this object.
class DenseArrayReaderCallback {
 public:
  DenseArrayReaderCallback(absl::Span<const FieldDescriptor* const> fields,
                           absl::Span<const ProtoFieldAccessInfo> access_infos)
      : fields_(fields),
        access_infos_(access_infos),
        // The traverser handles all but the leaf field.
        traverser_(
            std::vector(fields_.begin(), fields_.end() - 1),
            std::vector(access_infos_.begin(), access_infos_.end() - 1)) {}
  // Builds a reader that reports the size of the repeated leaf field as a
  // DenseArray of sizes. The leaf access info must be RepeatedFieldSizeAccess.
  absl::StatusOr<std::unique_ptr<ProtoTypeReader>> CreateSizeAccessor() const {
    ProtoFieldAccessInfo last_access_info = access_infos_.back();
    if (!std::holds_alternative<RepeatedFieldSizeAccess>(last_access_info)) {
      return absl::InternalError("size accessor creation expected");
    }
    const FieldDescriptor* last_field = fields_.back();
    return std::make_unique<ProtoTypeReader>(
        GetDenseArrayQType<arolla_size_t>(),
        ArraySizeReaderFactory{traverser_, last_field});
  }
  // Invoked by SwitchByProtoType with the result-type meta function and a
  // getter for the leaf proto field; returns a reader with QType
  // DenseArray<ResultT>.
  template <class ResultMetaFn, class ProtoFieldGetter>
  absl::StatusOr<std::unique_ptr<ProtoTypeReader>> operator()(
      ResultMetaFn, ProtoFieldGetter last_field_getter) const {
    using ResultT = typename ResultMetaFn::type;
    using DenseArrayResultT = ::arolla::DenseArray<ResultT>;
    ProtoFieldAccessInfo last_access_info = access_infos_.back();
    const FieldDescriptor* last_field = fields_.back();
    PushbackFn pb_fn;
    if (std::holds_alternative<RepeatedFieldAccess>(last_access_info)) {
      // Expand the repeated leaf: push every element into the result array.
      pb_fn = ManyPushBackFn<ResultT, ProtoFieldGetter>{last_field,
                                                        last_field_getter};
    } else if (std::holds_alternative<RepeatedFieldSizeAccess>(
                   last_access_info)) {
      // Size access is not a value read; callers must use CreateSizeAccessor.
      return absl::InternalError(
          "size accessor must be created with CreateSizeAccessor");
    } else if (last_field->is_repeated()) {
      // Repeated leaf read at a single explicit index.
      DCHECK(
          std::holds_alternative<RepeatedFieldIndexAccess>(last_access_info));
      pb_fn =
          SinglePushBackFn<ResultT>{ByIndexReader<ResultT, ProtoFieldGetter>{
              last_field,
              *std::get_if<RepeatedFieldIndexAccess>(&last_access_info),
              last_field_getter}};
    } else {
      // Plain (non-repeated) leaf field.
      pb_fn = SinglePushBackFn<ResultT>{FieldReader<ResultT, ProtoFieldGetter>{
          last_field, last_field_getter}};
    }
    return std::make_unique<ProtoTypeReader>(
        GetQType<DenseArrayResultT>(),
        DenseArrayReaderFactory<ResultT>{traverser_, pb_fn});
  }
 private:
  // Non-owning views over the caller's field chain; see class comment.
  absl::Span<const FieldDescriptor* const> fields_;
  absl::Span<const ProtoFieldAccessInfo> access_infos_;
  Traverser traverser_;
};
}
namespace arolla::proto {
// Creates a reader that extracts the leaf of `fields` as an optional scalar.
// `access_infos` must parallel `fields` one-to-one; repeated intermediate
// access is rejected by VerifyFieldsAndAccessInfos (allow_repeated defaults
// to false).
absl::StatusOr<std::unique_ptr<ProtoTypeReader>>
ProtoTypeReader::CreateOptionalReader(
    absl::Span<const FieldDescriptor* const> fields,
    std::vector<ProtoFieldAccessInfo> access_infos,
    proto::StringFieldType string_type) {
  RETURN_IF_ERROR(VerifyFieldsAndAccessInfos(fields, access_infos));
  const FieldDescriptor* last_field = fields.back();
  // NOTE(review): `std::move(access_infos)` materializes a temporary vector
  // that the callback only views via absl::Span. This is safe because the
  // temporary lives until the end of this full expression, within which
  // SwitchByProtoType invokes the callback — but the callback must not be
  // retained past this statement.
  return SwitchByProtoType(
      last_field->type(),
      OptionalReaderCallback(fields, std::move(access_infos)), string_type);
}
// Creates a reader that reports the size of the repeated leaf of `fields` as
// a DenseArrayShape. Repeated intermediate fields are allowed here
// (allow_repeated=true). `string_type` is accepted for interface symmetry but
// is not used by this function's body.
absl::StatusOr<std::unique_ptr<ProtoTypeReader>>
ProtoTypeReader::CreateDenseArrayShapeReader(
    absl::Span<const FieldDescriptor* const> fields,
    std::vector<ProtoFieldAccessInfo> access_infos,
    proto::StringFieldType string_type) {
  RETURN_IF_ERROR(VerifyFieldsAndAccessInfos(fields, access_infos,
                                             true));
  // The moved-from temporary vector backs the callback's span only for the
  // duration of this full expression, which is exactly when
  // CreateSizeAccessor() runs.
  return OptionalReaderCallback(fields, std::move(access_infos))
      .CreateSizeAccessor();
}
// Creates a reader that extracts the leaf of `fields` as a DenseArray.
// Repeated intermediate fields are allowed (allow_repeated=true). If the leaf
// access is RepeatedFieldSizeAccess, a size accessor is produced instead of a
// value reader.
absl::StatusOr<std::unique_ptr<ProtoTypeReader>>
ProtoTypeReader::CreateDenseArrayReader(
    absl::Span<const FieldDescriptor* const> fields,
    std::vector<ProtoFieldAccessInfo> access_infos,
    proto::StringFieldType string_type) {
  RETURN_IF_ERROR(VerifyFieldsAndAccessInfos(fields, access_infos,
                                             true));
  const FieldDescriptor* last_field = fields.back();
  // Copy the leaf access info before `access_infos` is moved into the
  // callback below.
  auto last_access = access_infos.back();
  DenseArrayReaderCallback callback(fields, std::move(access_infos));
  if (std::holds_alternative<proto::RepeatedFieldSizeAccess>(last_access)) {
    return callback.CreateSizeAccessor();
  } else {
    return SwitchByProtoType(last_field->type(), callback, string_type);
  }
}
} | #include "arolla/io/proto/reflection/reader.h"
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "google/protobuf/descriptor.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/io/proto_types/types.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/proto/testing/test.pb.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/bytes.h"
#include "arolla/util/text.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::proto {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::testing::ElementsAre;
using ::testing::IsEmpty;
using ProtoRoot = ::testing_namespace::Root;
const auto* kRootDescr = ProtoRoot::descriptor();
auto BuildDescriptorSequence(const std::vector<std::string>& field_names) {
std::vector<const google::protobuf::FieldDescriptor*> fields;
const google::protobuf::Descriptor* root_descriptor = kRootDescr;
for (const auto& name : field_names) {
CHECK(root_descriptor != nullptr)
<< "incorrect test fields: " << absl::StrJoin(field_names, ",");
const google::protobuf::FieldDescriptor* field_descriptor =
root_descriptor->FindFieldByName(name);
fields.push_back(field_descriptor);
root_descriptor = field_descriptor->message_type();
}
return fields;
}
// Binds `reader` to a freshly built single-slot frame, runs the bound read
// function against message `m`, and returns the slot's resulting value.
// `garbage` pre-populates the slot before the read runs (presumably so tests
// can detect whether the reader overwrote prior contents — confirm with
// callers). Fails if `reader`'s QType is not GetQType<T>().
template <class T>
absl::StatusOr<T> ReadValue(const ProtoTypeReader& reader,
                            const google::protobuf::Message& m, T garbage = T{}) {
  if (reader.qtype() != GetQType<T>()) {
    return absl::FailedPreconditionError(
        absl::StrFormat("QType mismatch: expected %s, found %s",
                        GetQType<T>()->name(), reader.qtype()->name()));
  }
  // Build a one-slot memory layout and bind the reader to that slot.
  FrameLayout::Builder layout_builder;
  auto slot = layout_builder.AddSlot<T>();
  ASSIGN_OR_RETURN(auto read_fn, reader.BindReadFn(TypedSlot::FromSlot(slot)));
  FrameLayout memory_layout = std::move(layout_builder).Build();
  MemoryAllocation alloc(&memory_layout);
  FramePtr frame = alloc.frame();
  frame.Set(slot, garbage);
  read_fn(m, frame);
  return frame.Get(slot);
}
template <class T>
absl::StatusOr<OptionalValue<T>> ReadOptionalValue(
const ProtoTypeReader& reader, const google::protobuf::Message& m) {
return ReadValue<OptionalValue<T>>(reader, m,
T{});
}
template <class T>
absl::StatusOr<OptionalValue<T>> ReadOptionalTopLevelValue(
const std::string& field_name, const google::protobuf::Message& m) {
ASSIGN_OR_RETURN(auto reader,
ProtoTypeReader::CreateOptionalReader(
{kRootDescr->FindFieldByName(field_name)}, {{}}));
return ReadValue<OptionalValue<T>>(*reader, m);
}
TEST(TopLevelOptionalReaderTest, All) {
::testing_namespace::Root m;
EXPECT_THAT(ReadOptionalTopLevelValue<int32_t>("x", m),
IsOkAndHolds(std::nullopt));
m.set_x(19);
EXPECT_THAT(ReadOptionalTopLevelValue<int32_t>("x", m), IsOkAndHolds(19));
m.set_x_enum(ProtoRoot::SECOND_VALUE);
EXPECT_THAT(ReadOptionalTopLevelValue<int32_t>("x_enum", m),
IsOkAndHolds(ProtoRoot::SECOND_VALUE));
m.set_str("19");
EXPECT_THAT(ReadOptionalTopLevelValue<Text>("str", m),
IsOkAndHolds(Text{"19"}));
m.set_raw_bytes("19");
EXPECT_THAT(ReadOptionalTopLevelValue<Bytes>("raw_bytes", m),
IsOkAndHolds(Bytes{"19"}));
m.set_x_int64(19);
EXPECT_THAT(ReadOptionalTopLevelValue<int64_t>("x_int64", m),
IsOkAndHolds(19));
m.set_x_uint32(19);
EXPECT_THAT(ReadOptionalTopLevelValue<int64_t>("x_uint32", m),
IsOkAndHolds(19));
m.set_x_uint64(19);
EXPECT_THAT(ReadOptionalTopLevelValue<uint64_t>("x_uint64", m),
IsOkAndHolds(19));
m.set_x_float(19.0f);
EXPECT_THAT(ReadOptionalTopLevelValue<float>("x_float", m),
IsOkAndHolds(19.0f));
m.set_x_double(19.0);
EXPECT_THAT(ReadOptionalTopLevelValue<double>("x_double", m),
IsOkAndHolds(19.0));
m.set_x_fixed64(19);
EXPECT_THAT(ReadOptionalTopLevelValue<uint64_t>("x_fixed64", m),
IsOkAndHolds(19));
{
ASSERT_OK_AND_ASSIGN(auto reader,
ProtoTypeReader::CreateOptionalReader(
{kRootDescr->FindFieldByName("raw_bytes")}, {{}},
StringFieldType::kBytes));
m.set_raw_bytes("19");
EXPECT_THAT(ReadOptionalValue<Bytes>(*reader, m),
IsOkAndHolds(Bytes("19")));
}
{
ASSERT_OK_AND_ASSIGN(auto reader, ProtoTypeReader::CreateOptionalReader(
{kRootDescr->FindFieldByName("str")},
{{}}, StringFieldType::kBytes));
m.set_str("19");
EXPECT_THAT(ReadOptionalValue<Bytes>(*reader, m),
IsOkAndHolds(Bytes("19")));
}
}
template <class T>
absl::StatusOr<::arolla::DenseArray<T>> ReadDenseArrayValue(
const ProtoTypeReader& reader, const google::protobuf::Message& m) {
return ReadValue<::arolla::DenseArray<T>>(
reader, m,
::arolla::CreateDenseArray<T>({T{}, T{}}));
}
template <class T>
absl::StatusOr<::arolla::DenseArray<T>> ReadDenseArrayValue(
const std::vector<std::string>& field_names,
std::vector<ProtoFieldAccessInfo> access_infos, const google::protobuf::Message& m) {
ASSIGN_OR_RETURN(auto reader,
ProtoTypeReader::CreateDenseArrayReader(
BuildDescriptorSequence(field_names), access_infos));
return ReadDenseArrayValue<T>(*reader, m);
}
TEST(ProtoTypeReader, CreateTopLevelDenseArrayReaderNonRepeatedField) {
::testing_namespace::Root m;
EXPECT_THAT(ReadDenseArrayValue<int32_t>({"x"}, {{}}, m),
IsOkAndHolds(ElementsAre(std::nullopt)));
m.set_x(19);
EXPECT_THAT(ReadDenseArrayValue<int32_t>({"x"}, {{}}, m),
IsOkAndHolds(ElementsAre(19)));
m.set_str("19");
EXPECT_THAT(ReadDenseArrayValue<Text>({"str"}, {{}}, m),
IsOkAndHolds(ElementsAre("19")));
m.set_raw_bytes("19");
EXPECT_THAT(ReadDenseArrayValue<Bytes>({"raw_bytes"}, {{}}, m),
IsOkAndHolds(ElementsAre("19")));
m.set_x_int64(19);
EXPECT_THAT(ReadDenseArrayValue<int64_t>({"x_int64"}, {{}}, m),
IsOkAndHolds(ElementsAre(19)));
m.set_x_uint32(19);
EXPECT_THAT(ReadDenseArrayValue<int64_t>({"x_uint32"}, {{}}, m),
IsOkAndHolds(ElementsAre(19)));
m.set_x_uint64(19);
EXPECT_THAT(ReadDenseArrayValue<uint64_t>({"x_uint64"}, {{}}, m),
IsOkAndHolds(ElementsAre(19)));
m.set_x_float(19.0f);
EXPECT_THAT(ReadDenseArrayValue<float>({"x_float"}, {{}}, m),
IsOkAndHolds(ElementsAre(19.0f)));
m.set_x_double(19.0);
EXPECT_THAT(ReadDenseArrayValue<double>({"x_double"}, {{}}, m),
IsOkAndHolds(ElementsAre(19.0)));
{
ASSERT_OK_AND_ASSIGN(auto reader,
ProtoTypeReader::CreateDenseArrayReader(
{kRootDescr->FindFieldByName("raw_bytes")}, {{}},
StringFieldType::kBytes));
m.set_raw_bytes("19");
EXPECT_THAT(ReadDenseArrayValue<Bytes>(*reader, m),
IsOkAndHolds(ElementsAre("19")));
}
{
ASSERT_OK_AND_ASSIGN(auto reader, ProtoTypeReader::CreateDenseArrayReader(
{kRootDescr->FindFieldByName("str")},
{{}}, StringFieldType::kBytes));
m.set_str("19");
EXPECT_THAT(ReadDenseArrayValue<Bytes>(*reader, m),
IsOkAndHolds(ElementsAre("19")));
}
}
template <class T>
absl::StatusOr<OptionalValue<T>> ReadOptionalValue(
const std::vector<std::string>& field_names,
std::vector<ProtoFieldAccessInfo> access_infos, const google::protobuf::Message& m) {
std::vector<const google::protobuf::FieldDescriptor*> fields =
BuildDescriptorSequence(field_names);
ASSIGN_OR_RETURN(auto reader,
ProtoTypeReader::CreateOptionalReader(fields, access_infos));
return ReadValue<OptionalValue<T>>(*reader, m);
}
template <class T>
absl::StatusOr<OptionalValue<T>> ReadOptionalValue(
const std::vector<std::string>& field_names, const google::protobuf::Message& m) {
return ReadOptionalValue<T>(
field_names, std::vector<ProtoFieldAccessInfo>(field_names.size()), m);
}
TEST(ProtoTypeReader, CreateInnerOptionalReader) {
::testing_namespace::Root m;
EXPECT_THAT(ReadOptionalValue<int32_t>({"inner", "a"}, m),
IsOkAndHolds(std::nullopt));
m.mutable_inner()->set_a(19);
EXPECT_THAT(ReadOptionalValue<int32_t>({"inner", "a"}, m), IsOkAndHolds(19));
EXPECT_THAT(ReadOptionalValue<int32_t>({"inner", "inner2", "z"}, m),
IsOkAndHolds(std::nullopt));
m.mutable_inner()->mutable_inner2();
EXPECT_THAT(ReadOptionalValue<int32_t>({"inner", "inner2", "z"}, m),
IsOkAndHolds(std::nullopt));
m.mutable_inner()->mutable_inner2()->set_z(19);
EXPECT_THAT(ReadOptionalValue<int32_t>({"inner", "inner2", "z"}, m),
IsOkAndHolds(19));
}
template <class T>
absl::StatusOr<OptionalValue<T>> ReadOptionalTopLevelFromRepeatedValue(
const std::string& field_name, const google::protobuf::Message& m, size_t index = 0) {
return ReadOptionalValue<T>({field_name}, {RepeatedFieldIndexAccess{index}},
m);
}
TEST(ProtoTypeReader, CreateRepeatedIndexAccessOptionalReader) {
::testing_namespace::Root m;
auto read_ys = [](const auto& m) {
return ReadOptionalValue<int32_t>({"ys"}, {RepeatedFieldIndexAccess{1}}, m);
};
EXPECT_THAT(read_ys(m), IsOkAndHolds(std::nullopt));
m.add_ys(89);
EXPECT_THAT(read_ys(m), IsOkAndHolds(std::nullopt));
m.add_ys(77);
EXPECT_THAT(read_ys(m), IsOkAndHolds(77));
auto read_inners_a = [](const auto& m) {
return ReadOptionalValue<int32_t>({"inners", "a"},
{RepeatedFieldIndexAccess{1}, {}}, m);
};
EXPECT_THAT(read_inners_a(m), IsOkAndHolds(std::nullopt));
m.add_inners();
EXPECT_THAT(read_inners_a(m), IsOkAndHolds(std::nullopt));
m.add_inners()->set_a(7);
EXPECT_THAT(read_inners_a(m), IsOkAndHolds(7));
auto read_inners_as = [](const auto& m) {
return ReadOptionalValue<int32_t>(
{"inners", "as"},
{RepeatedFieldIndexAccess{1}, RepeatedFieldIndexAccess{1}}, m);
};
m.mutable_inners(1)->add_as(0);
EXPECT_THAT(read_inners_as(m), IsOkAndHolds(std::nullopt));
m.mutable_inners(1)->add_as(57);
EXPECT_THAT(read_inners_as(m), IsOkAndHolds(57));
*m.add_repeated_str() = "19";
EXPECT_THAT(ReadOptionalTopLevelFromRepeatedValue<Text>("repeated_str", m),
IsOkAndHolds(Text("19")));
*m.add_repeated_raw_bytes() = "19";
EXPECT_THAT(
ReadOptionalTopLevelFromRepeatedValue<Bytes>("repeated_raw_bytes", m),
IsOkAndHolds(Bytes("19")));
m.add_repeated_floats(19.0f);
EXPECT_THAT(
ReadOptionalTopLevelFromRepeatedValue<float>("repeated_floats", m),
IsOkAndHolds(19.0f));
m.add_repeated_doubles(19.0);
EXPECT_THAT(
ReadOptionalTopLevelFromRepeatedValue<double>("repeated_doubles", m),
IsOkAndHolds(19.0));
m.add_repeated_int32s(19);
EXPECT_THAT(ReadOptionalTopLevelFromRepeatedValue<int>("repeated_int32s", m),
IsOkAndHolds(19));
m.add_repeated_int64s(19);
EXPECT_THAT(
ReadOptionalTopLevelFromRepeatedValue<int64_t>("repeated_int64s", m),
IsOkAndHolds(19));
m.add_repeated_uint32s(19);
EXPECT_THAT(
ReadOptionalTopLevelFromRepeatedValue<int64_t>("repeated_uint32s", m),
IsOkAndHolds(19));
m.add_repeated_uint64s(19);
EXPECT_THAT(
ReadOptionalTopLevelFromRepeatedValue<uint64_t>("repeated_uint64s", m),
IsOkAndHolds(19));
m.add_repeated_bools(true);
EXPECT_THAT(ReadOptionalTopLevelFromRepeatedValue<bool>("repeated_bools", m),
IsOkAndHolds(true));
m.add_repeated_enums(ProtoRoot::SECOND_VALUE);
EXPECT_THAT(ReadOptionalTopLevelFromRepeatedValue<int>("repeated_enums", m),
IsOkAndHolds(ProtoRoot::SECOND_VALUE));
}
template <class T>
absl::StatusOr<::arolla::DenseArray<T>> ReadDenseArrayTopLevelValue(
const std::string& field_name, const google::protobuf::Message& m) {
return ReadDenseArrayValue<T>({field_name}, {RepeatedFieldAccess{}}, m);
}
TEST(ProtoTypeReader, CreateRepeatedAccessOptionalReader) {
::testing_namespace::Root m;
EXPECT_THAT(ReadDenseArrayTopLevelValue<int>("ys", m),
IsOkAndHolds(IsEmpty()));
m.add_ys(89);
m.add_ys(57);
EXPECT_THAT(ReadDenseArrayTopLevelValue<int>("ys", m),
IsOkAndHolds(ElementsAre(89, 57)));
auto read_inners_a = [](const auto& m) {
return ReadDenseArrayValue<int32_t>({"inners", "a"},
{RepeatedFieldAccess{}, {}}, m);
};
EXPECT_THAT(read_inners_a(m), IsOkAndHolds(IsEmpty()));
m.add_inners();
EXPECT_THAT(read_inners_a(m), IsOkAndHolds(ElementsAre(std::nullopt)));
m.add_inners()->set_a(7);
EXPECT_THAT(read_inners_a(m), IsOkAndHolds(ElementsAre(std::nullopt, 7)));
m.add_inners()->set_a(37);
EXPECT_THAT(read_inners_a(m), IsOkAndHolds(ElementsAre(std::nullopt, 7, 37)));
auto read_inners_as = [](const auto& m) {
return ReadDenseArrayValue<int32_t>(
{"inners", "as"}, {RepeatedFieldAccess{}, RepeatedFieldAccess{}}, m);
};
EXPECT_THAT(read_inners_as(m), IsOkAndHolds(IsEmpty()));
m.mutable_inners(0)->add_as(0);
m.mutable_inners(0)->add_as(57);
m.mutable_inners(2)->add_as(19);
m.mutable_inners(2)->add_as(3);
m.mutable_inners(2)->add_as(17);
EXPECT_THAT(read_inners_as(m), IsOkAndHolds(ElementsAre(0, 57, 19, 3, 17)));
*m.add_repeated_str() = "19";
*m.add_repeated_str() = "17";
EXPECT_THAT(ReadDenseArrayTopLevelValue<Text>("repeated_str", m),
IsOkAndHolds(ElementsAre("19", "17")));
*m.add_repeated_raw_bytes() = "17";
*m.add_repeated_raw_bytes() = "19";
EXPECT_THAT(ReadDenseArrayTopLevelValue<Bytes>("repeated_raw_bytes", m),
IsOkAndHolds(ElementsAre("17", "19")));
m.add_repeated_floats(19.0f);
m.add_repeated_floats(17.0f);
EXPECT_THAT(ReadDenseArrayTopLevelValue<float>("repeated_floats", m),
IsOkAndHolds(ElementsAre(19.0f, 17.0f)));
m.add_repeated_doubles(19.0);
m.add_repeated_doubles(17.0);
EXPECT_THAT(ReadDenseArrayTopLevelValue<double>("repeated_doubles", m),
IsOkAndHolds(ElementsAre(19.0, 17.0)));
m.add_repeated_int32s(19);
m.add_repeated_int32s(17);
EXPECT_THAT(ReadDenseArrayTopLevelValue<int>("repeated_int32s", m),
IsOkAndHolds(ElementsAre(19, 17)));
m.add_repeated_int64s(19);
m.add_repeated_int64s(17);
EXPECT_THAT(ReadDenseArrayTopLevelValue<int64_t>("repeated_int64s", m),
IsOkAndHolds(ElementsAre(19, 17)));
m.add_repeated_uint32s(19);
m.add_repeated_uint32s(17);
EXPECT_THAT(ReadDenseArrayTopLevelValue<int64_t>("repeated_uint32s", m),
IsOkAndHolds(ElementsAre(19, 17)));
m.add_repeated_uint64s(19);
m.add_repeated_uint64s(17);
EXPECT_THAT(ReadDenseArrayTopLevelValue<uint64_t>("repeated_uint64s", m),
IsOkAndHolds(ElementsAre(19, 17)));
m.add_repeated_bools(true);
m.add_repeated_bools(false);
EXPECT_THAT(ReadDenseArrayTopLevelValue<bool>("repeated_bools", m),
IsOkAndHolds(ElementsAre(true, false)));
m.add_repeated_enums(ProtoRoot::SECOND_VALUE);
m.add_repeated_enums(ProtoRoot::DEFAULT);
EXPECT_THAT(
ReadDenseArrayTopLevelValue<int>("repeated_enums", m),
IsOkAndHolds(ElementsAre(ProtoRoot::SECOND_VALUE, ProtoRoot::DEFAULT)));
}
absl::StatusOr<::arolla::DenseArray<proto::arolla_size_t>>
ReadTopLevelSizeAsArray(const std::string& field_name,
const google::protobuf::Message& m) {
return ReadDenseArrayValue<proto::arolla_size_t>(
{field_name}, {RepeatedFieldSizeAccess{}}, m);
}
absl::StatusOr<::arolla::DenseArrayShape> ReadTopLevelSizeAsShape(
const std::string& field_name, const google::protobuf::Message& m) {
ASSIGN_OR_RETURN(auto reader, ProtoTypeReader::CreateDenseArrayShapeReader(
{kRootDescr->FindFieldByName(field_name)},
{RepeatedFieldSizeAccess{}}));
return ReadValue<::arolla::DenseArrayShape>(*reader, m);
}
TEST(ProtoTypeReader, CreateRepeatedSizeAccessReader) {
::testing_namespace::Root m;
EXPECT_THAT(ReadTopLevelSizeAsArray("ys", m), IsOkAndHolds(ElementsAre(0)));
EXPECT_THAT(ReadTopLevelSizeAsShape("ys", m),
IsOkAndHolds(DenseArrayShape{0}));
m.add_ys(89);
m.add_ys(57);
EXPECT_THAT(ReadTopLevelSizeAsArray("ys", m), IsOkAndHolds(ElementsAre(2)));
EXPECT_THAT(ReadTopLevelSizeAsShape("ys", m),
IsOkAndHolds(DenseArrayShape{2}));
EXPECT_THAT(ReadTopLevelSizeAsArray("inners", m),
IsOkAndHolds(ElementsAre(0)));
EXPECT_THAT(ReadTopLevelSizeAsShape("inners", m),
IsOkAndHolds(DenseArrayShape{0}));
m.add_inners();
EXPECT_THAT(ReadTopLevelSizeAsArray("inners", m),
IsOkAndHolds(ElementsAre(1)));
EXPECT_THAT(ReadTopLevelSizeAsShape("inners", m),
IsOkAndHolds(DenseArrayShape{1}));
m.add_inners();
EXPECT_THAT(ReadTopLevelSizeAsArray("inners", m),
IsOkAndHolds(ElementsAre(2)));
EXPECT_THAT(ReadTopLevelSizeAsShape("inners", m),
IsOkAndHolds(DenseArrayShape{2}));
m.clear_inners();
auto read_inners_as_size = [](const auto& m) {
return ReadDenseArrayValue<proto::arolla_size_t>(
{"inners", "as"}, {RepeatedFieldAccess{}, RepeatedFieldSizeAccess{}},
m);
};
EXPECT_THAT(read_inners_as_size(m), IsOkAndHolds(IsEmpty()));
m.add_inners();
m.mutable_inners(0)->add_as(0);
m.mutable_inners(0)->add_as(57);
m.add_inners();
m.add_inners();
m.mutable_inners(2)->add_as(19);
m.mutable_inners(2)->add_as(3);
m.mutable_inners(2)->add_as(17);
EXPECT_THAT(read_inners_as_size(m), IsOkAndHolds(ElementsAre(2, 0, 3)));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/proto/reflection/reader.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/proto/reflection/reader_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
0b676bba-ee4c-4fe7-a438-1f7f7ad0d210 | cpp | tensorflow/tensorflow | variant | tensorflow/lite/core/async/interop/variant.cc | tensorflow/lite/core/async/interop/variant_test.cc | #include "tensorflow/lite/core/async/interop/variant.h"
#include <cstring>
#include <utility>
namespace tflite {
namespace interop {
// Default constructor: marks the variant invalid and zeroes the integer
// union member so the stored value is deterministic.
Variant::Variant() {
  type = kInvalid;
  val.i = 0;
}
// Equality: variants are equal iff they hold the same type tag and equal
// payloads. Two kInvalid variants compare equal. Strings compare equal when
// the pointers match or the pointed-to C strings compare equal.
bool Variant::operator==(const Variant& other) const {
  if (type != other.type) return false;
  switch (type) {
    case kInvalid:
      return true;
    case kInt:
      return val.i == other.val.i;
    case kSizeT:
      return val.s == other.val.s;
    case kString:
      // Pointer check first avoids strcmp when both sides alias the same
      // storage (and short-circuits identical literals).
      return (val.c == other.val.c) || (strcmp(val.c, other.val.c) == 0);
    case kBool:
      return val.b == other.val.b;
  }
  // Defensive: `type` should always be one of the enumerators above, but
  // falling off the end of a value-returning function is undefined behavior,
  // so treat any unexpected tag as "not equal".
  return false;
}
// Inequality: two Variants differ exactly when operator== reports them
// unequal (different tag, or same tag with unequal payloads).
bool Variant::operator!=(const Variant& other) const {
  const bool equal = (*this == other);
  return !equal;
}
}
} | #include "tensorflow/lite/core/async/interop/variant.h"
#include <cstddef>
#include <cstdint>
#include <string>
#include <gtest/gtest.h>
namespace tflite::interop {
namespace {
TEST(VariantTest, IntTest) {
{
Variant a(1);
EXPECT_EQ(1, *a.Get<int>());
}
{
Variant a(1);
a.Set(2);
EXPECT_EQ(2, *a.Get<int>());
}
{
Variant a(42);
Variant b(a);
EXPECT_EQ(42, *b.Get<int>());
}
{
Variant a(42);
EXPECT_EQ(42, *static_cast<const int*>(a.GetPtr()));
}
{
Variant a(42);
Variant b(42);
EXPECT_EQ(a, b);
b.Set(21);
EXPECT_NE(a, b);
}
}
TEST(VariantTest, SizeTTest) {
{
size_t v = 1;
Variant a(v);
EXPECT_EQ(1, *a.Get<size_t>());
}
{
size_t v = 1;
Variant a(v);
size_t t = 2;
a.Set(t);
EXPECT_EQ(2, *a.Get<size_t>());
}
{
size_t v = 42;
Variant a(v);
Variant b(a);
EXPECT_EQ(42, *b.Get<size_t>());
}
{
size_t v = 42;
Variant a(v);
EXPECT_EQ(42, *static_cast<const size_t*>(a.GetPtr()));
}
{
Variant a(size_t(42));
Variant b(size_t(42));
EXPECT_EQ(a, b);
b.Set(size_t(21));
EXPECT_NE(a, b);
}
}
TEST(VariantTest, StringTest) {
{
const char v[] = "string";
Variant a(v);
EXPECT_EQ(v, *a.Get<const char*>());
}
{
const char v[] = "string";
Variant a(v);
const char t[] = "another string";
a.Set(t);
EXPECT_EQ(t, *a.Get<const char*>());
}
{
const char v[] = "string";
Variant a(v);
Variant b(a);
EXPECT_EQ(v, *b.Get<const char*>());
}
{
const char v[] = "string";
Variant a(v);
EXPECT_EQ(v, *static_cast<const char* const*>(a.GetPtr()));
}
{
const char v[] = "string";
Variant a(v);
std::string str = "string";
Variant b(str.c_str());
EXPECT_EQ(a, b);
b.Set("another string");
EXPECT_NE(a, b);
}
}
TEST(VariantTest, TypeNotMatch) {
Variant a(1);
EXPECT_EQ(nullptr, a.Get<size_t>());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/interop/variant.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/interop/variant_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
781f69a4-78b6-4034-b79d-a1bec23c3e92 | cpp | google/cel-cpp | align | internal/align.h | internal/align_test.cc | #ifndef THIRD_PARTY_CEL_CPP_INTERNAL_ALIGN_H_
#define THIRD_PARTY_CEL_CPP_INTERNAL_ALIGN_H_
#include <cstddef>
#include <cstdint>
#include <type_traits>
#include "absl/base/casts.h"
#include "absl/base/config.h"
#include "absl/base/macros.h"
#include "absl/numeric/bits.h"
namespace cel::internal {
// Returns the bitmask selecting the low-order (sub-alignment) bits for the
// given power-of-two `alignment`, i.e. `alignment - 1`. Only unsigned
// integral types are accepted; a non-power-of-two alignment trips the assert
// in debug builds.
template <typename T>
constexpr std::enable_if_t<
    std::conjunction_v<std::is_integral<T>, std::is_unsigned<T>>, T>
AlignmentMask(T alignment) {
  ABSL_ASSERT(absl::has_single_bit(alignment));
  return alignment - T{1};
}
// Rounds `x` down to the nearest multiple of `alignment`, which must be a
// power of two (asserted in debug builds). Unsigned-integral overload.
template <typename T>
std::enable_if_t<std::conjunction_v<std::is_integral<T>, std::is_unsigned<T>>,
                 T>
AlignDown(T x, size_t alignment) {
  ABSL_ASSERT(absl::has_single_bit(alignment));
// Probe for the builtin this branch actually calls (the guard previously
// tested __builtin_align_up, which happens to ship alongside it in Clang but
// is the wrong feature test).
#if ABSL_HAVE_BUILTIN(__builtin_align_down)
  return __builtin_align_down(x, alignment);
#else
  // Portable fallback: clear the low-order bits selected by the alignment
  // mask, computed in a type wide enough to hold both `x` and `alignment`.
  using C = std::common_type_t<T, size_t>;
  return static_cast<T>(static_cast<C>(x) &
                        ~AlignmentMask(static_cast<C>(alignment)));
#endif
}
template <typename T>
std::enable_if_t<std::is_pointer_v<T>, T> AlignDown(T x, size_t alignment) {
return absl::bit_cast<T>(AlignDown(absl::bit_cast<uintptr_t>(x), alignment));
}
// Rounds `x` up to the nearest multiple of `alignment`, which must be a
// power of two (asserted in debug builds). Unsigned-integral overload.
// NOTE: overflow is possible if `x` is within `alignment - 1` of the type's
// maximum — callers are expected to avoid that range.
template <typename T>
std::enable_if_t<std::conjunction_v<std::is_integral<T>, std::is_unsigned<T>>,
                 T>
AlignUp(T x, size_t alignment) {
  ABSL_ASSERT(absl::has_single_bit(alignment));
#if ABSL_HAVE_BUILTIN(__builtin_align_up)
  return __builtin_align_up(x, alignment);
#else
  // Portable fallback: bump x past the next boundary, then round down.
  using C = std::common_type_t<T, size_t>;
  return static_cast<T>(AlignDown(
      static_cast<C>(x) + AlignmentMask(static_cast<C>(alignment)), alignment));
#endif
}
template <typename T>
std::enable_if_t<std::is_pointer_v<T>, T> AlignUp(T x, size_t alignment) {
return absl::bit_cast<T>(AlignUp(absl::bit_cast<uintptr_t>(x), alignment));
}
// Returns true when `x` is a multiple of `alignment`, which must be a power
// of two (asserted in debug builds). Unsigned-integral overload.
template <typename T>
constexpr std::enable_if_t<
    std::conjunction_v<std::is_integral<T>, std::is_unsigned<T>>, bool>
IsAligned(T x, size_t alignment) {
  ABSL_ASSERT(absl::has_single_bit(alignment));
#if ABSL_HAVE_BUILTIN(__builtin_is_aligned)
  return __builtin_is_aligned(x, alignment);
#else
  // Portable fallback: aligned iff no sub-alignment bits are set.
  using C = std::common_type_t<T, size_t>;
  return (static_cast<C>(x) & AlignmentMask(static_cast<C>(alignment))) == C{0};
#endif
}
template <typename T>
std::enable_if_t<std::is_pointer_v<T>, bool> IsAligned(T x, size_t alignment) {
return IsAligned(absl::bit_cast<uintptr_t>(x), alignment);
}
}
#endif | #include "internal/align.h"
#include <cstddef>
#include <cstdint>
#include "internal/testing.h"
namespace cel::internal {
namespace {
// AlignmentMask(a) must equal a - 1 for power-of-two alignments.
TEST(AlignmentMask, Masks) {
  EXPECT_EQ(AlignmentMask(size_t{1}), size_t{0});
  EXPECT_EQ(AlignmentMask(size_t{2}), size_t{1});
  EXPECT_EQ(AlignmentMask(size_t{4}), size_t{3});
}
// AlignDown rounds toward zero; already-aligned values are unchanged, and
// the pointer overload is an identity for a suitably aligned object.
TEST(AlignDown, Aligns) {
  EXPECT_EQ(AlignDown(uintptr_t{3}, 4), 0);
  EXPECT_EQ(AlignDown(uintptr_t{0}, 4), 0);
  EXPECT_EQ(AlignDown(uintptr_t{5}, 4), 4);
  EXPECT_EQ(AlignDown(uintptr_t{4}, 4), 4);
  uint64_t val = 0;
  EXPECT_EQ(AlignDown(&val, alignof(val)), &val);
}
// AlignUp rounds away from zero; already-aligned values are unchanged, and
// the pointer overload is an identity for a suitably aligned object.
TEST(AlignUp, Aligns) {
  EXPECT_EQ(AlignUp(uintptr_t{0}, 4), 0);
  EXPECT_EQ(AlignUp(uintptr_t{3}, 4), 4);
  EXPECT_EQ(AlignUp(uintptr_t{5}, 4), 8);
  uint64_t val = 0;
  EXPECT_EQ(AlignUp(&val, alignof(val)), &val);
}
// IsAligned is true exactly for multiples of the alignment (0 included), and
// a natively aligned object's address reports aligned.
TEST(IsAligned, Aligned) {
  EXPECT_TRUE(IsAligned(uintptr_t{0}, 4));
  EXPECT_TRUE(IsAligned(uintptr_t{4}, 4));
  EXPECT_FALSE(IsAligned(uintptr_t{3}, 4));
  EXPECT_FALSE(IsAligned(uintptr_t{5}, 4));
  uint64_t val = 0;
  EXPECT_TRUE(IsAligned(&val, alignof(val)));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/align.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/align_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
67499580-6a0a-4546-a683-3cbca4e07781 | cpp | google/quiche | event_loop_connecting_client_socket | quiche/quic/core/io/event_loop_connecting_client_socket.cc | quiche/quic/core/io/event_loop_connecting_client_socket_test.cc | #include "quiche/quic/core/io/event_loop_connecting_client_socket.h"
#include <limits>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "absl/types/variant.h"
#include "quiche/quic/core/io/quic_event_loop.h"
#include "quiche/quic/core/io/socket.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_mem_slice.h"
namespace quic {
// Constructs a client socket targeting `peer_address` over `protocol`.
// `event_loop` and `buffer_allocator` must be non-null (DCHECKed below) and
// are presumed to outlive this socket — confirm against the header contract.
// `async_visitor` is not validated here; the async entry points (e.g.
// ConnectAsync) DCHECK it when they need it.
EventLoopConnectingClientSocket::EventLoopConnectingClientSocket(
    socket_api::SocketProtocol protocol,
    const quic::QuicSocketAddress& peer_address,
    QuicByteCount receive_buffer_size, QuicByteCount send_buffer_size,
    QuicEventLoop* event_loop, quiche::QuicheBufferAllocator* buffer_allocator,
    AsyncVisitor* async_visitor)
    : protocol_(protocol),
      peer_address_(peer_address),
      receive_buffer_size_(receive_buffer_size),
      send_buffer_size_(send_buffer_size),
      event_loop_(event_loop),
      buffer_allocator_(buffer_allocator),
      async_visitor_(async_visitor) {
  QUICHE_DCHECK(event_loop_);
  QUICHE_DCHECK(buffer_allocator_);
}
// Destruction requires that no connect/receive/send operation is pending and
// that Disconnect() was called on a still-connected socket. A still-open
// descriptor triggers QUICHE_BUG and is closed defensively.
EventLoopConnectingClientSocket::~EventLoopConnectingClientSocket() {
  QUICHE_DCHECK(connect_status_ != ConnectStatus::kConnecting);
  QUICHE_DCHECK(!receive_max_size_.has_value());
  QUICHE_DCHECK(absl::holds_alternative<absl::monostate>(send_data_));
  if (descriptor_ != kInvalidSocketFd) {
    QUICHE_BUG(quic_event_loop_connecting_socket_invalid_destruction)
        << "Must call Disconnect() on connected socket before destruction.";
    Close();
  }
  QUICHE_DCHECK(connect_status_ == ConnectStatus::kNotConnected);
  QUICHE_DCHECK(send_remaining_.empty());
}
// Synchronously opens and connects the socket.
//
// The socket is temporarily switched to blocking mode so DoInitialConnect()
// completes (or fails) inline, then returned to non-blocking mode for normal
// operation. Any failure along the way closes the socket and returns the
// error; on return, connect_status_ is never left as kConnecting.
absl::Status EventLoopConnectingClientSocket::ConnectBlocking() {
  QUICHE_DCHECK_EQ(descriptor_, kInvalidSocketFd);
  QUICHE_DCHECK(connect_status_ == ConnectStatus::kNotConnected);
  QUICHE_DCHECK(!receive_max_size_.has_value());
  QUICHE_DCHECK(absl::holds_alternative<absl::monostate>(send_data_));
  absl::Status status = Open();
  if (!status.ok()) {
    return status;
  }
  // Force blocking mode so connect() does not return "in progress".
  status = socket_api::SetSocketBlocking(descriptor_, true);
  if (!status.ok()) {
    QUICHE_LOG_FIRST_N(WARNING, 100)
        << "Failed to set socket to address: " << peer_address_.ToString()
        << " as blocking for connect with error: " << status;
    Close();
    return status;
  }
  status = DoInitialConnect();
  if (absl::IsUnavailable(status)) {
    // Unavailable means the connect reported "in progress", which should be
    // impossible on a blocking socket; treat it as a hard failure.
    QUICHE_LOG_FIRST_N(ERROR, 100)
        << "Non-blocking connect to should-be blocking socket to address:"
        << peer_address_.ToString() << ".";
    Close();
    connect_status_ = ConnectStatus::kNotConnected;
    return status;
  } else if (!status.ok()) {
    // DoInitialConnect() already cleaned up on ordinary failure.
    QUICHE_DCHECK_EQ(descriptor_, kInvalidSocketFd);
    QUICHE_DCHECK(connect_status_ == ConnectStatus::kNotConnected);
    return status;
  }
  // Connected: restore non-blocking mode for subsequent async use.
  status = socket_api::SetSocketBlocking(descriptor_, false);
  if (!status.ok()) {
    QUICHE_LOG_FIRST_N(WARNING, 100)
        << "Failed to return socket to address: " << peer_address_.ToString()
        << " to non-blocking after connect with error: " << status;
    Close();
    connect_status_ = ConnectStatus::kNotConnected;
  }
  QUICHE_DCHECK(connect_status_ != ConnectStatus::kConnecting);
  return status;
}
// Starts a non-blocking connect. Requires an async visitor. If the socket
// cannot even be opened, the visitor is notified immediately; otherwise the
// result of the initial connect attempt is either delivered or the socket is
// re-armed to wait for completion via FinishOrRearmAsyncConnect().
void EventLoopConnectingClientSocket::ConnectAsync() {
  QUICHE_DCHECK(async_visitor_);
  QUICHE_DCHECK_EQ(descriptor_, kInvalidSocketFd);
  QUICHE_DCHECK(connect_status_ == ConnectStatus::kNotConnected);
  QUICHE_DCHECK(!receive_max_size_.has_value());
  QUICHE_DCHECK(absl::holds_alternative<absl::monostate>(send_data_));
  absl::Status status = Open();
  if (!status.ok()) {
    async_visitor_->ConnectComplete(status);
    return;
  }
  FinishOrRearmAsyncConnect(DoInitialConnect());
}
// Closes the socket and cancels any pending async connect/receive/send,
// notifying the visitor with CancelledError for each pending operation.
// All internal state is reset before any callback fires — NOTE(review):
// presumably so visitor callbacks can safely re-enter this object; confirm.
void EventLoopConnectingClientSocket::Disconnect() {
  QUICHE_DCHECK_NE(descriptor_, kInvalidSocketFd);
  QUICHE_DCHECK(connect_status_ != ConnectStatus::kNotConnected);
  Close();
  QUICHE_DCHECK_EQ(descriptor_, kInvalidSocketFd);
  // Record which operations were in flight, then clear everything.
  bool require_connect_callback = connect_status_ == ConnectStatus::kConnecting;
  connect_status_ = ConnectStatus::kNotConnected;
  bool require_receive_callback = receive_max_size_.has_value();
  receive_max_size_.reset();
  bool require_send_callback =
      !absl::holds_alternative<absl::monostate>(send_data_);
  send_data_ = absl::monostate();
  send_remaining_ = "";
  // Fire cancellation callbacks only after state is fully reset.
  if (require_connect_callback) {
    QUICHE_DCHECK(async_visitor_);
    async_visitor_->ConnectComplete(absl::CancelledError());
  }
  if (require_receive_callback) {
    QUICHE_DCHECK(async_visitor_);
    async_visitor_->ReceiveComplete(absl::CancelledError());
  }
  if (require_send_callback) {
    QUICHE_DCHECK(async_visitor_);
    async_visitor_->SendComplete(absl::CancelledError());
  }
}
// Returns the locally bound address of the socket. Only valid while
// connected.
absl::StatusOr<QuicSocketAddress>
EventLoopConnectingClientSocket::GetLocalAddress() {
  QUICHE_DCHECK_NE(descriptor_, kInvalidSocketFd);
  QUICHE_DCHECK(connect_status_ == ConnectStatus::kConnected);
  return socket_api::GetSocketAddress(descriptor_);
}
// Receives up to `max_size` bytes, blocking until data arrives. The socket
// is temporarily switched to blocking mode around the receive and restored
// to non-blocking afterwards.
absl::StatusOr<quiche::QuicheMemSlice>
EventLoopConnectingClientSocket::ReceiveBlocking(QuicByteCount max_size) {
  QUICHE_DCHECK_GT(max_size, 0u);
  QUICHE_DCHECK_NE(descriptor_, kInvalidSocketFd);
  QUICHE_DCHECK(connect_status_ == ConnectStatus::kConnected);
  QUICHE_DCHECK(!receive_max_size_.has_value());
  absl::Status status =
      socket_api::SetSocketBlocking(descriptor_, true);
  if (!status.ok()) {
    QUICHE_LOG_FIRST_N(WARNING, 100)
        << "Failed to set socket to address: " << peer_address_.ToString()
        << " as blocking for receive with error: " << status;
    return status;
  }
  receive_max_size_ = max_size;
  absl::StatusOr<quiche::QuicheMemSlice> buffer = ReceiveInternal();
  if (!buffer.ok() && absl::IsUnavailable(buffer.status())) {
    // A blocking socket should never report "would block"; log and clean up
    // the pending-receive state that ReceiveInternal() left set.
    QUICHE_LOG_FIRST_N(ERROR, 100)
        << "Non-blocking receive from should-be blocking socket to address:"
        << peer_address_.ToString() << ".";
    receive_max_size_.reset();
  } else {
    QUICHE_DCHECK(!receive_max_size_.has_value());
  }
  // Always attempt to restore non-blocking mode, even on receive failure.
  absl::Status set_non_blocking_status =
      socket_api::SetSocketBlocking(descriptor_, false);
  if (!set_non_blocking_status.ok()) {
    QUICHE_LOG_FIRST_N(WARNING, 100)
        << "Failed to return socket to address: " << peer_address_.ToString()
        << " to non-blocking after receive with error: "
        << set_non_blocking_status;
    return set_non_blocking_status;
  }
  return buffer;
}
// Starts an asynchronous receive of up to `max_size` bytes; completion is
// reported via async_visitor_->ReceiveComplete().
void EventLoopConnectingClientSocket::ReceiveAsync(QuicByteCount max_size) {
  QUICHE_DCHECK(async_visitor_);
  QUICHE_DCHECK_GT(max_size, 0u);
  QUICHE_DCHECK_NE(descriptor_, kInvalidSocketFd);
  QUICHE_DCHECK(connect_status_ == ConnectStatus::kConnected);
  QUICHE_DCHECK(!receive_max_size_.has_value());
  // receive_max_size_ doubles as the "receive pending" flag.
  receive_max_size_ = max_size;
  FinishOrRearmAsyncReceive(ReceiveInternal());
}
// Sends `data` (string overload), blocking until fully sent or on error.
absl::Status EventLoopConnectingClientSocket::SendBlocking(std::string data) {
  QUICHE_DCHECK(!data.empty());
  // No send may already be in progress.
  QUICHE_DCHECK(absl::holds_alternative<absl::monostate>(send_data_));
  send_data_ = std::move(data);
  return SendBlockingInternal();
}
// Sends `data` (mem-slice overload), blocking until fully sent or on error.
absl::Status EventLoopConnectingClientSocket::SendBlocking(
    quiche::QuicheMemSlice data) {
  QUICHE_DCHECK(!data.empty());
  // No send may already be in progress.
  QUICHE_DCHECK(absl::holds_alternative<absl::monostate>(send_data_));
  send_data_ = std::move(data);
  return SendBlockingInternal();
}
// Starts an asynchronous send (string overload); completion is reported via
// async_visitor_->SendComplete().
void EventLoopConnectingClientSocket::SendAsync(std::string data) {
  QUICHE_DCHECK(!data.empty());
  QUICHE_DCHECK(absl::holds_alternative<absl::monostate>(send_data_));
  send_data_ = std::move(data);
  // send_remaining_ is a view into send_data_, tracking unsent bytes.
  send_remaining_ = absl::get<std::string>(send_data_);
  FinishOrRearmAsyncSend(SendInternal());
}
// Starts an asynchronous send (mem-slice overload); completion is reported
// via async_visitor_->SendComplete().
void EventLoopConnectingClientSocket::SendAsync(quiche::QuicheMemSlice data) {
  QUICHE_DCHECK(!data.empty());
  QUICHE_DCHECK(absl::holds_alternative<absl::monostate>(send_data_));
  send_data_ = std::move(data);
  // send_remaining_ is a view into send_data_, tracking unsent bytes.
  send_remaining_ =
      absl::get<quiche::QuicheMemSlice>(send_data_).AsStringView();
  FinishOrRearmAsyncSend(SendInternal());
}
// Event-loop callback: resumes whichever async operation(s) the readiness
// events apply to. A pending connect consumes the event exclusively;
// otherwise pending receive and send are each driven independently.
void EventLoopConnectingClientSocket::OnSocketEvent(
    QuicEventLoop* event_loop, SocketFd fd, QuicSocketEventMask events) {
  QUICHE_DCHECK_EQ(event_loop, event_loop_);
  QUICHE_DCHECK_EQ(fd, descriptor_);
  if (connect_status_ == ConnectStatus::kConnecting &&
      (events & (kSocketEventWritable | kSocketEventError))) {
    // Writability (or error) on a connecting socket signals connect
    // completion; no other operation can be pending yet.
    FinishOrRearmAsyncConnect(GetConnectResult());
    return;
  }
  if (receive_max_size_.has_value() &&
      (events & (kSocketEventReadable | kSocketEventError))) {
    FinishOrRearmAsyncReceive(ReceiveInternal());
  }
  if (!send_remaining_.empty() &&
      (events & (kSocketEventWritable | kSocketEventError))) {
    FinishOrRearmAsyncSend(SendInternal());
  }
}
// Creates the (non-blocking) socket, registers it with the event loop when
// operating asynchronously, and applies any configured buffer sizes. On any
// failure after creation the socket is closed before returning.
absl::Status EventLoopConnectingClientSocket::Open() {
  QUICHE_DCHECK_EQ(descriptor_, kInvalidSocketFd);
  QUICHE_DCHECK(connect_status_ == ConnectStatus::kNotConnected);
  QUICHE_DCHECK(!receive_max_size_.has_value());
  QUICHE_DCHECK(absl::holds_alternative<absl::monostate>(send_data_));
  QUICHE_DCHECK(send_remaining_.empty());
  absl::StatusOr<SocketFd> descriptor =
      socket_api::CreateSocket(peer_address_.host().address_family(), protocol_,
                               false);
  if (!descriptor.ok()) {
    QUICHE_DVLOG(1) << "Failed to open socket for connection to address: "
                    << peer_address_.ToString()
                    << " with error: " << descriptor.status();
    return descriptor.status();
  }
  QUICHE_DCHECK_NE(*descriptor, kInvalidSocketFd);
  descriptor_ = *descriptor;
  if (async_visitor_) {
    bool registered;
    if (event_loop_->SupportsEdgeTriggered()) {
      // Edge-triggered loops register interest in everything up front.
      registered = event_loop_->RegisterSocket(
          descriptor_,
          kSocketEventReadable | kSocketEventWritable | kSocketEventError,
          this);
    } else {
      // Level-triggered loops start with no interest; events are armed
      // per-operation via RearmSocket().
      registered = event_loop_->RegisterSocket(descriptor_, 0, this);
    }
    QUICHE_DCHECK(registered);
  }
  if (receive_buffer_size_ != 0) {
    absl::Status status =
        socket_api::SetReceiveBufferSize(descriptor_, receive_buffer_size_);
    if (!status.ok()) {
      QUICHE_LOG_FIRST_N(WARNING, 100)
          << "Failed to set receive buffer size to: " << receive_buffer_size_
          << " for socket to address: " << peer_address_.ToString()
          << " with error: " << status;
      Close();
      return status;
    }
  }
  if (send_buffer_size_ != 0) {
    absl::Status status =
        socket_api::SetSendBufferSize(descriptor_, send_buffer_size_);
    if (!status.ok()) {
      QUICHE_LOG_FIRST_N(WARNING, 100)
          << "Failed to set send buffer size to: " << send_buffer_size_
          << " for socket to address: " << peer_address_.ToString()
          << " with error: " << status;
      Close();
      return status;
    }
  }
  return absl::OkStatus();
}
// Unregisters the socket from the event loop (if it was registered, i.e. in
// async mode) and closes the descriptor. Close failures are logged but not
// surfaced; descriptor_ is always invalidated.
void EventLoopConnectingClientSocket::Close() {
  QUICHE_DCHECK_NE(descriptor_, kInvalidSocketFd);
  bool unregistered = event_loop_->UnregisterSocket(descriptor_);
  // The socket is registered iff an async visitor was provided (see Open()).
  QUICHE_DCHECK_EQ(unregistered, !!async_visitor_);
  absl::Status status = socket_api::Close(descriptor_);
  if (!status.ok()) {
    QUICHE_LOG_FIRST_N(WARNING, 100)
        << "Could not close socket to address: " << peer_address_.ToString()
        << " with error: " << status;
  }
  descriptor_ = kInvalidSocketFd;
}
// Issues the initial connect() call. On a non-blocking socket an Unavailable
// status means the connect is still in progress (kConnecting); any other
// error closes the socket. Returns the raw connect status in all cases.
absl::Status EventLoopConnectingClientSocket::DoInitialConnect() {
  QUICHE_DCHECK_NE(descriptor_, kInvalidSocketFd);
  QUICHE_DCHECK(connect_status_ == ConnectStatus::kNotConnected);
  QUICHE_DCHECK(!receive_max_size_.has_value());
  QUICHE_DCHECK(absl::holds_alternative<absl::monostate>(send_data_));
  absl::Status connect_result = socket_api::Connect(descriptor_, peer_address_);
  if (connect_result.ok()) {
    connect_status_ = ConnectStatus::kConnected;
  } else if (absl::IsUnavailable(connect_result)) {
    // Connect is in progress; completion will be signalled by writability.
    connect_status_ = ConnectStatus::kConnecting;
  } else {
    QUICHE_DVLOG(1) << "Synchronously failed to connect socket to address: "
                    << peer_address_.ToString()
                    << " with error: " << connect_result;
    Close();
    connect_status_ = ConnectStatus::kNotConnected;
  }
  return connect_result;
}
// Determines the outcome of an in-progress async connect once the socket
// signals writability/error: checks SO_ERROR, then confirms the connection
// with a one-byte peek (data or would-block both mean connected). On failure
// the socket is closed and state reset.
absl::Status EventLoopConnectingClientSocket::GetConnectResult() {
  QUICHE_DCHECK_NE(descriptor_, kInvalidSocketFd);
  QUICHE_DCHECK(connect_status_ == ConnectStatus::kConnecting);
  QUICHE_DCHECK(!receive_max_size_.has_value());
  QUICHE_DCHECK(absl::holds_alternative<absl::monostate>(send_data_));
  absl::Status error = socket_api::GetSocketError(descriptor_);
  if (!error.ok()) {
    QUICHE_DVLOG(1) << "Asynchronously failed to connect socket to address: "
                    << peer_address_.ToString() << " with error: " << error;
    Close();
    connect_status_ = ConnectStatus::kNotConnected;
    return error;
  }
  // No pending error; verify the socket actually behaves as connected.
  absl::StatusOr<bool> peek_data = OneBytePeek();
  if (peek_data.ok() || absl::IsUnavailable(peek_data.status())) {
    connect_status_ = ConnectStatus::kConnected;
  } else {
    error = peek_data.status();
    QUICHE_LOG_FIRST_N(WARNING, 100)
        << "Socket to address: " << peer_address_.ToString()
        << " signalled writable after connect and no connect error found, "
           "but socket does not appear connected with error: "
        << error;
    Close();
    connect_status_ = ConnectStatus::kNotConnected;
  }
  return error;
}
// Async-connect continuation: on Unavailable, the connect is still pending
// (re-arm writability interest for level-triggered loops and wait for
// OnSocketEvent); otherwise report the final status to the visitor.
void EventLoopConnectingClientSocket::FinishOrRearmAsyncConnect(
    absl::Status status) {
  if (absl::IsUnavailable(status)) {
    if (!event_loop_->SupportsEdgeTriggered()) {
      bool result = event_loop_->RearmSocket(
          descriptor_, kSocketEventWritable | kSocketEventError);
      QUICHE_DCHECK(result);
    }
    QUICHE_DCHECK(connect_status_ == ConnectStatus::kConnecting);
  } else {
    QUICHE_DCHECK(connect_status_ != ConnectStatus::kConnecting);
    async_visitor_->ConnectComplete(status);
  }
}
// Performs one receive of up to *receive_max_size_ bytes. For reads larger
// than one byte, a one-byte peek is done first so the full-size buffer is
// only allocated when data is actually available. receive_max_size_ is
// cleared on completion (success, empty read, or hard error) but kept set
// on Unavailable so the operation can be retried.
absl::StatusOr<quiche::QuicheMemSlice>
EventLoopConnectingClientSocket::ReceiveInternal() {
  QUICHE_DCHECK_NE(descriptor_, kInvalidSocketFd);
  QUICHE_DCHECK(connect_status_ == ConnectStatus::kConnected);
  QUICHE_CHECK(receive_max_size_.has_value());
  QUICHE_DCHECK_GE(*receive_max_size_, 1u);
  QUICHE_DCHECK_LE(*receive_max_size_, std::numeric_limits<size_t>::max());
  if (*receive_max_size_ > 1) {
    absl::StatusOr<bool> peek_data = OneBytePeek();
    if (!peek_data.ok()) {
      if (!absl::IsUnavailable(peek_data.status())) {
        receive_max_size_.reset();
      }
      return peek_data.status();
    } else if (!*peek_data) {
      // Orderly peer shutdown: report an empty slice.
      receive_max_size_.reset();
      return quiche::QuicheMemSlice();
    }
  }
  quiche::QuicheBuffer buffer(buffer_allocator_, *receive_max_size_);
  absl::StatusOr<absl::Span<char>> received = socket_api::Receive(
      descriptor_, absl::MakeSpan(buffer.data(), buffer.size()));
  if (received.ok()) {
    QUICHE_DCHECK_LE(received->size(), buffer.size());
    QUICHE_DCHECK_EQ(received->data(), buffer.data());
    receive_max_size_.reset();
    // Shrink the slice to the bytes actually received.
    return quiche::QuicheMemSlice(
        quiche::QuicheBuffer(buffer.Release(), received->size()));
  } else {
    if (!absl::IsUnavailable(received.status())) {
      QUICHE_DVLOG(1) << "Failed to receive from socket to address: "
                      << peer_address_.ToString()
                      << " with error: " << received.status();
      receive_max_size_.reset();
    }
    return received.status();
  }
}
// Async-receive continuation: on Unavailable, keep the receive pending
// (re-arm readability interest for level-triggered loops); otherwise report
// the result to the visitor.
void EventLoopConnectingClientSocket::FinishOrRearmAsyncReceive(
    absl::StatusOr<quiche::QuicheMemSlice> buffer) {
  QUICHE_DCHECK(async_visitor_);
  QUICHE_DCHECK(connect_status_ == ConnectStatus::kConnected);
  if (!buffer.ok() && absl::IsUnavailable(buffer.status())) {
    if (!event_loop_->SupportsEdgeTriggered()) {
      bool result = event_loop_->RearmSocket(
          descriptor_, kSocketEventReadable | kSocketEventError);
      QUICHE_DCHECK(result);
    }
    QUICHE_DCHECK(receive_max_size_.has_value());
  } else {
    QUICHE_DCHECK(!receive_max_size_.has_value());
    async_visitor_->ReceiveComplete(std::move(buffer));
  }
}
// Peeks one byte from the socket without consuming it. Returns true if a
// byte is available, false on an empty (zero-byte) read, or the underlying
// receive error otherwise.
absl::StatusOr<bool> EventLoopConnectingClientSocket::OneBytePeek() {
  QUICHE_DCHECK_NE(descriptor_, kInvalidSocketFd);

  char byte;
  absl::StatusOr<absl::Span<char>> result =
      socket_api::Receive(descriptor_, absl::MakeSpan(&byte, 1), true);
  if (!result.ok()) {
    return result.status();
  }
  return !result->empty();
}
// Sends the buffered send_data_ with the socket temporarily switched to
// blocking mode, restoring non-blocking mode afterwards. Clears the send
// state on completion or on any failure path.
absl::Status EventLoopConnectingClientSocket::SendBlockingInternal() {
  QUICHE_DCHECK_NE(descriptor_, kInvalidSocketFd);
  QUICHE_DCHECK(connect_status_ == ConnectStatus::kConnected);
  QUICHE_DCHECK(!absl::holds_alternative<absl::monostate>(send_data_));
  QUICHE_DCHECK(send_remaining_.empty());
  absl::Status status =
      socket_api::SetSocketBlocking(descriptor_, true);
  if (!status.ok()) {
    QUICHE_LOG_FIRST_N(WARNING, 100)
        << "Failed to set socket to address: " << peer_address_.ToString()
        << " as blocking for send with error: " << status;
    send_data_ = absl::monostate();
    return status;
  }
  // Point send_remaining_ at the full payload held in send_data_.
  if (absl::holds_alternative<std::string>(send_data_)) {
    send_remaining_ = absl::get<std::string>(send_data_);
  } else {
    send_remaining_ =
        absl::get<quiche::QuicheMemSlice>(send_data_).AsStringView();
  }
  status = SendInternal();
  if (absl::IsUnavailable(status)) {
    // A blocking socket should never report "would block"; log and clear the
    // send state that SendInternal() left pending.
    QUICHE_LOG_FIRST_N(ERROR, 100)
        << "Non-blocking send for should-be blocking socket to address:"
        << peer_address_.ToString();
    send_data_ = absl::monostate();
    send_remaining_ = "";
  } else {
    QUICHE_DCHECK(absl::holds_alternative<absl::monostate>(send_data_));
    QUICHE_DCHECK(send_remaining_.empty());
  }
  // Always attempt to restore non-blocking mode, even on send failure.
  absl::Status set_non_blocking_status =
      socket_api::SetSocketBlocking(descriptor_, false);
  if (!set_non_blocking_status.ok()) {
    QUICHE_LOG_FIRST_N(WARNING, 100)
        << "Failed to return socket to address: " << peer_address_.ToString()
        << " to non-blocking after send with error: "
        << set_non_blocking_status;
    return set_non_blocking_status;
  }
  return status;
}
// Attempts to send all of send_remaining_. Loops until everything is sent,
// a hard error occurs (send state cleared), or the socket would block
// (Unavailable returned with send state left pending for retry).
absl::Status EventLoopConnectingClientSocket::SendInternal() {
  QUICHE_DCHECK_NE(descriptor_, kInvalidSocketFd);
  QUICHE_DCHECK(connect_status_ == ConnectStatus::kConnected);
  QUICHE_DCHECK(!absl::holds_alternative<absl::monostate>(send_data_));
  QUICHE_DCHECK(!send_remaining_.empty());
  while (!send_remaining_.empty()) {
    absl::StatusOr<absl::string_view> remainder =
        socket_api::Send(descriptor_, send_remaining_);
    if (remainder.ok()) {
      // The returned remainder must be a (possibly empty) suffix of the
      // buffer we passed in.
      QUICHE_DCHECK(remainder->empty() ||
                    (remainder->data() >= send_remaining_.data() &&
                     remainder->data() <
                         send_remaining_.data() + send_remaining_.size()));
      QUICHE_DCHECK(remainder->empty() ||
                    (remainder->data() + remainder->size() ==
                     send_remaining_.data() + send_remaining_.size()));
      send_remaining_ = *remainder;
    } else {
      if (!absl::IsUnavailable(remainder.status())) {
        QUICHE_DVLOG(1) << "Failed to send to socket to address: "
                        << peer_address_.ToString()
                        << " with error: " << remainder.status();
        send_data_ = absl::monostate();
        send_remaining_ = "";
      }
      return remainder.status();
    }
  }
  // Fully sent; release the payload.
  send_data_ = absl::monostate();
  return absl::OkStatus();
}
// Async-send continuation: on Unavailable, keep the send pending (re-arm
// writability interest for level-triggered loops); otherwise report the
// final status to the visitor.
void EventLoopConnectingClientSocket::FinishOrRearmAsyncSend(
    absl::Status status) {
  QUICHE_DCHECK(async_visitor_);
  QUICHE_DCHECK(connect_status_ == ConnectStatus::kConnected);
  if (absl::IsUnavailable(status)) {
    if (!event_loop_->SupportsEdgeTriggered()) {
      bool result = event_loop_->RearmSocket(
          descriptor_, kSocketEventWritable | kSocketEventError);
      QUICHE_DCHECK(result);
    }
    QUICHE_DCHECK(!absl::holds_alternative<absl::monostate>(send_data_));
    QUICHE_DCHECK(!send_remaining_.empty());
  } else {
    QUICHE_DCHECK(absl::holds_alternative<absl::monostate>(send_data_));
    QUICHE_DCHECK(send_remaining_.empty());
    async_visitor_->SendComplete(status);
  }
}
} | #include "quiche/quic/core/io/event_loop_connecting_client_socket.h"
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include "absl/functional/bind_front.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "quiche/quic/core/connecting_client_socket.h"
#include "quiche/quic/core/io/event_loop_socket_factory.h"
#include "quiche/quic/core/io/quic_default_event_loop.h"
#include "quiche/quic/core/io/quic_event_loop.h"
#include "quiche/quic/core/io/socket.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/quic/test_tools/mock_clock.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_mem_slice.h"
#include "quiche/common/platform/api/quiche_mutex.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/platform/api/quiche_test_loopback.h"
#include "quiche/common/platform/api/quiche_thread.h"
#include "quiche/common/quiche_callbacks.h"
#include "quiche/common/simple_buffer_allocator.h"
namespace quic::test {
namespace {
using ::testing::Combine;
using ::testing::Values;
using ::testing::ValuesIn;
// Base class for a server-side test thread: owns a listening/bound server
// socket descriptor and runs a caller-supplied `behavior` against the
// connected peer. Subclasses implement the protocol-specific accept/connect
// handshake. The destructor blocks until the behavior has completed.
class TestServerSocketRunner : public quiche::QuicheThread {
 public:
  using SocketBehavior = quiche::MultiUseCallback<void(
      SocketFd connected_socket, socket_api::SocketProtocol protocol)>;
  TestServerSocketRunner(SocketFd server_socket_descriptor,
                         SocketBehavior behavior)
      : QuicheThread("TestServerSocketRunner"),
        server_socket_descriptor_(server_socket_descriptor),
        behavior_(std::move(behavior)) {}
  ~TestServerSocketRunner() override { WaitForCompletion(); }
  // Blocks until the server-side behavior has finished running.
  void WaitForCompletion() { completion_notification_.WaitForNotification(); }

 protected:
  SocketFd server_socket_descriptor() const {
    return server_socket_descriptor_;
  }
  const SocketBehavior& behavior() const { return behavior_; }
  quiche::QuicheNotification& completion_notification() {
    return completion_notification_;
  }

 private:
  const SocketFd server_socket_descriptor_;
  const SocketBehavior behavior_;
  // Signalled by subclasses when the behavior completes.
  quiche::QuicheNotification completion_notification_;
};
// TCP server runner: accepts one connection on the listening socket, runs
// the behavior on the accepted connection, then closes both sockets.
class TestTcpServerSocketRunner : public TestServerSocketRunner {
 public:
  TestTcpServerSocketRunner(SocketFd server_socket_descriptor,
                            SocketBehavior behavior)
      : TestServerSocketRunner(server_socket_descriptor, std::move(behavior)) {
    Start();
  }
  ~TestTcpServerSocketRunner() override { Join(); }

 protected:
  void Run() override {
    AcceptSocket();
    behavior()(connection_socket_descriptor_, socket_api::SocketProtocol::kTcp);
    CloseSocket();
    completion_notification().Notify();
  }

 private:
  // Blocks until the client connects, storing the accepted descriptor.
  void AcceptSocket() {
    absl::StatusOr<socket_api::AcceptResult> connection_socket =
        socket_api::Accept(server_socket_descriptor(), true);
    QUICHE_CHECK(connection_socket.ok());
    connection_socket_descriptor_ = connection_socket.value().fd;
  }
  void CloseSocket() {
    QUICHE_CHECK(socket_api::Close(connection_socket_descriptor_).ok());
    QUICHE_CHECK(socket_api::Close(server_socket_descriptor()).ok());
  }
  SocketFd connection_socket_descriptor_ = kInvalidSocketFd;
};
// UDP server runner: "connects" the server socket to the client's address
// (fixing the peer for send/recv), runs the behavior on the server socket
// itself, then closes it.
class TestUdpServerSocketRunner : public TestServerSocketRunner {
 public:
  TestUdpServerSocketRunner(SocketFd server_socket_descriptor,
                            SocketBehavior behavior,
                            QuicSocketAddress client_socket_address)
      : TestServerSocketRunner(server_socket_descriptor, std::move(behavior)),
        client_socket_address_(std::move(client_socket_address)) {
    Start();
  }
  ~TestUdpServerSocketRunner() override { Join(); }

 protected:
  void Run() override {
    ConnectSocket();
    behavior()(server_socket_descriptor(), socket_api::SocketProtocol::kUdp);
    DisconnectSocket();
    completion_notification().Notify();
  }

 private:
  void ConnectSocket() {
    QUICHE_CHECK(
        socket_api::Connect(server_socket_descriptor(), client_socket_address_)
            .ok());
  }
  void DisconnectSocket() {
    QUICHE_CHECK(socket_api::Close(server_socket_descriptor()).ok());
  }
  QuicSocketAddress client_socket_address_;
};
// Test fixture parameterized over (socket protocol, event-loop factory).
// Acts as the client socket's AsyncVisitor, recording each completion into
// an optional member that individual tests inspect. A listening/bound
// server socket is created in SetUp() for the client to connect to.
class EventLoopConnectingClientSocketTest
    : public quiche::test::QuicheTestWithParam<
          std::tuple<socket_api::SocketProtocol, QuicEventLoopFactory*>>,
      public ConnectingClientSocket::AsyncVisitor {
 public:
  void SetUp() override {
    QuicEventLoopFactory* event_loop_factory;
    std::tie(protocol_, event_loop_factory) = GetParam();
    event_loop_ = event_loop_factory->Create(&clock_);
    socket_factory_ = std::make_unique<EventLoopSocketFactory>(
        event_loop_.get(), quiche::SimpleBufferAllocator::Get());
    QUICHE_CHECK(CreateListeningServerSocket());
  }
  void TearDown() override {
    // The server descriptor may have been handed off to a runner (or closed
    // by a test); only close it if still owned here.
    if (server_socket_descriptor_ != kInvalidSocketFd) {
      QUICHE_CHECK(socket_api::Close(server_socket_descriptor_).ok());
    }
  }
  // AsyncVisitor implementation: each callback must fire at most once per
  // recorded result (tests reset the optional between operations).
  void ConnectComplete(absl::Status status) override {
    QUICHE_CHECK(!connect_result_.has_value());
    connect_result_ = std::move(status);
  }
  void ReceiveComplete(absl::StatusOr<quiche::QuicheMemSlice> data) override {
    QUICHE_CHECK(!receive_result_.has_value());
    receive_result_ = std::move(data);
  }
  void SendComplete(absl::Status status) override {
    QUICHE_CHECK(!send_result_.has_value());
    send_result_ = std::move(status);
  }

 protected:
  // Creates a client socket for the parameterized protocol with default
  // buffer sizes.
  std::unique_ptr<ConnectingClientSocket> CreateSocket(
      const quic::QuicSocketAddress& peer_address,
      ConnectingClientSocket::AsyncVisitor* async_visitor) {
    switch (protocol_) {
      case socket_api::SocketProtocol::kUdp:
        return socket_factory_->CreateConnectingUdpClientSocket(
            peer_address, 0, 0,
            async_visitor);
      case socket_api::SocketProtocol::kTcp:
        return socket_factory_->CreateTcpClientSocket(
            peer_address, 0, 0,
            async_visitor);
      default:
        QUICHE_NOTREACHED();
        return nullptr;
    }
  }
  // Like CreateSocket(), but for TCP uses a tiny send buffer so async sends
  // are likely to block partway, exercising the re-arm/retry path.
  std::unique_ptr<ConnectingClientSocket> CreateSocketToEncourageDelayedSend(
      const quic::QuicSocketAddress& peer_address,
      ConnectingClientSocket::AsyncVisitor* async_visitor) {
    switch (protocol_) {
      case socket_api::SocketProtocol::kUdp:
        return socket_factory_->CreateConnectingUdpClientSocket(
            peer_address, 0, 0,
            async_visitor);
      case socket_api::SocketProtocol::kTcp:
        return socket_factory_->CreateTcpClientSocket(
            peer_address, 0, 4,
            async_visitor);
      default:
        QUICHE_NOTREACHED();
        return nullptr;
    }
  }
  // Creates a bound (and for TCP, listening) server socket on the loopback
  // interface, storing its descriptor and address on the fixture.
  bool CreateListeningServerSocket() {
    absl::StatusOr<SocketFd> socket = socket_api::CreateSocket(
        quiche::TestLoopback().address_family(), protocol_,
        true);
    QUICHE_CHECK(socket.ok());
    if (protocol_ == socket_api::SocketProtocol::kTcp) {
      // Tiny receive buffer encourages client-side send backpressure.
      static const QuicByteCount kReceiveBufferSize = 2;
      absl::Status result =
          socket_api::SetReceiveBufferSize(socket.value(), kReceiveBufferSize);
      QUICHE_CHECK(result.ok());
    }
    QuicSocketAddress bind_address(quiche::TestLoopback(), 0);
    absl::Status result = socket_api::Bind(socket.value(), bind_address);
    QUICHE_CHECK(result.ok());
    absl::StatusOr<QuicSocketAddress> socket_address =
        socket_api::GetSocketAddress(socket.value());
    QUICHE_CHECK(socket_address.ok());
    if (protocol_ == socket_api::SocketProtocol::kTcp) {
      result = socket_api::Listen(socket.value(), 1);
      QUICHE_CHECK(result.ok());
    }
    server_socket_descriptor_ = socket.value();
    server_socket_address_ = std::move(socket_address).value();
    return true;
  }
  // Hands ownership of the server descriptor to a protocol-appropriate
  // runner thread that executes `behavior` against the client socket.
  std::unique_ptr<TestServerSocketRunner> CreateServerSocketRunner(
      TestServerSocketRunner::SocketBehavior behavior,
      ConnectingClientSocket* client_socket) {
    std::unique_ptr<TestServerSocketRunner> runner;
    switch (protocol_) {
      case socket_api::SocketProtocol::kUdp: {
        absl::StatusOr<QuicSocketAddress> client_socket_address =
            client_socket->GetLocalAddress();
        QUICHE_CHECK(client_socket_address.ok());
        runner = std::make_unique<TestUdpServerSocketRunner>(
            server_socket_descriptor_, std::move(behavior),
            std::move(client_socket_address).value());
        break;
      }
      case socket_api::SocketProtocol::kTcp:
        runner = std::make_unique<TestTcpServerSocketRunner>(
            server_socket_descriptor_, std::move(behavior));
        break;
      default:
        QUICHE_NOTREACHED();
    }
    // The runner now owns (and will close) the server descriptor.
    server_socket_descriptor_ = kInvalidSocketFd;
    return runner;
  }
  socket_api::SocketProtocol protocol_;
  SocketFd server_socket_descriptor_ = kInvalidSocketFd;
  QuicSocketAddress server_socket_address_;
  MockClock clock_;
  std::unique_ptr<QuicEventLoop> event_loop_;
  std::unique_ptr<EventLoopSocketFactory> socket_factory_;
  // Results recorded by the AsyncVisitor callbacks above.
  std::optional<absl::Status> connect_result_;
  std::optional<absl::StatusOr<quiche::QuicheMemSlice>> receive_result_;
  std::optional<absl::Status> send_result_;
};
// Builds a readable test-suffix name like "TCP_Epoll" from the
// (protocol, event-loop factory) parameter tuple.
std::string GetTestParamName(
    ::testing::TestParamInfo<
        std::tuple<socket_api::SocketProtocol, QuicEventLoopFactory*>>
        info) {
  auto [protocol, event_loop_factory] = info.param;
  return EscapeTestParamName(absl::StrCat(socket_api::GetProtocolName(protocol),
                                          "_", event_loop_factory->GetName()));
}
// Run every test for the cross product of {UDP, TCP} x all supported event
// loop implementations on this platform.
INSTANTIATE_TEST_SUITE_P(EventLoopConnectingClientSocketTests,
                         EventLoopConnectingClientSocketTest,
                         Combine(Values(socket_api::SocketProtocol::kUdp,
                                        socket_api::SocketProtocol::kTcp),
                                 ValuesIn(GetAllSupportedEventLoops())),
                         &GetTestParamName);
// A blocking connect to the live server socket succeeds.
TEST_P(EventLoopConnectingClientSocketTest, ConnectBlocking) {
  std::unique_ptr<ConnectingClientSocket> socket =
      CreateSocket(server_socket_address_,
                   nullptr);
  EXPECT_TRUE(socket->ConnectBlocking().ok());
  socket->Disconnect();
}
// An async connect completes successfully, either immediately or after one
// event-loop turn; Disconnect() after completion fires no extra callback.
TEST_P(EventLoopConnectingClientSocketTest, ConnectAsync) {
  std::unique_ptr<ConnectingClientSocket> socket =
      CreateSocket(server_socket_address_,
                   this);
  socket->ConnectAsync();
  if (!connect_result_.has_value()) {
    event_loop_->RunEventLoopOnce(QuicTime::Delta::FromSeconds(1));
    ASSERT_TRUE(connect_result_.has_value());
  }
  EXPECT_TRUE(connect_result_.value().ok());
  connect_result_.reset();
  socket->Disconnect();
  EXPECT_FALSE(connect_result_.has_value());
}
// Closing the server before the async connect: TCP reports failure, while
// connectionless UDP still "connects" successfully.
TEST_P(EventLoopConnectingClientSocketTest, ErrorBeforeConnectAsync) {
  std::unique_ptr<ConnectingClientSocket> socket =
      CreateSocket(server_socket_address_,
                   this);
  EXPECT_TRUE(socket_api::Close(server_socket_descriptor_).ok());
  server_socket_descriptor_ = kInvalidSocketFd;
  socket->ConnectAsync();
  if (!connect_result_.has_value()) {
    event_loop_->RunEventLoopOnce(QuicTime::Delta::FromSeconds(1));
    ASSERT_TRUE(connect_result_.has_value());
  }
  switch (protocol_) {
    case socket_api::SocketProtocol::kTcp:
      EXPECT_FALSE(connect_result_.value().ok());
      break;
    case socket_api::SocketProtocol::kUdp:
      EXPECT_TRUE(connect_result_.value().ok());
      socket->Disconnect();
      break;
    default:
      FAIL();
  }
}
// Closing the server while the async connect is in flight: TCP fails,
// UDP succeeds (connectionless).
TEST_P(EventLoopConnectingClientSocketTest, ErrorDuringConnectAsync) {
  std::unique_ptr<ConnectingClientSocket> socket =
      CreateSocket(server_socket_address_,
                   this);
  socket->ConnectAsync();
  if (connect_result_.has_value()) {
    // Connect completed synchronously; nothing "during" to test.
    EXPECT_TRUE(connect_result_.value().ok());
    socket->Disconnect();
    return;
  }
  EXPECT_TRUE(socket_api::Close(server_socket_descriptor_).ok());
  server_socket_descriptor_ = kInvalidSocketFd;
  EXPECT_FALSE(connect_result_.has_value());
  event_loop_->RunEventLoopOnce(QuicTime::Delta::FromSeconds(1));
  ASSERT_TRUE(connect_result_.has_value());
  switch (protocol_) {
    case socket_api::SocketProtocol::kTcp:
      EXPECT_FALSE(connect_result_.value().ok());
      break;
    case socket_api::SocketProtocol::kUdp:
      EXPECT_TRUE(connect_result_.value().ok());
      break;
    default:
      FAIL();
  }
}
// Disconnect after a successful blocking connect completes cleanly.
TEST_P(EventLoopConnectingClientSocketTest, Disconnect) {
  std::unique_ptr<ConnectingClientSocket> socket =
      CreateSocket(server_socket_address_,
                   nullptr);
  ASSERT_TRUE(socket->ConnectBlocking().ok());
  socket->Disconnect();
}
// Disconnecting while an async connect is pending yields a Cancelled
// ConnectComplete callback (unless the connect already finished).
TEST_P(EventLoopConnectingClientSocketTest, DisconnectCancelsConnectAsync) {
  std::unique_ptr<ConnectingClientSocket> socket =
      CreateSocket(server_socket_address_,
                   this);
  socket->ConnectAsync();
  bool expect_canceled = true;
  if (connect_result_.has_value()) {
    EXPECT_TRUE(connect_result_.value().ok());
    expect_canceled = false;
  }
  socket->Disconnect();
  if (expect_canceled) {
    ASSERT_TRUE(connect_result_.has_value());
    EXPECT_TRUE(absl::IsCancelled(connect_result_.value()));
  }
}
// The socket can be reconnected after a disconnect.
TEST_P(EventLoopConnectingClientSocketTest, ConnectAndReconnect) {
  std::unique_ptr<ConnectingClientSocket> socket =
      CreateSocket(server_socket_address_,
                   nullptr);
  ASSERT_TRUE(socket->ConnectBlocking().ok());
  socket->Disconnect();
  EXPECT_TRUE(socket->ConnectBlocking().ok());
  socket->Disconnect();
}
// GetLocalAddress() on a connected socket returns an initialized address.
TEST_P(EventLoopConnectingClientSocketTest, GetLocalAddress) {
  std::unique_ptr<ConnectingClientSocket> socket =
      CreateSocket(server_socket_address_,
                   nullptr);
  ASSERT_TRUE(socket->ConnectBlocking().ok());
  absl::StatusOr<QuicSocketAddress> address = socket->GetLocalAddress();
  ASSERT_TRUE(address.ok());
  EXPECT_TRUE(address.value().IsInitialized());
  socket->Disconnect();
}
// Server-side behavior: writes all of `data` to the connected socket. For
// TCP, loops until everything is sent; for UDP a single datagram send
// suffices. Silently stops on send error (e.g. the peer disconnected).
void SendDataOnSocket(absl::string_view data, SocketFd connected_socket,
                      socket_api::SocketProtocol protocol) {
  QUICHE_CHECK(!data.empty());
  do {
    absl::StatusOr<absl::string_view> remainder =
        socket_api::Send(connected_socket, data);
    if (!remainder.ok()) {
      return;
    }
    data = remainder.value();
  } while (protocol == socket_api::SocketProtocol::kTcp && !data.empty());
  QUICHE_CHECK(data.empty());
}
// Blocking receive collects exactly the bytes the server sent. For TCP,
// reads until the empty slice signalling orderly shutdown.
TEST_P(EventLoopConnectingClientSocketTest, ReceiveBlocking) {
  std::unique_ptr<ConnectingClientSocket> socket =
      CreateSocket(server_socket_address_,
                   nullptr);
  ASSERT_TRUE(socket->ConnectBlocking().ok());
  std::string expected = {1, 2, 3, 4, 5, 6, 7, 8};
  std::unique_ptr<TestServerSocketRunner> runner = CreateServerSocketRunner(
      absl::bind_front(&SendDataOnSocket, expected), socket.get());
  std::string received;
  absl::StatusOr<quiche::QuicheMemSlice> data;
  do {
    data = socket->ReceiveBlocking(100);
    ASSERT_TRUE(data.ok());
    received.append(data.value().data(), data.value().length());
  } while (protocol_ == socket_api::SocketProtocol::kTcp &&
           !data.value().empty());
  EXPECT_EQ(received, expected);
  socket->Disconnect();
}
// Async receive delivers server data via ReceiveComplete after event-loop
// turns; for TCP any remaining bytes are drained with blocking receives.
TEST_P(EventLoopConnectingClientSocketTest, ReceiveAsync) {
  std::unique_ptr<ConnectingClientSocket> socket =
      CreateSocket(server_socket_address_,
                   this);
  ASSERT_TRUE(socket->ConnectBlocking().ok());
  // Start the receive before any data exists, to force the pending path.
  socket->ReceiveAsync(100);
  EXPECT_FALSE(receive_result_.has_value());
  std::string expected = {1, 2, 3, 4, 5, 6, 7, 8};
  std::unique_ptr<TestServerSocketRunner> runner = CreateServerSocketRunner(
      absl::bind_front(&SendDataOnSocket, expected), socket.get());
  EXPECT_FALSE(receive_result_.has_value());
  for (int i = 0; i < 5 && !receive_result_.has_value(); ++i) {
    event_loop_->RunEventLoopOnce(QuicTime::Delta::FromSeconds(1));
  }
  ASSERT_TRUE(receive_result_.has_value());
  ASSERT_TRUE(receive_result_.value().ok());
  EXPECT_FALSE(receive_result_.value().value().empty());
  std::string received(receive_result_.value().value().data(),
                       receive_result_.value().value().length());
  if (protocol_ == socket_api::SocketProtocol::kTcp) {
    // TCP may deliver the payload across multiple reads; drain the rest.
    absl::StatusOr<quiche::QuicheMemSlice> data;
    do {
      data = socket->ReceiveBlocking(100);
      ASSERT_TRUE(data.ok());
      received.append(data.value().data(), data.value().length());
    } while (!data.value().empty());
  }
  EXPECT_EQ(received, expected);
  receive_result_.reset();
  socket->Disconnect();
  EXPECT_FALSE(receive_result_.has_value());
}
// Disconnecting with an async receive pending yields a Cancelled
// ReceiveComplete callback.
TEST_P(EventLoopConnectingClientSocketTest, DisconnectCancelsReceiveAsync) {
  std::unique_ptr<ConnectingClientSocket> socket =
      CreateSocket(server_socket_address_,
                   this);
  ASSERT_TRUE(socket->ConnectBlocking().ok());
  socket->ReceiveAsync(100);
  EXPECT_FALSE(receive_result_.has_value());
  socket->Disconnect();
  ASSERT_TRUE(receive_result_.has_value());
  ASSERT_FALSE(receive_result_.value().ok());
  EXPECT_TRUE(absl::IsCancelled(receive_result_.value().status()));
}
// Server-side behavior: reads everything the client sends into
// `*out_received`. For TCP, loops until the zero-byte read that signals the
// peer closed; for UDP a single datagram read suffices.
void ReceiveDataFromSocket(std::string* out_received, SocketFd connected_socket,
                           socket_api::SocketProtocol protocol) {
  out_received->clear();
  std::string buffer(100, 0);
  absl::StatusOr<absl::Span<char>> received;
  do {
    received = socket_api::Receive(connected_socket, absl::MakeSpan(buffer));
    QUICHE_CHECK(received.ok());
    out_received->insert(out_received->end(), received.value().begin(),
                         received.value().end());
  } while (protocol == socket_api::SocketProtocol::kTcp &&
           !received.value().empty());
  QUICHE_CHECK(!out_received->empty());
}
// A blocking send delivers exactly the expected bytes to the server.
TEST_P(EventLoopConnectingClientSocketTest, SendBlocking) {
  std::unique_ptr<ConnectingClientSocket> socket =
      CreateSocket(server_socket_address_,
                   nullptr);
  ASSERT_TRUE(socket->ConnectBlocking().ok());
  std::string sent;
  std::unique_ptr<TestServerSocketRunner> runner = CreateServerSocketRunner(
      absl::bind_front(&ReceiveDataFromSocket, &sent), socket.get());
  std::string expected = {1, 2, 3, 4, 5, 6, 7, 8};
  EXPECT_TRUE(socket->SendBlocking(expected).ok());
  // Disconnect so the TCP server sees EOF and stops reading.
  socket->Disconnect();
  runner->WaitForCompletion();
  EXPECT_EQ(sent, expected);
}
// Async send completes via SendComplete. For TCP, the small send buffer is
// filled until a send stays pending, then the server drains it; for UDP a
// single datagram send completes synchronously.
TEST_P(EventLoopConnectingClientSocketTest, SendAsync) {
  std::unique_ptr<ConnectingClientSocket> socket =
      CreateSocketToEncourageDelayedSend(server_socket_address_,
                                         this);
  ASSERT_TRUE(socket->ConnectBlocking().ok());
  std::string data = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  std::string expected;
  std::unique_ptr<TestServerSocketRunner> runner;
  std::string sent;
  switch (protocol_) {
    case socket_api::SocketProtocol::kTcp:
      // Keep sending until a send does NOT complete synchronously, proving
      // the pending/re-arm path is exercised.
      do {
        expected.insert(expected.end(), data.begin(), data.end());
        send_result_.reset();
        socket->SendAsync(data);
        ASSERT_TRUE(!send_result_.has_value() || send_result_.value().ok());
      } while (send_result_.has_value());
      runner = CreateServerSocketRunner(
          absl::bind_front(&ReceiveDataFromSocket, &sent), socket.get());
      EXPECT_FALSE(send_result_.has_value());
      for (int i = 0; i < 5 && !send_result_.has_value(); ++i) {
        event_loop_->RunEventLoopOnce(QuicTime::Delta::FromSeconds(1));
      }
      break;
    case socket_api::SocketProtocol::kUdp:
      runner = CreateServerSocketRunner(
          absl::bind_front(&ReceiveDataFromSocket, &sent), socket.get());
      socket->SendAsync(data);
      expected = data;
      break;
    default:
      FAIL();
  }
  ASSERT_TRUE(send_result_.has_value());
  EXPECT_TRUE(send_result_.value().ok());
  send_result_.reset();
  socket->Disconnect();
  EXPECT_FALSE(send_result_.has_value());
  runner->WaitForCompletion();
  EXPECT_EQ(sent, expected);
}
// Disconnecting with an async send pending yields a Cancelled SendComplete
// callback. UDP sends never stay pending, so the test is TCP-only.
TEST_P(EventLoopConnectingClientSocketTest, DisconnectCancelsSendAsync) {
  if (protocol_ == socket_api::SocketProtocol::kUdp) {
    return;
  }
  std::unique_ptr<ConnectingClientSocket> socket =
      CreateSocketToEncourageDelayedSend(server_socket_address_,
                                         this);
  ASSERT_TRUE(socket->ConnectBlocking().ok());
  std::string data = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  // Fill the tiny send buffer until a send stays pending.
  do {
    send_result_.reset();
    socket->SendAsync(data);
    ASSERT_TRUE(!send_result_.has_value() || send_result_.value().ok());
  } while (send_result_.has_value());
  socket->Disconnect();
  ASSERT_TRUE(send_result_.has_value());
  EXPECT_TRUE(absl::IsCancelled(send_result_.value()));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/io/event_loop_connecting_client_socket.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/io/event_loop_connecting_client_socket_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
f24d4774-ec82-49af-b0a3-e694b8beb8e4 | cpp | tensorflow/tensorflow | shape_inference_utils | tensorflow/compiler/mlir/tensorflow/utils/shape_inference_utils.cc | tensorflow/core/ir/utils/shape_inference_utils_test.cc | #include "tensorflow/compiler/mlir/tensorflow/utils/shape_inference_utils.h"
#include <optional>
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/export_tf_dialect_op.h"
#define DEBUG_TYPE "tf-shape-inference-utils"
namespace mlir {
namespace TF {
LogicalResult InferReturnTypeComponentsForTFOp(
std::optional<Location> location, Operation* op, int64_t graph_version,
tfg::OperandAsConstantFn operand_as_constant_fn,
tfg::OpResultAsShapeFn op_result_as_shape_fn,
tfg::ResultElementTypeFn result_element_type_fn,
SmallVectorImpl<ShapedTypeComponents>& inferred_return_shapes) {
assert(op->getName().getDialectNamespace() ==
TensorFlowDialect::getDialectNamespace());
return tfg::InferReturnTypeComponentsForTFOp(
location, op, op->getOperands(), graph_version, operand_as_constant_fn,
op_result_as_shape_fn, result_element_type_fn,
tensorflow::GetAttrValuesFromOperation, inferred_return_shapes);
}
}
} | #include "tensorflow/core/ir/utils/shape_inference_utils.h"
#include <vector>
#include "absl/status/status.h"
#include "llvm/ADT/STLExtras.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Interfaces/InferTypeOpInterface.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/ir/tf_op_wrapper.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
using tensorflow::shape_inference::DimensionHandle;
using tensorflow::shape_inference::InferenceContext;
using tensorflow::shape_inference::ShapeHandle;
namespace mlir {
namespace tfg {
namespace {
const char *const code = R"mlir(
tfg.func @test(%arg : tensor<32x?x256x4xi32> {tfg.name = "arg"}, %arg_1 : tensor<*xi32> {tfg.name = "arg1", tf._output_shapes = [5 : i32]}) -> (tensor<2x2xf32>) {
%Placeholder, %ctl = Placeholder name("placeholder") {dtype = f32, shape = #tf_type.shape<>} : () -> (tensor<f32>)
%Const, %ctl_0 = Const name("c0") {dtype = f32, value = dense<1.000000e+00> : tensor<2x2xf32>} : () -> (tensor<2x2xf32>)
%Const_1, %ctl_2 = Const name("c1") {dtype = f32, value = dense<2.000000e+00> : tensor<2x2xf32>} : () -> (tensor<2x2xf32>)
%IdentityN:3, %ctl_3 = IdentityN(%Const, %Placeholder, %Const_1) name("id_n") {T = [f32, f32, f32]} : (tensor<2x2xf32>, tensor<f32>, tensor<2x2xf32>) -> (tensor<2x2xf32>, tensor<f32>, tensor<2x2xf32>)
%Identity, %ctl_6 = Identity(%IdentityN#1) name("id1") {T = f32} : (tensor<f32>) -> (tensor<f32>)
%Add, %ctl_7 = Add(%Const, %IdentityN#1) name("add") {T = f32} : (tensor<2x2xf32>, tensor<f32>) -> (tensor<2x2xf32>)
%Const_1000, %ctl_9 = Const name("c1000") {dtype = i32, value = dense<1000> : tensor<i32>} : () -> (tensor<i32>)
%Const_2, %ctl_10 = Const name("c2") {dtype = i32, value = dense<0> : tensor<i32>} : () -> (tensor<i32>)
%Const_3, %ctl_11 = Const name("c3") {dtype = i32, value = dense<1> : tensor<i32>} : () -> (tensor<i32>)
%Range, %ctl_range = Range(%Const_2, %Const_1000, %Const_3) name("range") {Tidx = i32} : (tensor<i32>, tensor<i32>, tensor<i32>) -> tensor<1000xi32>
%Const_4, %ctl_12 = Const name("c4") {dtype = i32, value = dense<[32, -1, 4]> : tensor<3xi32>} : () -> (tensor<3xi32>)
%Reshape, %ctl_13 = Reshape(%arg, %Const_4) name("reshape") {T = i32} : (tensor<32x?x256x4xi32>, tensor<3xi32>) -> tensor<32x?x4xi32>
%Const_5, %ctl_14 = Const name("TensorListReserve/num_elements") {dtype = i32, value = dense<3> : tensor<i32>} : () -> (tensor<i32>)
%Const_6, %ctl_15 = Const name("TensorListReserve/element_shape") {dtype = i32, value = dense<2> : tensor<2xi32>} : () -> (tensor<2xi32>)
%TensorListReserve, %ctl_16 = TensorListReserve(%Const_6, %Const_5) name("TensorListReserve") {element_dtype = f32, shape_type = i32} : (tensor<2xi32>, tensor<i32>) -> (tensor<!tf_type.variant<tensor<2x2xf32>>>)
%Const_7, %ctl_17 = Const name("index") {dtype = i32, value = dense<0> : tensor<i32>} : () -> (tensor<i32>)
%Const_8, %ctl_18 = Const name("item") {dtype = f32, value = dense<[[1.000000e+00, 2.000000e+00], [3.000000e+00, 4.000000e+00]]> : tensor<2x2xf32>} : () -> (tensor<2x2xf32>)
%TensorListSetItem, %ctl_19 = TensorListSetItem(%TensorListReserve, %Const_7, %Const_8) name("TensorListSetItem") {element_dtype = f32} : (tensor<!tf_type.variant<tensor<2x2xf32>>>, tensor<i32>, tensor<2x2xf32>) -> (tensor<!tf_type.variant<tensor<2x2xf32>>>)
%Identity_1, %ctl_20 = Identity(%arg_1) name("id2") {T = i32} : (tensor<*xi32>) -> (tensor<*xi32>)
return (%Const_1) : tensor<2x2xf32>
}
)mlir";
}
class ShapeInferenceTest : public ::testing::Test {
protected:
using OpShapeInfo = SmallVector<ShapedTypeComponents>;
ShapeInferenceTest() {
context_.getOrLoadDialect<tfg::TFGraphDialect>();
module_ = mlir::parseSourceString<mlir::ModuleOp>(code, &context_);
assert(module_);
}
template <typename OpRange>
void VerifyInferredShapes(OpRange &&ops,
SmallVector<OpShapeInfo> &inferred_result,
bool check_type) {
for (auto it : llvm::zip(ops, inferred_result)) {
Operation &op = std::get<0>(it);
OpShapeInfo &info = std::get<1>(it);
EXPECT_EQ(op.getNumResults() - 1, info.size());
for (int i = 0; i < op.getNumResults() - 1; ++i) {
ShapedType shape = mlir::cast<ShapedType>(op.getResultTypes()[i]);
EXPECT_EQ(shape.hasRank(), info[i].hasRank());
if (shape.hasRank()) EXPECT_EQ(shape.getShape(), info[i].getDims());
if (check_type)
EXPECT_EQ(shape.getElementType(), info[i].getElementType());
}
}
}
ModuleOp GetModule() { return module_.get(); }
MLIRContext *GetContext() { return &context_; }
private:
MLIRContext context_;
OwningOpRef<ModuleOp> module_;
};
TEST_F(ShapeInferenceTest, TestShapeAndTypeInference) {
auto operand_as_constant_fn = [](Value operand) -> Attribute {
return operand.getDefiningOp()->getAttr("value");
};
auto op_result_as_shape_fn = [](InferenceContext &ic,
OpResult op_result) -> ShapeHandle {
auto rt = mlir::dyn_cast<RankedTensorType>(op_result.getType());
if (!rt || rt.getRank() != 1 || !rt.hasStaticShape()) return {};
std::vector<DimensionHandle> dims(rt.getDimSize(0), ic.UnknownDim());
auto attr =
op_result.getDefiningOp()->getAttrOfType<DenseElementsAttr>("value");
for (auto element : llvm::enumerate(attr.getValues<APInt>()))
dims[element.index()] = ic.MakeDim(element.value().getSExtValue());
return ic.MakeShape(dims);
};
GraphFuncOp func = GetModule().lookupSymbol<GraphFuncOp>("test");
ASSERT_TRUE(func);
Block &block = *func.getBody().begin();
SmallVector<SmallVector<ShapedTypeComponents>> all_results;
for (Operation &op : block.without_terminator()) {
auto result_element_type_fn = [&](int idx) -> Type {
return mlir::cast<ShapedType>(op.getResult(idx).getType())
.getElementType();
};
SmallVector<ShapedTypeComponents> results;
EXPECT_TRUE(InferReturnTypeComponentsForTFOp(
op.getLoc(), &op, TFOp(&op).getNonControlOperands(),
1010, operand_as_constant_fn,
op_result_as_shape_fn, result_element_type_fn,
nullptr, results)
.succeeded());
all_results.push_back(results);
}
VerifyInferredShapes(func.getBody().begin()->without_terminator(),
all_results,
true);
auto exclude_reshape_operand_as_constant_fn =
[&](Value operand) -> Attribute {
Operation *defining_op = operand.getDefiningOp();
if (!defining_op || defining_op->getName().getStringRef() == "tfg.Reshape")
return BoolAttr::get(GetContext(), false);
return operand.getDefiningOp()->getAttr("value");
};
all_results.clear();
for (Operation &op : block.without_terminator()) {
auto result_element_type_fn = [&](int idx) -> Type {
return mlir::cast<ShapedType>(op.getResult(idx).getType())
.getElementType();
};
SmallVector<ShapedTypeComponents> results;
EXPECT_TRUE(InferReturnTypeComponentsForTFOp(
op.getLoc(), &op, TFOp(&op).getNonControlOperands(),
1010,
exclude_reshape_operand_as_constant_fn,
op_result_as_shape_fn, result_element_type_fn,
nullptr, results)
.succeeded());
all_results.push_back(results);
}
VerifyInferredShapes(func.getBody().begin()->without_terminator(),
all_results,
true);
}
TEST_F(ShapeInferenceTest, TestInferenceFailure) {
auto operand_as_constant_fn = [](Value operand) -> Attribute {
return nullptr;
};
auto op_result_as_shape_fn = [](InferenceContext &ic,
OpResult op_result) -> ShapeHandle {
return {};
};
auto result_element_type_fn = [](int idx) -> Type { return nullptr; };
GraphFuncOp func = GetModule().lookupSymbol<GraphFuncOp>("test");
ASSERT_TRUE(func);
Block &block = *func.getBody().begin();
SmallVector<SmallVector<ShapedTypeComponents>> all_results;
auto get_empty_attr_values_fn =
[](Operation *, llvm::StringRef, const tensorflow::OpRegistrationData *,
bool, tensorflow::AttrValueMap *) { return absl::OkStatus(); };
for (Operation &op : block.without_terminator()) {
SmallVector<ShapedTypeComponents> results;
auto result = InferReturnTypeComponentsForTFOp(
op.getLoc(), &op, TFOp(&op).getNonControlOperands(),
1010, operand_as_constant_fn, op_result_as_shape_fn,
result_element_type_fn, get_empty_attr_values_fn, results);
if (op.getName().getStringRef() == "tfg.Const" ||
op.getName().getStringRef() == "tfg.IdentityN" ||
op.getName().getStringRef() == "tfg.PlaceHolder" ||
op.getName().getStringRef() == "tfg.Range")
EXPECT_TRUE(failed(result));
}
auto error_attr_values_fn = [](Operation *, llvm::StringRef,
const tensorflow::OpRegistrationData *, bool,
tensorflow::AttrValueMap *) {
return tensorflow::errors::Unknown("Intended error");
};
for (Operation &op : block.without_terminator()) {
SmallVector<ShapedTypeComponents> results;
EXPECT_FALSE(InferReturnTypeComponentsForTFOp(
op.getLoc(), &op, TFOp(&op).getNonControlOperands(),
1010, operand_as_constant_fn,
op_result_as_shape_fn, result_element_type_fn,
error_attr_values_fn, results)
.succeeded());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/shape_inference_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ir/utils/shape_inference_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ad98af36-7b78-4dc3-9d69-3e4820bc3231 | cpp | google/tsl | intrusive_ptr | tsl/platform/intrusive_ptr.h | tsl/platform/intrusive_ptr_test.cc | #ifndef TENSORFLOW_TSL_PLATFORM_INTRUSIVE_PTR_H_
#define TENSORFLOW_TSL_PLATFORM_INTRUSIVE_PTR_H_
#include <algorithm>
namespace tsl {
namespace core {
template <class T>
class IntrusivePtr {
public:
IntrusivePtr(T* h, bool add_ref) { reset(h, add_ref); }
IntrusivePtr(const IntrusivePtr& o) { reset(o.handle_, true); }
IntrusivePtr(IntrusivePtr&& o) noexcept { *this = std::move(o); }
IntrusivePtr() {}
void reset(T* h, bool add_ref) {
if (h != handle_) {
if (add_ref && h) h->Ref();
if (handle_) handle_->Unref();
handle_ = h;
}
}
IntrusivePtr& operator=(const IntrusivePtr& o) {
reset(o.handle_, true);
return *this;
}
IntrusivePtr& operator=(IntrusivePtr&& o) noexcept {
if (handle_ != o.handle_) {
reset(o.detach(), false);
}
return *this;
}
bool operator==(const IntrusivePtr& o) const { return handle_ == o.handle_; }
T* operator->() const { return handle_; }
T& operator*() const { return *handle_; }
explicit operator bool() const noexcept { return get(); }
T* get() const { return handle_; }
T* detach() {
T* handle = handle_;
handle_ = nullptr;
return handle;
}
~IntrusivePtr() {
if (handle_) handle_->Unref();
}
private:
T* handle_ = nullptr;
};
}
}
#endif | #include "tsl/platform/intrusive_ptr.h"
#include "tsl/platform/refcount.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace core {
namespace {
TEST(IntrusivePtr, ConstructorAddRefFalse) {
auto ptr = IntrusivePtr<RefCounted>(new RefCounted(), false);
ASSERT_TRUE(ptr->RefCountIsOne());
}
TEST(IntrusivePtr, ConstructorAddRefTrue) {
auto raw = new RefCounted();
auto ptr = IntrusivePtr<RefCounted>(raw, true);
ASSERT_FALSE(raw->RefCountIsOne());
raw->Unref();
ASSERT_TRUE(raw->RefCountIsOne());
}
TEST(IntrusivePtr, CopyConstructor) {
auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), false);
auto ptr2 = IntrusivePtr<RefCounted>(ptr1);
ASSERT_FALSE(ptr2->RefCountIsOne());
}
TEST(IntrusivePtr, CopyAssignment) {
auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), false);
auto raw = new RefCounted();
auto ptr2 = IntrusivePtr<RefCounted>(raw, true);
ptr2 = ptr1;
ASSERT_EQ(ptr1.get(), ptr2.get());
ASSERT_FALSE(ptr2->RefCountIsOne());
ASSERT_TRUE(raw->RefCountIsOne());
raw->Unref();
}
TEST(IntrusivePtr, CopyAssignmentIntoEmpty) {
auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), false);
auto ptr2 = IntrusivePtr<RefCounted>();
ptr2 = ptr1;
ASSERT_FALSE(ptr2->RefCountIsOne());
}
TEST(IntrusivePtr, MoveConstructor) {
auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), false);
auto ptr2 = IntrusivePtr<RefCounted>(std::move(ptr1));
ASSERT_TRUE(ptr2->RefCountIsOne());
ASSERT_EQ(ptr1.get(), nullptr);
}
TEST(IntrusivePtr, MoveAssignment) {
auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), false);
auto ptr2 = IntrusivePtr<RefCounted>(new RefCounted(), false);
ptr2 = std::move(ptr1);
ASSERT_TRUE(ptr2->RefCountIsOne());
ASSERT_EQ(ptr1.get(), nullptr);
}
TEST(IntrusivePtr, MoveAssignmentIntoEmpty) {
auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), false);
auto ptr2 = IntrusivePtr<RefCounted>();
ptr2 = std::move(ptr1);
ASSERT_TRUE(ptr2->RefCountIsOne());
ASSERT_EQ(ptr1.get(), nullptr);
}
TEST(IntrusivePtr, MoveAssignmentAlias) {
auto ptr = IntrusivePtr<RefCounted>(new RefCounted(), false);
auto& ptr_alias = ptr;
ptr = std::move(ptr_alias);
ASSERT_TRUE(ptr->RefCountIsOne());
}
TEST(IntrusivePtr, Reset) {
auto ptr = IntrusivePtr<RefCounted>(new RefCounted(), false);
ptr.reset(new RefCounted(), false);
ASSERT_TRUE(ptr->RefCountIsOne());
}
TEST(IntrusivePtr, ResetIntoEmpty) {
auto ptr = IntrusivePtr<RefCounted>();
ptr.reset(new RefCounted(), false);
ASSERT_TRUE(ptr->RefCountIsOne());
}
TEST(IntrusivePtr, ResetAlias) {
auto ptr = IntrusivePtr<RefCounted>(new RefCounted(), false);
ASSERT_TRUE(ptr->RefCountIsOne());
ptr.reset(ptr.get(), false);
ASSERT_TRUE(ptr->RefCountIsOne());
}
TEST(IntrusivePtr, ResetRefBeforeUnref) {
class Foo : public RefCounted {
public:
explicit Foo(char label, Foo* ptr = nullptr)
: label_(label), ptr_(ptr, false) {}
char label_;
IntrusivePtr<Foo> ptr_;
};
IntrusivePtr<Foo> x(new Foo{'a', new Foo{'b', new Foo{'c'}}}, false);
x->ptr_ = x->ptr_->ptr_;
}
TEST(IntrusivePtr, ResetStealPtrBeforeUnref) {
class Foo : public RefCounted {
public:
explicit Foo(char label, Foo* ptr = nullptr)
: label_(label), ptr_(ptr, false) {}
char label_;
IntrusivePtr<Foo> ptr_;
};
IntrusivePtr<Foo> x(new Foo{'a', new Foo{'b', new Foo{'c'}}}, false);
x->ptr_ = std::move(x->ptr_->ptr_);
}
TEST(IntrusivePtr, Detach) {
auto ptr = IntrusivePtr<RefCounted>(new RefCounted(), false);
ASSERT_TRUE(ptr->RefCountIsOne());
auto raw = ptr.detach();
ASSERT_TRUE(raw->RefCountIsOne());
raw->Unref();
}
}
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/intrusive_ptr.h | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/intrusive_ptr_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
e80839a8-1faa-4935-8730-513eb511a49b | cpp | google/quiche | spdy_protocol | quiche/http2/core/spdy_protocol.cc | quiche/http2/core/spdy_protocol_test.cc | #include "quiche/http2/core/spdy_protocol.h"
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/http2/core/spdy_alt_svc_wire_format.h"
#include "quiche/common/http/http_header_block.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_flag_utils.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace spdy {
const char* const kHttp2ConnectionHeaderPrefix =
"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
std::ostream& operator<<(std::ostream& out, SpdyKnownSettingsId id) {
return out << static_cast<SpdySettingsId>(id);
}
std::ostream& operator<<(std::ostream& out, SpdyFrameType frame_type) {
return out << SerializeFrameType(frame_type);
}
SpdyPriority ClampSpdy3Priority(SpdyPriority priority) {
static_assert(std::numeric_limits<SpdyPriority>::min() == kV3HighestPriority,
"The value of given priority shouldn't be smaller than highest "
"priority. Check this invariant explicitly.");
if (priority > kV3LowestPriority) {
QUICHE_BUG(spdy_bug_22_1)
<< "Invalid priority: " << static_cast<int>(priority);
return kV3LowestPriority;
}
return priority;
}
int ClampHttp2Weight(int weight) {
if (weight < kHttp2MinStreamWeight) {
QUICHE_BUG(spdy_bug_22_2) << "Invalid weight: " << weight;
return kHttp2MinStreamWeight;
}
if (weight > kHttp2MaxStreamWeight) {
QUICHE_BUG(spdy_bug_22_3) << "Invalid weight: " << weight;
return kHttp2MaxStreamWeight;
}
return weight;
}
int Spdy3PriorityToHttp2Weight(SpdyPriority priority) {
priority = ClampSpdy3Priority(priority);
const float kSteps = 255.9f / 7.f;
return static_cast<int>(kSteps * (7.f - priority)) + 1;
}
SpdyPriority Http2WeightToSpdy3Priority(int weight) {
weight = ClampHttp2Weight(weight);
const float kSteps = 255.9f / 7.f;
return static_cast<SpdyPriority>(7.f - (weight - 1) / kSteps);
}
bool IsDefinedFrameType(uint8_t frame_type_field) {
switch (static_cast<SpdyFrameType>(frame_type_field)) {
case SpdyFrameType::DATA:
return true;
case SpdyFrameType::HEADERS:
return true;
case SpdyFrameType::PRIORITY:
return true;
case SpdyFrameType::RST_STREAM:
return true;
case SpdyFrameType::SETTINGS:
return true;
case SpdyFrameType::PUSH_PROMISE:
return true;
case SpdyFrameType::PING:
return true;
case SpdyFrameType::GOAWAY:
return true;
case SpdyFrameType::WINDOW_UPDATE:
return true;
case SpdyFrameType::CONTINUATION:
return true;
case SpdyFrameType::ALTSVC:
return true;
case SpdyFrameType::PRIORITY_UPDATE:
return true;
case SpdyFrameType::ACCEPT_CH:
return true;
}
return false;
}
SpdyFrameType ParseFrameType(uint8_t frame_type_field) {
QUICHE_BUG_IF(spdy_bug_22_4, !IsDefinedFrameType(frame_type_field))
<< "Frame type not defined: " << static_cast<int>(frame_type_field);
return static_cast<SpdyFrameType>(frame_type_field);
}
uint8_t SerializeFrameType(SpdyFrameType frame_type) {
return static_cast<uint8_t>(frame_type);
}
bool IsValidHTTP2FrameStreamId(SpdyStreamId current_frame_stream_id,
SpdyFrameType frame_type_field) {
if (current_frame_stream_id == 0) {
switch (frame_type_field) {
case SpdyFrameType::DATA:
case SpdyFrameType::HEADERS:
case SpdyFrameType::PRIORITY:
case SpdyFrameType::RST_STREAM:
case SpdyFrameType::CONTINUATION:
case SpdyFrameType::PUSH_PROMISE:
return false;
default:
return true;
}
} else {
switch (frame_type_field) {
case SpdyFrameType::GOAWAY:
case SpdyFrameType::SETTINGS:
case SpdyFrameType::PING:
return false;
default:
return true;
}
}
}
const char* FrameTypeToString(SpdyFrameType frame_type) {
switch (frame_type) {
case SpdyFrameType::DATA:
return "DATA";
case SpdyFrameType::RST_STREAM:
return "RST_STREAM";
case SpdyFrameType::SETTINGS:
return "SETTINGS";
case SpdyFrameType::PING:
return "PING";
case SpdyFrameType::GOAWAY:
return "GOAWAY";
case SpdyFrameType::HEADERS:
return "HEADERS";
case SpdyFrameType::WINDOW_UPDATE:
return "WINDOW_UPDATE";
case SpdyFrameType::PUSH_PROMISE:
return "PUSH_PROMISE";
case SpdyFrameType::CONTINUATION:
return "CONTINUATION";
case SpdyFrameType::PRIORITY:
return "PRIORITY";
case SpdyFrameType::ALTSVC:
return "ALTSVC";
case SpdyFrameType::PRIORITY_UPDATE:
return "PRIORITY_UPDATE";
case SpdyFrameType::ACCEPT_CH:
return "ACCEPT_CH";
}
return "UNKNOWN_FRAME_TYPE";
}
bool ParseSettingsId(SpdySettingsId wire_setting_id,
SpdyKnownSettingsId* setting_id) {
if (wire_setting_id != SETTINGS_EXPERIMENT_SCHEDULER &&
(wire_setting_id < SETTINGS_MIN || wire_setting_id > SETTINGS_MAX)) {
return false;
}
*setting_id = static_cast<SpdyKnownSettingsId>(wire_setting_id);
switch (*setting_id) {
case SETTINGS_HEADER_TABLE_SIZE:
case SETTINGS_ENABLE_PUSH:
case SETTINGS_MAX_CONCURRENT_STREAMS:
case SETTINGS_INITIAL_WINDOW_SIZE:
case SETTINGS_MAX_FRAME_SIZE:
case SETTINGS_MAX_HEADER_LIST_SIZE:
case SETTINGS_ENABLE_CONNECT_PROTOCOL:
case SETTINGS_DEPRECATE_HTTP2_PRIORITIES:
case SETTINGS_EXPERIMENT_SCHEDULER:
return true;
}
return false;
}
std::string SettingsIdToString(SpdySettingsId id) {
SpdyKnownSettingsId known_id;
if (!ParseSettingsId(id, &known_id)) {
return absl::StrCat("SETTINGS_UNKNOWN_", absl::Hex(uint32_t{id}));
}
switch (known_id) {
case SETTINGS_HEADER_TABLE_SIZE:
return "SETTINGS_HEADER_TABLE_SIZE";
case SETTINGS_ENABLE_PUSH:
return "SETTINGS_ENABLE_PUSH";
case SETTINGS_MAX_CONCURRENT_STREAMS:
return "SETTINGS_MAX_CONCURRENT_STREAMS";
case SETTINGS_INITIAL_WINDOW_SIZE:
return "SETTINGS_INITIAL_WINDOW_SIZE";
case SETTINGS_MAX_FRAME_SIZE:
return "SETTINGS_MAX_FRAME_SIZE";
case SETTINGS_MAX_HEADER_LIST_SIZE:
return "SETTINGS_MAX_HEADER_LIST_SIZE";
case SETTINGS_ENABLE_CONNECT_PROTOCOL:
return "SETTINGS_ENABLE_CONNECT_PROTOCOL";
case SETTINGS_DEPRECATE_HTTP2_PRIORITIES:
return "SETTINGS_DEPRECATE_HTTP2_PRIORITIES";
case SETTINGS_EXPERIMENT_SCHEDULER:
return "SETTINGS_EXPERIMENT_SCHEDULER";
}
return absl::StrCat("SETTINGS_UNKNOWN_", absl::Hex(uint32_t{id}));
}
SpdyErrorCode ParseErrorCode(uint32_t wire_error_code) {
if (wire_error_code > ERROR_CODE_MAX) {
return ERROR_CODE_INTERNAL_ERROR;
}
return static_cast<SpdyErrorCode>(wire_error_code);
}
const char* ErrorCodeToString(SpdyErrorCode error_code) {
switch (error_code) {
case ERROR_CODE_NO_ERROR:
return "NO_ERROR";
case ERROR_CODE_PROTOCOL_ERROR:
return "PROTOCOL_ERROR";
case ERROR_CODE_INTERNAL_ERROR:
return "INTERNAL_ERROR";
case ERROR_CODE_FLOW_CONTROL_ERROR:
return "FLOW_CONTROL_ERROR";
case ERROR_CODE_SETTINGS_TIMEOUT:
return "SETTINGS_TIMEOUT";
case ERROR_CODE_STREAM_CLOSED:
return "STREAM_CLOSED";
case ERROR_CODE_FRAME_SIZE_ERROR:
return "FRAME_SIZE_ERROR";
case ERROR_CODE_REFUSED_STREAM:
return "REFUSED_STREAM";
case ERROR_CODE_CANCEL:
return "CANCEL";
case ERROR_CODE_COMPRESSION_ERROR:
return "COMPRESSION_ERROR";
case ERROR_CODE_CONNECT_ERROR:
return "CONNECT_ERROR";
case ERROR_CODE_ENHANCE_YOUR_CALM:
return "ENHANCE_YOUR_CALM";
case ERROR_CODE_INADEQUATE_SECURITY:
return "INADEQUATE_SECURITY";
case ERROR_CODE_HTTP_1_1_REQUIRED:
return "HTTP_1_1_REQUIRED";
}
return "UNKNOWN_ERROR_CODE";
}
const char* WriteSchedulerTypeToString(WriteSchedulerType type) {
switch (type) {
case WriteSchedulerType::LIFO:
return "LIFO";
case WriteSchedulerType::SPDY:
return "SPDY";
case WriteSchedulerType::HTTP2:
return "HTTP2";
case WriteSchedulerType::FIFO:
return "FIFO";
}
return "UNKNOWN";
}
size_t GetNumberRequiredContinuationFrames(size_t size) {
QUICHE_DCHECK_GT(size, kHttp2MaxControlFrameSendSize);
size_t overflow = size - kHttp2MaxControlFrameSendSize;
int payload_size =
kHttp2MaxControlFrameSendSize - kContinuationFrameMinimumSize;
return (overflow - 1) / payload_size + 1;
}
const char* const kHttp2Npn = "h2";
const char* const kHttp2AuthorityHeader = ":authority";
const char* const kHttp2MethodHeader = ":method";
const char* const kHttp2PathHeader = ":path";
const char* const kHttp2SchemeHeader = ":scheme";
const char* const kHttp2ProtocolHeader = ":protocol";
const char* const kHttp2StatusHeader = ":status";
bool SpdyFrameIR::fin() const { return false; }
int SpdyFrameIR::flow_control_window_consumed() const { return 0; }
bool SpdyFrameWithFinIR::fin() const { return fin_; }
SpdyFrameWithHeaderBlockIR::SpdyFrameWithHeaderBlockIR(
SpdyStreamId stream_id, quiche::HttpHeaderBlock header_block)
: SpdyFrameWithFinIR(stream_id), header_block_(std::move(header_block)) {}
SpdyFrameWithHeaderBlockIR::~SpdyFrameWithHeaderBlockIR() = default;
SpdyDataIR::SpdyDataIR(SpdyStreamId stream_id, absl::string_view data)
: SpdyFrameWithFinIR(stream_id),
data_(nullptr),
data_len_(0),
padded_(false),
padding_payload_len_(0) {
SetDataDeep(data);
}
SpdyDataIR::SpdyDataIR(SpdyStreamId stream_id, const char* data)
: SpdyDataIR(stream_id, absl::string_view(data)) {}
SpdyDataIR::SpdyDataIR(SpdyStreamId stream_id, std::string data)
: SpdyFrameWithFinIR(stream_id),
data_store_(std::make_unique<std::string>(std::move(data))),
data_(data_store_->data()),
data_len_(data_store_->size()),
padded_(false),
padding_payload_len_(0) {}
SpdyDataIR::SpdyDataIR(SpdyStreamId stream_id)
: SpdyFrameWithFinIR(stream_id),
data_(nullptr),
data_len_(0),
padded_(false),
padding_payload_len_(0) {}
SpdyDataIR::~SpdyDataIR() = default;
void SpdyDataIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitData(*this);
}
SpdyFrameType SpdyDataIR::frame_type() const { return SpdyFrameType::DATA; }
int SpdyDataIR::flow_control_window_consumed() const {
return padded_ ? 1 + padding_payload_len_ + data_len_ : data_len_;
}
size_t SpdyDataIR::size() const {
return kFrameHeaderSize +
(padded() ? 1 + padding_payload_len() + data_len() : data_len());
}
SpdyRstStreamIR::SpdyRstStreamIR(SpdyStreamId stream_id,
SpdyErrorCode error_code)
: SpdyFrameIR(stream_id) {
set_error_code(error_code);
}
SpdyRstStreamIR::~SpdyRstStreamIR() = default;
void SpdyRstStreamIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitRstStream(*this);
}
SpdyFrameType SpdyRstStreamIR::frame_type() const {
return SpdyFrameType::RST_STREAM;
}
size_t SpdyRstStreamIR::size() const { return kRstStreamFrameSize; }
SpdySettingsIR::SpdySettingsIR() : is_ack_(false) {}
SpdySettingsIR::~SpdySettingsIR() = default;
void SpdySettingsIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitSettings(*this);
}
SpdyFrameType SpdySettingsIR::frame_type() const {
return SpdyFrameType::SETTINGS;
}
size_t SpdySettingsIR::size() const {
return kFrameHeaderSize + values_.size() * kSettingsOneSettingSize;
}
void SpdyPingIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitPing(*this);
}
SpdyFrameType SpdyPingIR::frame_type() const { return SpdyFrameType::PING; }
size_t SpdyPingIR::size() const { return kPingFrameSize; }
SpdyGoAwayIR::SpdyGoAwayIR(SpdyStreamId last_good_stream_id,
SpdyErrorCode error_code,
absl::string_view description)
: description_(description) {
set_last_good_stream_id(last_good_stream_id);
set_error_code(error_code);
}
SpdyGoAwayIR::SpdyGoAwayIR(SpdyStreamId last_good_stream_id,
SpdyErrorCode error_code, const char* description)
: SpdyGoAwayIR(last_good_stream_id, error_code,
absl::string_view(description)) {}
SpdyGoAwayIR::SpdyGoAwayIR(SpdyStreamId last_good_stream_id,
SpdyErrorCode error_code, std::string description)
: description_store_(std::move(description)),
description_(description_store_) {
set_last_good_stream_id(last_good_stream_id);
set_error_code(error_code);
}
SpdyGoAwayIR::~SpdyGoAwayIR() = default;
void SpdyGoAwayIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitGoAway(*this);
}
SpdyFrameType SpdyGoAwayIR::frame_type() const { return SpdyFrameType::GOAWAY; }
size_t SpdyGoAwayIR::size() const {
return kGoawayFrameMinimumSize + description_.size();
}
SpdyContinuationIR::SpdyContinuationIR(SpdyStreamId stream_id)
: SpdyFrameIR(stream_id), end_headers_(false) {}
SpdyContinuationIR::~SpdyContinuationIR() = default;
void SpdyContinuationIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitContinuation(*this);
}
SpdyFrameType SpdyContinuationIR::frame_type() const {
return SpdyFrameType::CONTINUATION;
}
size_t SpdyContinuationIR::size() const {
QUICHE_DLOG(WARNING) << "Shouldn't not call size() for CONTINUATION frame.";
return 0;
}
void SpdyHeadersIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitHeaders(*this);
}
SpdyFrameType SpdyHeadersIR::frame_type() const {
return SpdyFrameType::HEADERS;
}
size_t SpdyHeadersIR::size() const {
size_t size = kHeadersFrameMinimumSize;
if (padded_) {
size += 1;
size += padding_payload_len_;
}
if (has_priority_) {
size += 5;
}
size += header_block().TotalBytesUsed() +
header_block().size() * kPerHeaderHpackOverhead;
if (size > kHttp2MaxControlFrameSendSize) {
size += GetNumberRequiredContinuationFrames(size) *
kContinuationFrameMinimumSize;
}
return size;
}
void SpdyWindowUpdateIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitWindowUpdate(*this);
}
SpdyFrameType SpdyWindowUpdateIR::frame_type() const {
return SpdyFrameType::WINDOW_UPDATE;
}
size_t SpdyWindowUpdateIR::size() const { return kWindowUpdateFrameSize; }
void SpdyPushPromiseIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitPushPromise(*this);
}
SpdyFrameType SpdyPushPromiseIR::frame_type() const {
return SpdyFrameType::PUSH_PROMISE;
}
size_t SpdyPushPromiseIR::size() const {
size_t size = kPushPromiseFrameMinimumSize;
if (padded_) {
size += 1;
size += padding_payload_len_;
}
size += header_block().TotalBytesUsed();
if (size > kHttp2MaxControlFrameSendSize) {
size += GetNumberRequiredContinuationFrames(size) *
kContinuationFrameMinimumSize;
}
return size;
}
SpdyAltSvcIR::SpdyAltSvcIR(SpdyStreamId stream_id) : SpdyFrameIR(stream_id) {}
SpdyAltSvcIR::~SpdyAltSvcIR() = default;
void SpdyAltSvcIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitAltSvc(*this);
}
SpdyFrameType SpdyAltSvcIR::frame_type() const { return SpdyFrameType::ALTSVC; }
size_t SpdyAltSvcIR::size() const {
size_t size = kGetAltSvcFrameMinimumSize;
size += origin_.length();
std::string str =
SpdyAltSvcWireFormat::SerializeHeaderFieldValue(altsvc_vector_);
size += str.size();
return size;
}
void SpdyPriorityIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitPriority(*this);
}
SpdyFrameType SpdyPriorityIR::frame_type() const {
return SpdyFrameType::PRIORITY;
}
size_t SpdyPriorityIR::size() const { return kPriorityFrameSize; }
void SpdyPriorityUpdateIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitPriorityUpdate(*this);
}
SpdyFrameType SpdyPriorityUpdateIR::frame_type() const {
return SpdyFrameType::PRIORITY_UPDATE;
}
size_t SpdyPriorityUpdateIR::size() const {
return kPriorityUpdateFrameMinimumSize + priority_field_value_.size();
}
void SpdyAcceptChIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitAcceptCh(*this);
}
SpdyFrameType SpdyAcceptChIR::frame_type() const {
return SpdyFrameType::ACCEPT_CH;
}
size_t SpdyAcceptChIR::size() const {
size_t total_size = kAcceptChFrameMinimumSize;
for (const AcceptChOriginValuePair& entry : entries_) {
total_size += entry.origin.size() + entry.value.size() +
kAcceptChFramePerEntryOverhead;
}
return total_size;
}
void SpdyUnknownIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitUnknown(*this);
}
SpdyFrameType SpdyUnknownIR::frame_type() const {
return static_cast<SpdyFrameType>(type());
}
size_t SpdyUnknownIR::size() const {
return kFrameHeaderSize + payload_.size();
}
int SpdyUnknownIR::flow_control_window_consumed() const {
if (frame_type() == SpdyFrameType::DATA) {
return payload_.size();
} else {
return 0;
}
}
const size_t kPadLengthFieldSize = 1;
size_t GetHeaderFrameSizeSansBlock(const SpdyHeadersIR& header_ir) {
size_t min_size = kFrameHeaderSize;
if (header_ir.padded()) {
min_size += kPadLengthFieldSize;
min_size += header_ir.padding_payload_len();
}
if (header_ir.has_priority()) {
min_size += 5;
}
return min_size;
}
size_t GetPushPromiseFrameSizeSansBlock(
const SpdyPushPromiseIR& push_promise_ir) {
size_t min_size = kPushPromiseFrameMinimumSize;
if (push_promise_ir.padded()) {
min_size += kPadLengthFieldSize;
min_size += push_promise_ir.padding_payload_len();
}
return min_size;
}
} | #include "quiche/http2/core/spdy_protocol.h"
#include <iostream>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_expect_bug.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace spdy {
// Pretty-printer used by gtest failure messages: renders either the SPDY/3
// priority form or the HTTP/2 (parent_id, weight, exclusive) form of a
// stream precedence.
std::ostream& operator<<(std::ostream& os,
                         const SpdyStreamPrecedence precedence) {
  if (precedence.is_spdy3_priority()) {
    os << "SpdyStreamPrecedence[spdy3_priority=" << precedence.spdy3_priority()
       << "]";
  } else {
    os << "SpdyStreamPrecedence[parent_id=" << precedence.parent_id()
       << ", weight=" << precedence.weight()
       << ", is_exclusive=" << precedence.is_exclusive() << "]";
  }
  return os;
}
namespace test {
// Out-of-range SPDY/3 priorities trigger a bug report and clamp to the
// nearest bound; in-range bounds pass through unchanged.
TEST(SpdyProtocolTest, ClampSpdy3Priority) {
  EXPECT_QUICHE_BUG(EXPECT_EQ(7, ClampSpdy3Priority(8)), "Invalid priority: 8");
  EXPECT_EQ(kV3LowestPriority, ClampSpdy3Priority(kV3LowestPriority));
  EXPECT_EQ(kV3HighestPriority, ClampSpdy3Priority(kV3HighestPriority));
}
// Out-of-range HTTP/2 weights trigger a bug report and clamp to
// [kHttp2MinStreamWeight, kHttp2MaxStreamWeight]; the bounds pass through.
TEST(SpdyProtocolTest, ClampHttp2Weight) {
  EXPECT_QUICHE_BUG(EXPECT_EQ(kHttp2MinStreamWeight, ClampHttp2Weight(0)),
                    "Invalid weight: 0");
  EXPECT_QUICHE_BUG(EXPECT_EQ(kHttp2MaxStreamWeight, ClampHttp2Weight(300)),
                    "Invalid weight: 300");
  EXPECT_EQ(kHttp2MinStreamWeight, ClampHttp2Weight(kHttp2MinStreamWeight));
  EXPECT_EQ(kHttp2MaxStreamWeight, ClampHttp2Weight(kHttp2MaxStreamWeight));
}
// Pins the exact SPDY/3 priority (0 = highest) to HTTP/2 weight mapping for
// all eight priority values.
TEST(SpdyProtocolTest, Spdy3PriorityToHttp2Weight) {
  EXPECT_EQ(256, Spdy3PriorityToHttp2Weight(0));
  EXPECT_EQ(220, Spdy3PriorityToHttp2Weight(1));
  EXPECT_EQ(183, Spdy3PriorityToHttp2Weight(2));
  EXPECT_EQ(147, Spdy3PriorityToHttp2Weight(3));
  EXPECT_EQ(110, Spdy3PriorityToHttp2Weight(4));
  EXPECT_EQ(74, Spdy3PriorityToHttp2Weight(5));
  EXPECT_EQ(37, Spdy3PriorityToHttp2Weight(6));
  EXPECT_EQ(1, Spdy3PriorityToHttp2Weight(7));
}
// Pins the reverse mapping, checking both endpoints of each weight bucket so
// bucket boundaries are exercised.
TEST(SpdyProtocolTest, Http2WeightToSpdy3Priority) {
  EXPECT_EQ(0u, Http2WeightToSpdy3Priority(256));
  EXPECT_EQ(0u, Http2WeightToSpdy3Priority(221));
  EXPECT_EQ(1u, Http2WeightToSpdy3Priority(220));
  EXPECT_EQ(1u, Http2WeightToSpdy3Priority(184));
  EXPECT_EQ(2u, Http2WeightToSpdy3Priority(183));
  EXPECT_EQ(2u, Http2WeightToSpdy3Priority(148));
  EXPECT_EQ(3u, Http2WeightToSpdy3Priority(147));
  EXPECT_EQ(3u, Http2WeightToSpdy3Priority(111));
  EXPECT_EQ(4u, Http2WeightToSpdy3Priority(110));
  EXPECT_EQ(4u, Http2WeightToSpdy3Priority(75));
  EXPECT_EQ(5u, Http2WeightToSpdy3Priority(74));
  EXPECT_EQ(5u, Http2WeightToSpdy3Priority(38));
  EXPECT_EQ(6u, Http2WeightToSpdy3Priority(37));
  EXPECT_EQ(6u, Http2WeightToSpdy3Priority(2));
  EXPECT_EQ(7u, Http2WeightToSpdy3Priority(1));
}
// Stream-level frame types require a non-zero stream id, connection-level
// types require stream id 0, and WINDOW_UPDATE is valid for both.
TEST(SpdyProtocolTest, IsValidHTTP2FrameStreamId) {
  EXPECT_TRUE(IsValidHTTP2FrameStreamId(1, SpdyFrameType::DATA));
  EXPECT_FALSE(IsValidHTTP2FrameStreamId(0, SpdyFrameType::DATA));
  EXPECT_TRUE(IsValidHTTP2FrameStreamId(1, SpdyFrameType::HEADERS));
  EXPECT_FALSE(IsValidHTTP2FrameStreamId(0, SpdyFrameType::HEADERS));
  EXPECT_TRUE(IsValidHTTP2FrameStreamId(1, SpdyFrameType::PRIORITY));
  EXPECT_FALSE(IsValidHTTP2FrameStreamId(0, SpdyFrameType::PRIORITY));
  EXPECT_TRUE(IsValidHTTP2FrameStreamId(1, SpdyFrameType::RST_STREAM));
  EXPECT_FALSE(IsValidHTTP2FrameStreamId(0, SpdyFrameType::RST_STREAM));
  EXPECT_TRUE(IsValidHTTP2FrameStreamId(1, SpdyFrameType::CONTINUATION));
  EXPECT_FALSE(IsValidHTTP2FrameStreamId(0, SpdyFrameType::CONTINUATION));
  EXPECT_TRUE(IsValidHTTP2FrameStreamId(1, SpdyFrameType::PUSH_PROMISE));
  EXPECT_FALSE(IsValidHTTP2FrameStreamId(0, SpdyFrameType::PUSH_PROMISE));
  EXPECT_FALSE(IsValidHTTP2FrameStreamId(1, SpdyFrameType::GOAWAY));
  EXPECT_TRUE(IsValidHTTP2FrameStreamId(0, SpdyFrameType::GOAWAY));
  EXPECT_FALSE(IsValidHTTP2FrameStreamId(1, SpdyFrameType::SETTINGS));
  EXPECT_TRUE(IsValidHTTP2FrameStreamId(0, SpdyFrameType::SETTINGS));
  EXPECT_FALSE(IsValidHTTP2FrameStreamId(1, SpdyFrameType::PING));
  EXPECT_TRUE(IsValidHTTP2FrameStreamId(0, SpdyFrameType::PING));
  EXPECT_TRUE(IsValidHTTP2FrameStreamId(1, SpdyFrameType::WINDOW_UPDATE));
  EXPECT_TRUE(IsValidHTTP2FrameStreamId(0, SpdyFrameType::WINDOW_UPDATE));
}
// Only registered settings ids parse: 1-6, 8, 9 and the experimental 0xFF45;
// 0, 7, 10 and nearby unregistered values are rejected.
TEST(SpdyProtocolTest, ParseSettingsId) {
  SpdyKnownSettingsId setting_id;
  EXPECT_FALSE(ParseSettingsId(0, &setting_id));
  EXPECT_TRUE(ParseSettingsId(1, &setting_id));
  EXPECT_EQ(SETTINGS_HEADER_TABLE_SIZE, setting_id);
  EXPECT_TRUE(ParseSettingsId(2, &setting_id));
  EXPECT_EQ(SETTINGS_ENABLE_PUSH, setting_id);
  EXPECT_TRUE(ParseSettingsId(3, &setting_id));
  EXPECT_EQ(SETTINGS_MAX_CONCURRENT_STREAMS, setting_id);
  EXPECT_TRUE(ParseSettingsId(4, &setting_id));
  EXPECT_EQ(SETTINGS_INITIAL_WINDOW_SIZE, setting_id);
  EXPECT_TRUE(ParseSettingsId(5, &setting_id));
  EXPECT_EQ(SETTINGS_MAX_FRAME_SIZE, setting_id);
  EXPECT_TRUE(ParseSettingsId(6, &setting_id));
  EXPECT_EQ(SETTINGS_MAX_HEADER_LIST_SIZE, setting_id);
  EXPECT_FALSE(ParseSettingsId(7, &setting_id));
  EXPECT_TRUE(ParseSettingsId(8, &setting_id));
  EXPECT_EQ(SETTINGS_ENABLE_CONNECT_PROTOCOL, setting_id);
  EXPECT_TRUE(ParseSettingsId(9, &setting_id));
  EXPECT_EQ(SETTINGS_DEPRECATE_HTTP2_PRIORITIES, setting_id);
  EXPECT_FALSE(ParseSettingsId(10, &setting_id));
  EXPECT_FALSE(ParseSettingsId(0xFF44, &setting_id));
  EXPECT_TRUE(ParseSettingsId(0xFF45, &setting_id));
  EXPECT_EQ(SETTINGS_EXPERIMENT_SCHEDULER, setting_id);
  EXPECT_FALSE(ParseSettingsId(0xFF46, &setting_id));
}
// Known settings ids render their symbolic names; unknown ids render as
// "SETTINGS_UNKNOWN_<hex>".
TEST(SpdyProtocolTest, SettingsIdToString) {
  struct {
    SpdySettingsId setting_id;
    const std::string expected_string;
  } test_cases[] = {
      {0, "SETTINGS_UNKNOWN_0"},
      {SETTINGS_HEADER_TABLE_SIZE, "SETTINGS_HEADER_TABLE_SIZE"},
      {SETTINGS_ENABLE_PUSH, "SETTINGS_ENABLE_PUSH"},
      {SETTINGS_MAX_CONCURRENT_STREAMS, "SETTINGS_MAX_CONCURRENT_STREAMS"},
      {SETTINGS_INITIAL_WINDOW_SIZE, "SETTINGS_INITIAL_WINDOW_SIZE"},
      {SETTINGS_MAX_FRAME_SIZE, "SETTINGS_MAX_FRAME_SIZE"},
      {SETTINGS_MAX_HEADER_LIST_SIZE, "SETTINGS_MAX_HEADER_LIST_SIZE"},
      {7, "SETTINGS_UNKNOWN_7"},
      {SETTINGS_ENABLE_CONNECT_PROTOCOL, "SETTINGS_ENABLE_CONNECT_PROTOCOL"},
      {SETTINGS_DEPRECATE_HTTP2_PRIORITIES,
       "SETTINGS_DEPRECATE_HTTP2_PRIORITIES"},
      {0xa, "SETTINGS_UNKNOWN_a"},
      {0xFF44, "SETTINGS_UNKNOWN_ff44"},
      {0xFF45, "SETTINGS_EXPERIMENT_SCHEDULER"},
      {0xFF46, "SETTINGS_UNKNOWN_ff46"}};
  for (auto test_case : test_cases) {
    EXPECT_EQ(test_case.expected_string,
              SettingsIdToString(test_case.setting_id));
  }
}
// A SPDY/3-constructed precedence reports defaults for the HTTP/2 view, and
// an HTTP/2-constructed precedence converts its weight back to a SPDY/3
// priority on demand.
TEST(SpdyStreamPrecedenceTest, Basic) {
  SpdyStreamPrecedence spdy3_prec(2);
  EXPECT_TRUE(spdy3_prec.is_spdy3_priority());
  EXPECT_EQ(2, spdy3_prec.spdy3_priority());
  EXPECT_EQ(kHttp2RootStreamId, spdy3_prec.parent_id());
  EXPECT_EQ(Spdy3PriorityToHttp2Weight(2), spdy3_prec.weight());
  EXPECT_FALSE(spdy3_prec.is_exclusive());
  for (bool is_exclusive : {true, false}) {
    SpdyStreamPrecedence h2_prec(7, 123, is_exclusive);
    EXPECT_FALSE(h2_prec.is_spdy3_priority());
    EXPECT_EQ(Http2WeightToSpdy3Priority(123), h2_prec.spdy3_priority());
    EXPECT_EQ(7u, h2_prec.parent_id());
    EXPECT_EQ(123, h2_prec.weight());
    EXPECT_EQ(is_exclusive, h2_prec.is_exclusive());
  }
}
// Constructing with out-of-range priority/weight clamps and reports a bug.
TEST(SpdyStreamPrecedenceTest, Clamping) {
  EXPECT_QUICHE_BUG(EXPECT_EQ(7, SpdyStreamPrecedence(8).spdy3_priority()),
                    "Invalid priority: 8");
  EXPECT_QUICHE_BUG(EXPECT_EQ(kHttp2MinStreamWeight,
                              SpdyStreamPrecedence(3, 0, false).weight()),
                    "Invalid weight: 0");
  EXPECT_QUICHE_BUG(EXPECT_EQ(kHttp2MaxStreamWeight,
                              SpdyStreamPrecedence(3, 300, false).weight()),
                    "Invalid weight: 300");
}
// Copy construction and copy assignment preserve both the SPDY/3 and HTTP/2
// representations, including overwriting one form with the other.
TEST(SpdyStreamPrecedenceTest, Copying) {
  SpdyStreamPrecedence prec1(3);
  SpdyStreamPrecedence copy1(prec1);
  EXPECT_TRUE(copy1.is_spdy3_priority());
  EXPECT_EQ(3, copy1.spdy3_priority());
  SpdyStreamPrecedence prec2(4, 5, true);
  SpdyStreamPrecedence copy2(prec2);
  EXPECT_FALSE(copy2.is_spdy3_priority());
  EXPECT_EQ(4u, copy2.parent_id());
  EXPECT_EQ(5, copy2.weight());
  EXPECT_TRUE(copy2.is_exclusive());
  copy1 = prec2;
  EXPECT_FALSE(copy1.is_spdy3_priority());
  EXPECT_EQ(4u, copy1.parent_id());
  EXPECT_EQ(5, copy1.weight());
  EXPECT_TRUE(copy1.is_exclusive());
  copy2 = prec1;
  EXPECT_TRUE(copy2.is_spdy3_priority());
  EXPECT_EQ(3, copy2.spdy3_priority());
}
// Equality distinguishes every field, and a SPDY/3 precedence is not equal
// to an HTTP/2 precedence even when the derived fields match.
TEST(SpdyStreamPrecedenceTest, Equals) {
  EXPECT_EQ(SpdyStreamPrecedence(3), SpdyStreamPrecedence(3));
  EXPECT_NE(SpdyStreamPrecedence(3), SpdyStreamPrecedence(4));
  EXPECT_EQ(SpdyStreamPrecedence(1, 2, false),
            SpdyStreamPrecedence(1, 2, false));
  EXPECT_NE(SpdyStreamPrecedence(1, 2, false),
            SpdyStreamPrecedence(2, 2, false));
  EXPECT_NE(SpdyStreamPrecedence(1, 2, false),
            SpdyStreamPrecedence(1, 3, false));
  EXPECT_NE(SpdyStreamPrecedence(1, 2, false),
            SpdyStreamPrecedence(1, 2, true));
  SpdyStreamPrecedence spdy3_prec(3);
  SpdyStreamPrecedence h2_prec(spdy3_prec.parent_id(), spdy3_prec.weight(),
                               spdy3_prec.is_exclusive());
  EXPECT_NE(spdy3_prec, h2_prec);
}
// Exercises every SpdyDataIR constructor overload (string_view, char array,
// const/mutable std::string, rvalue string, literal) plus padding's effect
// on flow-control accounting.
TEST(SpdyDataIRTest, Construct) {
  absl::string_view s1;
  SpdyDataIR d1( 1, s1);
  EXPECT_EQ(0u, d1.data_len());
  EXPECT_NE(nullptr, d1.data());
  const char s2[] = "something";
  SpdyDataIR d2( 2, s2);
  EXPECT_EQ(absl::string_view(d2.data(), d2.data_len()), s2);
  EXPECT_NE(absl::string_view(d1.data(), d1.data_len()), s2);
  EXPECT_EQ((int)d1.data_len(), d1.flow_control_window_consumed());
  const std::string foo = "foo";
  SpdyDataIR d3( 3, foo);
  EXPECT_EQ(foo, d3.data());
  EXPECT_EQ((int)d3.data_len(), d3.flow_control_window_consumed());
  std::string bar = "bar";
  SpdyDataIR d4( 4, bar);
  // An lvalue string is copied, leaving the original intact.
  EXPECT_EQ("bar", bar);
  EXPECT_EQ("bar", absl::string_view(d4.data(), d4.data_len()));
  std::string baz = "the quick brown fox";
  SpdyDataIR d5( 5, std::move(baz));
  // An rvalue string is moved from.
  EXPECT_EQ("", baz);
  EXPECT_EQ(absl::string_view(d5.data(), d5.data_len()), "the quick brown fox");
  SpdyDataIR d7( 7, "something else");
  EXPECT_EQ(absl::string_view(d7.data(), d7.data_len()), "something else");
  SpdyDataIR d8( 8, "shawarma");
  d8.set_padding_len(20);
  // Padding counts toward the consumed flow-control window: 8 + 20.
  EXPECT_EQ(28, d8.flow_control_window_consumed());
}
// A SpdySerializedFrame takes ownership of a heap buffer and exposes it via
// data()/size()/begin()/end().
TEST(SpdySerializedFrameTest, Basic) {
  const std::string data = "0123456789";
  auto buffer = std::make_unique<char[]>(data.length());
  memcpy(buffer.get(), &data[0], data.length());
  SpdySerializedFrame frame(std::move(buffer), data.length());
  EXPECT_EQ(data.length(), frame.size());
  EXPECT_EQ(data, std::string(frame.data(), frame.size()));
  EXPECT_EQ(frame.begin(), frame.data());
  EXPECT_EQ(frame.end(), frame.data() + frame.size());
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/core/spdy_protocol.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/core/spdy_protocol_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
4b6aa12d-4153-40bd-b957-b2c2f6d2f921 | cpp | tensorflow/tensorflow | tf2hlo | tensorflow/compiler/mlir/tfrt/transforms/ifrt/tf2hlo.cc | tensorflow/compiler/mlir/tfrt/transforms/ifrt/tf2hlo_test.cc | #include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/tf2hlo.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_constants.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/client_library.h"
#include "xla/hlo/translate/hlo_to_mhlo/hlo_to_mlir_hlo.h"
#include "xla/python/ifrt/client.h"
#include "xla/service/computation_placer.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/shape.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/protobuf/tpu/topology.pb.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
// Symbol name of the entry function looked up in the input MLIR module.
static constexpr absl::string_view kEntryFuncName = "main";
}
// Copies the shape of each entry in `inputs` into the corresponding arg of
// `metadata`, after validating that the replica/core counts are at least 1,
// that arg and input counts match, that every arg is a PARAMETER, and that
// dtypes agree. Returns an InternalError describing the first mismatch.
absl::Status UpdateCompileMetadata(
    tensorflow::tpu::TPUCompileMetadataProto& metadata,
    absl::Span<const DtypeAndShape> inputs) {
  VLOG(3) << "TpuCompileMetadata before shape is populated " << metadata;
  if (metadata.num_replicas() < 1 || metadata.num_cores_per_replica() < 1) {
    return absl::InternalError(
        absl::StrCat("Number of replicas ", metadata.num_replicas(),
                     " and number of cores per replica ",
                     metadata.num_cores_per_replica(), " must be >= 1"));
  }
  if (metadata.args_size() != inputs.size()) {
    return absl::InternalError(
        absl::StrCat("Number of inputs mismatched! Expected ",
                     metadata.args_size(), " got ", inputs.size()));
  }
  for (int i = 0; i < metadata.args_size(); ++i) {
    if (metadata.args(i).kind() !=
        tensorflow::tpu::TPUCompileMetadataProto::Arg::PARAMETER) {
      return absl::InternalError(absl::StrCat(
          "Only support PARAMETER, but got ", metadata.args(i).kind()));
    }
    if (metadata.args(i).dtype() != inputs[i].dtype) {
      return absl::InternalError(absl::StrCat("Dtype mismatched! Expected ",
                                              metadata.args(i).dtype(), " got ",
                                              inputs[i].dtype));
    }
    // Shapes come from the runtime inputs, not from the serialized metadata.
    *metadata.mutable_args(i)->mutable_shape() = inputs[i].shape.AsProto();
  }
  return absl::OkStatus();
}
// Parses the TPUCompileMetadataProto stored as a text-format attribute on the
// module's `main` function. If the parsed metadata has no device assignment,
// fills in the client's default assignment for the configured replica/core
// counts. Fails if the entry function or the attribute is missing/unparsable.
absl::StatusOr<tensorflow::tpu::TPUCompileMetadataProto> GetCompileMetadata(
    mlir::ModuleOp module, const xla::ifrt::Client& ifrt_client) {
  tensorflow::tpu::TPUCompileMetadataProto metadata;
  auto op = module.lookupSymbol<mlir::func::FuncOp>(kEntryFuncName);
  if (!op) {
    return absl::InternalError("Could not find entry function in MLIR Module.");
  }
  auto metadata_text_attr =
      op->getAttrOfType<mlir::StringAttr>(kMetadataTextAttrName);
  if (metadata_text_attr && !metadata_text_attr.getValue().empty()) {
    VLOG(1) << "Parsing from attribute " << kMetadataTextAttrName
            << metadata_text_attr.getValue().str();
    if (!tsl::protobuf::TextFormat::ParseFromString(
            metadata_text_attr.getValue().str(), &metadata)) {
      return absl::InvalidArgumentError(absl::StrCat(
          "Attribute ", kMetadataTextAttrName, ":",
          metadata_text_attr.getValue().str(), " cannot be parsed"));
    }
  } else {
    return absl::InvalidArgumentError(
        absl::StrCat("Missing ", kMetadataTextAttrName));
  }
  // Fall back to the IFRT client's default placement when the serialized
  // metadata did not pin a device assignment.
  if (!metadata.has_device_assignment()) {
    TF_ASSIGN_OR_RETURN(
        auto device_assignment,
        ifrt_client.GetDefaultDeviceAssignment(
            metadata.num_replicas(), metadata.num_cores_per_replica()));
    xla::DeviceAssignmentProto device_assignment_proto;
    device_assignment.Serialize(&device_assignment_proto);
    *metadata.mutable_device_assignment() = device_assignment_proto;
  }
  return metadata;
}
// Lowers the TF MLIR `module` to an MHLO module via the TF2XLA bridge
// (phase 2), using `compile_metadata` for sharding/placement and `inputs`
// for argument shapes. Rejects programs whose per-core argument shapes
// differ across cores (only even SPMD sharding is supported). Returns the
// MHLO module together with the compile and host-compute metadata.
absl::StatusOr<Tf2HloResult> CompileTfToHlo(
    mlir::ModuleOp module, absl::Span<const DtypeAndShape> inputs,
    absl::string_view entry_function_name, const xla::ifrt::Client& ifrt_client,
    const tensorflow::tpu::TPUCompileMetadataProto& compile_metadata,
    tensorflow::XlaHelpers::ShapeRepresentationFn shape_representation_fn) {
  if (VLOG_IS_ON(1)) {
    tensorflow::DumpMlirOpToFile("ifrt_before_bridge_phase2", module);
  }
  // The bridge consumes a serialized copy of the module.
  tpu::MlirToHloArgs mlir_to_hlo_args;
  std::string module_str = tensorflow::SerializeMlirModule(module);
  mlir_to_hlo_args.mlir_module = module_str;
  mlir_to_hlo_args.rollout_state =
      ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_DISABLED;
  TF_ASSIGN_OR_RETURN(
      auto* platform,
      stream_executor::PlatformManager::PlatformWithName("Host"));
  TF_ASSIGN_OR_RETURN(
      auto* client, xla::ClientLibrary::GetOrCreateCompileOnlyClient(platform));
  std::vector<TensorShape> arg_shapes;
  for (const auto& input : inputs) {
    arg_shapes.push_back(input.shape);
  }
  bool use_tuple_args = false;
  std::vector<tpu::ShardingAndIndex> arg_core_mapping;
  std::vector<std::vector<xla::Shape>> per_core_arg_shapes;
  std::vector<std::unique_ptr<mlir::Pass>> custom_legalization_passes;
  TF_ASSIGN_OR_RETURN(
      tensorflow::XlaCompiler::CompilationResult compilation_result,
      tensorflow::tf2xla::v2::LegalizeMlirToHlo(
          mlir_to_hlo_args, compile_metadata, use_tuple_args,
          "XLA_TPU_JIT", custom_legalization_passes,
          tensorflow::XlaShapeLayoutHelpers::ShapeDeterminationFns(
              tensorflow::UseNoPreferenceLayoutFn(), shape_representation_fn),
          arg_shapes, &arg_core_mapping, &per_core_arg_shapes, client));
  // Every core must see identical argument shapes (even SPMD sharding).
  for (auto arg_shapes_iter = per_core_arg_shapes.begin() + 1;
       arg_shapes_iter != per_core_arg_shapes.end(); ++arg_shapes_iter) {
    if (per_core_arg_shapes.front() != *arg_shapes_iter) {
      return absl::UnimplementedError(
          "Only support even sharding SPMD, but get "
          "different shapes across cores");
    }
  }
  Tf2HloResult result;
  result.mlir_hlo_module = xla::llvm_ir::CreateMlirModuleOp(module->getLoc());
  result.compile_metadata = std::move(compile_metadata);
  result.host_compute_metadata = compilation_result.host_compute_metadata;
  // Convert the legalized HLO computation back into an MHLO module.
  TF_RETURN_IF_ERROR(xla::ConvertHloToMlirHlo(
      *result.mlir_hlo_module, &compilation_result.computation->proto()));
  if (VLOG_IS_ON(1)) {
    tensorflow::DumpMlirOpToFile("ifrt_after_bridge_phase2",
                                 result.mlir_hlo_module.get());
  }
  return result;
}
}
} | #include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/tf2hlo.h"
#include <memory>
#include <ostream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/InitAllDialects.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
// gMock matcher that compares protos by their serialized byte strings.
// NOTE(review): serialized-bytes equality assumes deterministic
// serialization of both protos; sufficient for these fixed test protos.
class ProtoStringMatcher {
 public:
  explicit ProtoStringMatcher(const tsl::protobuf::Message& expected)
      : expected_(expected.SerializeAsString()) {}
  template <typename Message>
  bool MatchAndExplain(const Message& p,
                       ::testing::MatchResultListener*) const {
    return p.SerializeAsString() == expected_;
  }
  void DescribeTo(::std::ostream* os) const { *os << expected_; }
  void DescribeNegationTo(::std::ostream* os) const {
    *os << "not equal to expected message: " << expected_;
  }
 private:
  const std::string expected_;
};
// Convenience wrapper producing a polymorphic EqualsProto(x) matcher.
inline ::testing::PolymorphicMatcher<ProtoStringMatcher> EqualsProto(
    const tsl::protobuf::Message& x) {
  return ::testing::MakePolymorphicMatcher(ProtoStringMatcher(x));
}
// Compiling an empty module with no inputs succeeds.
TEST(Tf2HloTest, Empty) {
  constexpr absl::string_view kDataDirectory =
      "tensorflow/compiler/mlir/tfrt/transforms/ifrt/testdata";
  std::string mlir_module_path = tensorflow::GetDataDependencyFilepath(
      absl::StrCat(kDataDirectory, "/tf2hlo_empty.mlir"));
  mlir::DialectRegistry registry;
  mlir::registerAllDialects(registry);
  mlir::RegisterAllTensorFlowDialects(registry);
  mlir::MLIRContext context(registry);
  mlir::OwningOpRef<mlir::ModuleOp> mlir_module =
      mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context);
  ASSERT_TRUE(mlir_module);
  ASSERT_TRUE(mlir_module.get() != nullptr);
  TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
                          xla::ifrt::test_util::GetClient());
  TF_ASSERT_OK_AND_ASSIGN(
      tensorflow::tpu::TPUCompileMetadataProto compile_metadata,
      GetCompileMetadata(mlir_module.get(), *client));
  TF_ASSERT_OK(UpdateCompileMetadata(compile_metadata, {}));
  auto result =
      CompileTfToHlo(mlir_module.get(), {}, "main", *client, compile_metadata,
                     tensorflow::IdentityShapeRepresentationFn());
  TF_ASSERT_OK(result.status());
}
// Compiling a module that returns a tuple of two float arguments succeeds.
TEST(Tf2HloTest, Tuple) {
  constexpr absl::string_view kDataDirectory =
      "tensorflow/compiler/mlir/tfrt/transforms/ifrt/testdata";
  std::string mlir_module_path = tensorflow::GetDataDependencyFilepath(
      absl::StrCat(kDataDirectory, "/tf2hlo_tuple.mlir"));
  mlir::DialectRegistry registry;
  mlir::registerAllDialects(registry);
  mlir::RegisterAllTensorFlowDialects(registry);
  mlir::MLIRContext context(registry);
  mlir::OwningOpRef<mlir::ModuleOp> mlir_module =
      mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context);
  ASSERT_TRUE(mlir_module);
  ASSERT_TRUE(mlir_module.get() != nullptr);
  TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
                          xla::ifrt::test_util::GetClient());
  std::vector<DtypeAndShape> dtype_and_shapes;
  dtype_and_shapes.push_back(DtypeAndShape{DT_FLOAT, {1, 3}});
  dtype_and_shapes.push_back(DtypeAndShape{DT_FLOAT, {3, 1}});
  TF_ASSERT_OK_AND_ASSIGN(
      tensorflow::tpu::TPUCompileMetadataProto compile_metadata,
      GetCompileMetadata(mlir_module.get(), *client));
  TF_ASSERT_OK(UpdateCompileMetadata(compile_metadata, dtype_and_shapes));
  auto result = CompileTfToHlo(mlir_module.get(), dtype_and_shapes, "main",
                               *client, compile_metadata,
                               tensorflow::IdentityShapeRepresentationFn());
  TF_ASSERT_OK(result.status());
}
// An SPMD-sharded module whose metadata already carries a device assignment
// compiles, and the resulting compile metadata (shapes populated, sharding
// and device assignment preserved) matches the golden proto exactly.
TEST(Tf2HloTest, Spmd) {
  constexpr absl::string_view kDataDirectory =
      "tensorflow/compiler/mlir/tfrt/transforms/ifrt/testdata";
  std::string mlir_module_path = tensorflow::GetDataDependencyFilepath(
      absl::StrCat(kDataDirectory, "/tf2hlo_spmd_with_device_assignment.mlir"));
  mlir::DialectRegistry registry;
  mlir::registerAllDialects(registry);
  mlir::RegisterAllTensorFlowDialects(registry);
  mlir::MLIRContext context(registry);
  mlir::OwningOpRef<mlir::ModuleOp> mlir_module =
      mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context);
  ASSERT_TRUE(mlir_module);
  ASSERT_TRUE(mlir_module.get() != nullptr);
  TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
                          xla::ifrt::test_util::GetClient());
  std::vector<DtypeAndShape> dtype_and_shapes;
  dtype_and_shapes.push_back(DtypeAndShape{DT_FLOAT, {4, 64}});
  TF_ASSERT_OK_AND_ASSIGN(
      tensorflow::tpu::TPUCompileMetadataProto compile_metadata,
      GetCompileMetadata(mlir_module.get(), *client));
  TF_ASSERT_OK(UpdateCompileMetadata(compile_metadata, dtype_and_shapes));
  auto result = CompileTfToHlo(mlir_module.get(), dtype_and_shapes, "main",
                               *client, compile_metadata,
                               tensorflow::IdentityShapeRepresentationFn());
  LOG(INFO) << result->compile_metadata;
  TF_ASSERT_OK(result.status());
  tensorflow::tpu::TPUCompileMetadataProto expected_compile_metadata;
  ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
      R"pb(
        args {
          dtype: DT_FLOAT
          shape {
            dim { size: 4 }
            dim { size: 64 }
          }
          kind: PARAMETER
          sharding {
            type: OTHER
            tile_assignment_dimensions: 2
            tile_assignment_dimensions: 1
            tile_assignment_devices: 0
            tile_assignment_devices: 1
          }
          is_bounded_dynamic_dim: false
        }
        retvals { sharding {} }
        num_replicas: 1
        num_cores_per_replica: 2
        device_assignment {
          replica_count: 1
          computation_count: 2
          computation_devices { replica_device_ids: 0 }
          computation_devices { replica_device_ids: 1 }
        }
        use_spmd_for_xla_partitioning: true
        compile_options {}
      )pb",
      &expected_compile_metadata));
  EXPECT_THAT(result->compile_metadata, EqualsProto(expected_compile_metadata));
}
// When the module's metadata lacks a device assignment, GetCompileMetadata
// fills in the client's default one; the final compile metadata matches the
// golden proto including that default assignment.
TEST(Tf2HloTest, UsingDefaultDeviceAssignment) {
  constexpr absl::string_view kDataDirectory =
      "tensorflow/compiler/mlir/tfrt/transforms/ifrt/testdata";
  std::string mlir_module_path = tensorflow::GetDataDependencyFilepath(
      absl::StrCat(kDataDirectory, "/tf2hlo_spmd_no_device_assignment.mlir"));
  mlir::DialectRegistry registry;
  mlir::registerAllDialects(registry);
  mlir::RegisterAllTensorFlowDialects(registry);
  mlir::MLIRContext context(registry);
  mlir::OwningOpRef<mlir::ModuleOp> mlir_module =
      mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context);
  ASSERT_TRUE(mlir_module);
  ASSERT_TRUE(mlir_module.get() != nullptr);
  TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
                          xla::ifrt::test_util::GetClient());
  std::vector<DtypeAndShape> dtype_and_shapes;
  dtype_and_shapes.push_back(DtypeAndShape{DT_FLOAT, {4, 64}});
  dtype_and_shapes.push_back(DtypeAndShape{DT_FLOAT, {64, 10}});
  dtype_and_shapes.push_back(DtypeAndShape{DT_FLOAT, {1, 4}});
  TF_ASSERT_OK_AND_ASSIGN(
      tensorflow::tpu::TPUCompileMetadataProto compile_metadata,
      GetCompileMetadata(mlir_module.get(), *client));
  TF_ASSERT_OK(UpdateCompileMetadata(compile_metadata, dtype_and_shapes));
  auto result = CompileTfToHlo(mlir_module.get(), dtype_and_shapes, "main",
                               *client, compile_metadata,
                               tensorflow::IdentityShapeRepresentationFn());
  LOG(INFO) << result->compile_metadata;
  TF_ASSERT_OK(result.status());
  tensorflow::tpu::TPUCompileMetadataProto expected_compile_metadata;
  ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
      R"pb(
        args {
          dtype: DT_FLOAT
          shape {
            dim { size: 4 }
            dim { size: 64 }
          }
          kind: PARAMETER
          sharding {
            type: OTHER
            tile_assignment_dimensions: 2
            tile_assignment_dimensions: 1
            tile_assignment_devices: 0
            tile_assignment_devices: 1
          }
          is_bounded_dynamic_dim: false
        }
        args {
          dtype: DT_FLOAT
          shape {
            dim { size: 64 }
            dim { size: 10 }
          }
          kind: PARAMETER
          sharding {
            type: OTHER
            tile_assignment_dimensions: 2
            tile_assignment_dimensions: 1
            tile_assignment_devices: 0
            tile_assignment_devices: 1
          }
          is_bounded_dynamic_dim: false
        }
        args {
          dtype: DT_FLOAT
          shape {
            dim { size: 1 }
            dim { size: 4 }
          }
          kind: PARAMETER
          is_bounded_dynamic_dim: false
        }
        retvals { sharding {} }
        num_replicas: 1
        num_cores_per_replica: 2
        device_assignment {
          replica_count: 1
          computation_count: 2
          computation_devices { replica_device_ids: 0 }
          computation_devices { replica_device_ids: 1 }
        }
        use_spmd_for_xla_partitioning: true
        compile_options {}
      )pb",
      &expected_compile_metadata));
  EXPECT_THAT(result->compile_metadata, EqualsProto(expected_compile_metadata));
}
// A module with a host callback produces host-compute metadata: exactly one
// device-to-host channel with two tensors, and no host-to-device channels.
TEST(Tf2HloTest, XlaCallHostCallback) {
  constexpr absl::string_view kDataDirectory =
      "tensorflow/compiler/mlir/tfrt/transforms/ifrt/testdata";
  std::string mlir_module_path = tensorflow::GetDataDependencyFilepath(
      absl::StrCat(kDataDirectory, "/xla_call_host_callback.mlir"));
  mlir::DialectRegistry registry;
  mlir::registerAllDialects(registry);
  mlir::RegisterAllTensorFlowDialects(registry);
  mlir::MLIRContext context(registry);
  mlir::OwningOpRef<mlir::ModuleOp> mlir_module =
      mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path,
                                            mlir::ParserConfig(&context));
  ASSERT_TRUE(mlir_module);
  ASSERT_TRUE(mlir_module.get() != nullptr);
  TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
                          xla::ifrt::test_util::GetClient());
  std::vector<DtypeAndShape> dtype_and_shapes;
  dtype_and_shapes.push_back(DtypeAndShape{DT_INT32, {1}});
  dtype_and_shapes.push_back(DtypeAndShape{DT_INT32, {1}});
  TF_ASSERT_OK_AND_ASSIGN(
      tensorflow::tpu::TPUCompileMetadataProto compile_metadata,
      GetCompileMetadata(mlir_module.get(), *client));
  TF_ASSERT_OK(UpdateCompileMetadata(compile_metadata, dtype_and_shapes));
  auto result = CompileTfToHlo(mlir_module.get(), dtype_and_shapes, "main",
                               *client, compile_metadata,
                               tensorflow::IdentityShapeRepresentationFn());
  TF_ASSERT_OK(result.status());
  ASSERT_EQ((*result).host_compute_metadata.device_to_host().size(), 1);
  ASSERT_EQ(
      (*result).host_compute_metadata.device_to_host().begin()->metadata_size(),
      2);
  ASSERT_EQ((*result).host_compute_metadata.host_to_device().size(), 0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/transforms/ifrt/tf2hlo.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/transforms/ifrt/tf2hlo_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
eacfe416-636a-4216-9513-71b894e575ba | cpp | tensorflow/tensorflow | report | tensorflow/compiler/mlir/quantization/stablehlo/cc/report.cc | tensorflow/compiler/mlir/quantization/stablehlo/cc/report_test.cc | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/report.h"
#include <optional>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tsl/platform/protobuf.h"
namespace mlir::quant::stablehlo {
namespace {
using ::stablehlo::quantization::Method;
using ::stablehlo::quantization::QuantizationResult;
using ::stablehlo::quantization::QuantizationResults;
using ::stablehlo::quantization::io::WriteStringToFile;
using ::tsl::protobuf::TextFormat;
std::string GetCompositeFunctionName(const StringRef quantized_func_name) {
return Twine(kCompositeFuncPrefix)
.concat(quantized_func_name.rsplit(kQuantizedFuncPrefix).second)
.str();
}
// Returns the quantization result for a call to a quantized function, or
// std::nullopt if the callee is not a quantized function. Emits an error on
// the op (and returns std::nullopt) when the attached quantization method
// cannot be retrieved.
std::optional<QuantizationResult> GetQuantizationResult(func::CallOp call_op) {
  const StringRef callee_name = call_op.getCalleeAttr().getValue();
  if (!callee_name.starts_with(kQuantizedFuncPrefix)) {
    return std::nullopt;
  }
  absl::StatusOr<Method> method = GetQuantizationMethod(call_op);
  if (!method.ok()) {
    call_op->emitError() << "Failed to get quantization method: "
                         << method.status().ToString();
    return std::nullopt;
  }
  QuantizationResult result{};
  // Report the unit under its original composite function name.
  result.mutable_quantizable_unit()->set_name(
      GetCompositeFunctionName(callee_name));
  *result.mutable_method() = std::move(*method);
  return result;
}
// Returns a "no quantization" result for an XlaCallModuleOp that still wraps
// a composite function (i.e. a unit that was not quantized); std::nullopt
// when the op has no original-entry-function attribute or it is not a
// composite function.
std::optional<QuantizationResult> GetQuantizationResult(
    TF::XlaCallModuleOp xla_call_module_op) {
  const StringAttr callee_name_attr =
      mlir::dyn_cast_or_null<StringAttr>(xla_call_module_op->getDiscardableAttr(
          kOriginalStablehloEntryFunctionAttrName));
  if (callee_name_attr == nullptr) return std::nullopt;
  if (!callee_name_attr.getValue().starts_with(kCompositeFuncPrefix)) {
    return std::nullopt;
  }
  QuantizationResult result{};
  result.mutable_quantizable_unit()->set_name(
      callee_name_attr.getValue().str());
  result.mutable_method()->mutable_no_quantization();
  return result;
}
// Walks `module_op` for calls to quantized functions and appends one
// QuantizationResult per match to `results`.
void PopulateQuantizedResults(ModuleOp module_op,
                              QuantizationResults& results) {
  module_op.walk([&results](func::CallOp call_op) {
    std::optional<QuantizationResult> result = GetQuantizationResult(call_op);
    if (result == std::nullopt) return WalkResult::skip();
    *results.add_results() = std::move(*result);
    return WalkResult::advance();
  });
}
// Walks `module_op` for XlaCallModuleOps still wrapping composite functions
// (i.e. units left unquantized) and appends one result per match.
void PopulateNonQuantizedResults(ModuleOp module_op,
                                 QuantizationResults& results) {
  module_op.walk([&results](TF::XlaCallModuleOp xla_call_module_op) {
    std::optional<QuantizationResult> result =
        GetQuantizationResult(xla_call_module_op);
    if (result == std::nullopt) return WalkResult::skip();
    *results.add_results() = std::move(*result);
    return WalkResult::advance();
  });
}
}
// Builds a report by scanning `module_op` for quantized and non-quantized
// units.
QuantizationReport::QuantizationReport(ModuleOp module_op)
    : quantization_results_(CollectResultsFromModuleOp(module_op)) {}
// Gathers quantized results first, then the remaining non-quantized ones.
QuantizationResults QuantizationReport::CollectResultsFromModuleOp(
    ModuleOp module_op) const {
  QuantizationResults results{};
  PopulateQuantizedResults(module_op, results);
  PopulateNonQuantizedResults(module_op, results);
  return results;
}
// Appends a single result to the report, taking ownership of `result`.
void QuantizationReport::AddQuantizationResult(QuantizationResult&& result) {
  *quantization_results_.add_results() = std::move(result);
}
// Renders the collected results as human-readable text-format proto, framed
// by report begin/end banners.
std::string QuantizationReport::ToString() const {
  std::string serialized_results{};
  TextFormat::PrintToString(quantization_results_, &serialized_results);
  return absl::StrCat("===== Quantization Report =====\n\n", serialized_results,
                      "\n===== Quantization Report End =====\n\n");
}
// Writes the human-readable report to stdout and flushes immediately.
void QuantizationReport::Print() const {
  llvm::outs() << ToString();
  llvm::outs().flush();
}
// Serializes the results as text-format proto (without the banners used by
// ToString) and writes them to `file_path`.
absl::Status QuantizationReport::Save(const StringRef file_path) const {
  std::string serialized_results{};
  TextFormat::PrintToString(GetQuantizationResults(), &serialized_results);
  return WriteStringToFile(file_path, serialized_results);
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/report.h"
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status_matchers.h"
namespace mlir::quant::stablehlo {
namespace {
using ::stablehlo::quantization::Method;
using ::stablehlo::quantization::QuantizableUnit;
using ::stablehlo::quantization::QuantizationResult;
using ::stablehlo::quantization::QuantizationResults;
using ::stablehlo::quantization::io::ReadFileToString;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::testing::TempDir;
using ::tsl::protobuf::TextFormat;
using ::tsl::testing::IsOk;
using QuantizationReportTest = ::mlir::quant::QuantizationTestBase;
// A default-constructed report holds no results.
TEST_F(QuantizationReportTest, GetQuantizationResultsReturnsEmptyResults) {
  QuantizationReport report{};
  const QuantizationResults& results = report.GetQuantizationResults();
  ASSERT_THAT(results.results(), IsEmpty());
}
// A manually-added result is retrievable with its unit name and method intact.
TEST_F(QuantizationReportTest, AddQuantizationResult) {
  QuantizationResult result{};
  QuantizableUnit& quantizable_unit = *result.mutable_quantizable_unit();
  quantizable_unit.set_name("quantized_my_function");
  Method& method = *result.mutable_method();
  method.mutable_no_quantization();
  QuantizationReport report{};
  report.AddQuantizationResult(std::move(result));
  const QuantizationResults& results = report.GetQuantizationResults();
  ASSERT_THAT(results.results(), SizeIs(1));
  const QuantizationResult& first_result = results.results(0);
  EXPECT_THAT(first_result.quantizable_unit().name(),
              StrEq("quantized_my_function"));
  EXPECT_TRUE(first_result.method().has_no_quantization());
}
TEST_F(QuantizationReportTest, InitializeWithModuleOp) {
constexpr absl::string_view kQuantizedDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.constant() {value = dense<127> : tensor<2x3xi8>} : () -> tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>
%1 = stablehlo.uniform_quantize %arg0 : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>
%2 = call @quantized_dot_general_fn(%1, %0) {_quantization_method = "static_range_ptq { }"} : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
%3 = stablehlo.uniform_dequantize %2 : (tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>) -> tensor<1x3xf32>
return %3 : tensor<1x3xf32>
}
func.func private @quantized_dot_general_fn(%arg0: tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, %arg1: tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>> {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>
%1 = stablehlo.uniform_quantize %0 : (tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
return %1 : tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kQuantizedDotGeneral);
ASSERT_TRUE(module_op);
const QuantizationReport report(*module_op);
const QuantizationResults& results = report.GetQuantizationResults();
ASSERT_THAT(results.results(), SizeIs(1));
const QuantizationResult& result = results.results(0);
EXPECT_THAT(result.quantizable_unit().name(),
StrEq("composite_dot_general_fn"));
EXPECT_TRUE(result.method().has_static_range_ptq());
}
TEST_F(QuantizationReportTest,
InitializeWithModuleOpWithoutQuantizationMethodAttribute) {
constexpr absl::string_view
kQuantizedDotGeneralMissingQuantizationMethodAttr = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.constant() {value = dense<127> : tensor<2x3xi8>} : () -> tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>
%1 = stablehlo.uniform_quantize %arg0 : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>
%2 = call @quantized_dot_general_fn(%1, %0) : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
%3 = stablehlo.uniform_dequantize %2 : (tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>) -> tensor<1x3xf32>
return %3 : tensor<1x3xf32>
}
func.func private @quantized_dot_general_fn(%arg0: tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, %arg1: tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>> {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>
%1 = stablehlo.uniform_quantize %0 : (tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
return %1 : tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kQuantizedDotGeneralMissingQuantizationMethodAttr);
ASSERT_TRUE(module_op);
const QuantizationReport report(*module_op);
const QuantizationResults& results = report.GetQuantizationResults();
ASSERT_THAT(results.results(), IsEmpty());
}
TEST_F(QuantizationReportTest, InitializeWithModuleOpWithInvalidCalleeName) {
constexpr absl::string_view kQuantizedDotGeneralWithInvalidCalleeName =
R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.constant() {value = dense<127> : tensor<2x3xi8>} : () -> tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>
%1 = stablehlo.uniform_quantize %arg0 : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>
%2 = call @invalid_quantized_dot_general_fn(%1, %0) {_quantization_method = "static_range_ptq { }"} : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
%3 = stablehlo.uniform_dequantize %2 : (tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>) -> tensor<1x3xf32>
return %3 : tensor<1x3xf32>
}
func.func private @invalid_quantized_dot_general_fn(%arg0: tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, %arg1: tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>> {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>
%1 = stablehlo.uniform_quantize %0 : (tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
return %1 : tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kQuantizedDotGeneralWithInvalidCalleeName);
ASSERT_TRUE(module_op);
const QuantizationReport report(*module_op);
const QuantizationResults& results = report.GetQuantizationResults();
ASSERT_THAT(results.results(), IsEmpty());
}
TEST_F(QuantizationReportTest, InitializeWithModuleOpWithNonQuantizedOp) {
constexpr absl::string_view kNonQuantizedDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.constant dense<3.000000e+0> : tensor<2x3xf32>
%1 = "tf.XlaCallModule"(%arg0, %0) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn, _original_entry_function = "composite_dot_general_fn", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %1 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kNonQuantizedDotGeneral);
ASSERT_TRUE(module_op);
const QuantizationReport report(*module_op);
const QuantizationResults& results = report.GetQuantizationResults();
ASSERT_THAT(results.results(), SizeIs(1));
const QuantizationResult& result = results.results(0);
EXPECT_THAT(result.quantizable_unit().name(),
StrEq("composite_dot_general_fn"));
EXPECT_TRUE(result.method().has_no_quantization());
}
TEST_F(QuantizationReportTest,
InitializeWithModuleOpWithQuantizedAndNonQuantizedOps) {
constexpr absl::string_view kQuantizedDotGeneralAndNonQuantizedDotGeneral =
R"mlir(
func.func @main(%arg0: tensor<1x2xf32>, %arg1: tensor<1x2xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.constant dense<3.000000e+0> : tensor<2x3xf32>
%1 = "tf.XlaCallModule"(%arg0, %0) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
%2 = stablehlo.constant() {value = dense<127> : tensor<2x3xi8>} : () -> tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>
%3 = stablehlo.uniform_quantize %arg1 : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>
%4 = call @quantized_dot_general_fn_2(%3, %2) {_quantization_method = "static_range_ptq { }"} : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
%5 = stablehlo.uniform_dequantize %4 : (tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>) -> tensor<1x3xf32>
%6 = stablehlo.add %1, %5 : tensor<1x3xf32>
return %6 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn_1(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
func.func private @quantized_dot_general_fn_2(%arg0: tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, %arg1: tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>> {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>
%1 = stablehlo.uniform_quantize %0 : (tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
return %1 : tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kQuantizedDotGeneralAndNonQuantizedDotGeneral);
ASSERT_TRUE(module_op);
const QuantizationReport report(*module_op);
const QuantizationResults& results = report.GetQuantizationResults();
ASSERT_THAT(results.results(), SizeIs(2));
const QuantizationResult& quantized_result = results.results(0);
EXPECT_THAT(quantized_result.quantizable_unit().name(),
StrEq("composite_dot_general_fn_2"));
EXPECT_TRUE(quantized_result.method().has_static_range_ptq());
const QuantizationResult& non_quantized_result = results.results(1);
EXPECT_THAT(non_quantized_result.quantizable_unit().name(),
StrEq("composite_dot_general_fn_1"));
EXPECT_TRUE(non_quantized_result.method().has_no_quantization());
}
// ToString() contains the banner lines and the text-format serialization of
// the collected results.
TEST_F(QuantizationReportTest, ToString) {
  QuantizationResult result{};
  QuantizableUnit& quantizable_unit = *result.mutable_quantizable_unit();
  quantizable_unit.set_name("quantized_my_function");
  Method& method = *result.mutable_method();
  method.mutable_no_quantization();
  QuantizationReport report{};
  report.AddQuantizationResult(std::move(result));
  std::string result_str{};
  TextFormat::PrintToString(report.GetQuantizationResults(), &result_str);
  EXPECT_THAT(report.ToString(), HasSubstr("Quantization Report"));
  EXPECT_THAT(report.ToString(), HasSubstr(result_str));
  EXPECT_THAT(report.ToString(), HasSubstr("Quantization Report End"));
}
TEST_F(QuantizationReportTest, Save) {
constexpr absl::string_view kQuantizedDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.constant() {value = dense<127> : tensor<2x3xi8>} : () -> tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>
%1 = stablehlo.uniform_quantize %arg0 : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>
%2 = call @quantized_dot_general_fn(%1, %0) {_quantization_method = "static_range_ptq { }"} : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
%3 = stablehlo.uniform_dequantize %2 : (tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>) -> tensor<1x3xf32>
return %3 : tensor<1x3xf32>
}
func.func private @quantized_dot_general_fn(%arg0: tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, %arg1: tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>> {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>
%1 = stablehlo.uniform_quantize %0 : (tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
return %1 : tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kQuantizedDotGeneral);
ASSERT_TRUE(module_op);
const QuantizationReport report(*module_op);
const std::string dst_file_path =
absl::StrCat(TempDir(), "/quantization_report.txtpb");
const absl::Status save_status = report.Save(dst_file_path);
ASSERT_THAT(save_status, IsOk());
const absl::StatusOr<std::string> file_data = ReadFileToString(dst_file_path);
ASSERT_THAT(file_data, IsOk());
QuantizationResults results{};
ASSERT_TRUE(TextFormat::ParseFromString(*file_data, &results));
ASSERT_THAT(results.results(), SizeIs(1));
EXPECT_THAT(results.results(0).quantizable_unit().name(),
StrEq("composite_dot_general_fn"));
EXPECT_TRUE(results.results(0).method().has_static_range_ptq());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/report.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/report_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
52112969-3335-4fe7-9ed1-cd823a39f211 | cpp | google/tensorstore | oauth2_auth_provider | tensorstore/internal/oauth2/oauth2_auth_provider.cc | tensorstore/internal/oauth2/oauth2_auth_provider_test.cc | #include "tensorstore/internal/oauth2/oauth2_auth_provider.h"
#include <functional>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/oauth2/auth_provider.h"
#include "tensorstore/internal/oauth2/bearer_token.h"
#include "tensorstore/internal/oauth2/oauth_utils.h"
#include "tensorstore/internal/oauth2/refreshable_auth_provider.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_oauth2 {
namespace {
using ::tensorstore::Result;
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::tensorstore::internal_http::HttpResponse;
// Builds the form-encoded body for an OAuth2 "refresh_token" grant request.
// Each credential field is percent-encoded before being spliced into the
// query string.
std::string MakePayload(const internal_oauth2::RefreshToken& creds) {
  return tensorstore::StrCat(
      "grant_type=refresh_token",
      "&client_id=", internal::PercentEncodeUriComponent(creds.client_id),
      "&client_secret=",
      internal::PercentEncodeUriComponent(creds.client_secret),
      "&refresh_token=",
      internal::PercentEncodeUriComponent(creds.refresh_token));
}
}
// Constructs the provider. The refresh-request body is precomputed once from
// `creds`; `uri` is the token endpoint; `clock` (injectable for tests) is
// forwarded to the RefreshableAuthProvider base.
OAuth2AuthProvider::OAuth2AuthProvider(
    const RefreshToken& creds, std::string uri,
    std::shared_ptr<internal_http::HttpTransport> transport,
    std::function<absl::Time()> clock)
    : RefreshableAuthProvider(std::move(clock)),
      refresh_payload_(MakePayload(creds)),
      uri_(std::move(uri)),
      transport_(std::move(transport)) {}
// Issues an HTTP request with the given `method`, target `uri`, and body
// `payload` over the injected transport, blocking until a response (or
// error) is available.
Result<HttpResponse> OAuth2AuthProvider::IssueRequest(std::string_view method,
                                                      std::string_view uri,
                                                      absl::Cord payload) {
  return transport_
      ->IssueRequest(
          HttpRequestBuilder(method, std::string{uri}).BuildRequest(),
          internal_http::IssueRequestOptions(std::move(payload)))
      .result();
}
// Exchanges the stored refresh token for a fresh access token: POSTs the
// precomputed payload to the token endpoint, validates the HTTP status, and
// parses the OAuth response into a bearer token with an expiration time.
Result<BearerTokenWithExpiration> OAuth2AuthProvider::Refresh() {
  // Capture the time before the request so the computed expiration is
  // conservative (the token cannot outlive now + expires_in).
  const auto now = GetCurrentTime();
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto response, IssueRequest("POST", uri_, absl::Cord(refresh_payload_)));
  TENSORSTORE_RETURN_IF_ERROR(HttpResponseCodeToStatus(response));
  TENSORSTORE_ASSIGN_OR_RETURN(auto result, internal_oauth2::ParseOAuthResponse(
                                                response.payload.Flatten()));
  return BearerTokenWithExpiration{std::move(result.access_token),
                                   now + absl::Seconds(result.expires_in)};
}
}
} | #include "tensorstore/internal/oauth2/oauth2_auth_provider.h"
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/time/clock.h"
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::Result;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_oauth2::OAuth2AuthProvider;
const char kServiceAccountInfo[] = R"({
"token_type" : "123",
"access_token": "abc",
"expires_in": 456
})";
constexpr char kOAuthV3Url[] = "https:
// Test double for OAuth2AuthProvider: records every issued request and
// replays canned responses instead of touching the network. The clock is
// the mutable `time` member so tests can advance it to force re-refresh.
class TestAuthProvider : public OAuth2AuthProvider {
 public:
  TestAuthProvider(const RefreshToken& creds)
      : OAuth2AuthProvider(creds, kOAuthV3Url, nullptr,
                           [this] { return this->time; }),
        time(absl::Now()),
        idx(0) {}
  // Records (uri, body) and returns responses[idx] if configured, advancing
  // idx; otherwise returns a default (empty) response.
  virtual Result<HttpResponse> IssueRequest(std::string_view method,
                                            std::string_view uri,
                                            absl::Cord body) {
    request.push_back(std::make_pair(std::string(uri), std::string(body)));
    if (responses.count(idx) != 0) {
      return responses[idx++];
    }
    return HttpResponse{};
  }
  absl::Time time;  // injected "current time" seen by the provider
  int idx;          // index of the next canned response to serve
  absl::flat_hash_map<int, HttpResponse> responses;   // canned responses
  std::vector<std::pair<std::string, std::string>> request;  // recorded calls
};
TEST(OAuth2AuthProviderTest, InitialState) {
TestAuthProvider auth({"a", "b", "c"});
EXPECT_FALSE(auth.IsValid());
EXPECT_TRUE(auth.IsExpired());
}
TEST(OAuth2AuthProviderTest, NoResponse) {
TestAuthProvider auth({"a", "b", "c"});
auto result = auth.GetToken();
EXPECT_FALSE(result.ok()) << result.status();
ASSERT_EQ(1, auth.request.size());
EXPECT_EQ("https:
auth.request[0].first);
EXPECT_EQ(
"grant_type=refresh_token&client_id=a&client_secret=b&refresh_token=c",
auth.request[0].second);
}
TEST(OAuth2AuthProviderTest, Status200) {
TestAuthProvider auth({"a", "b", "c"});
auth.responses = {
{0,
{200,
absl::Cord(kServiceAccountInfo),
{}}},
{1,
{200,
absl::Cord(kServiceAccountInfo),
{}}},
};
{
auto result = auth.GetToken();
EXPECT_EQ(1, auth.idx);
EXPECT_TRUE(result.ok()) << result.status();
ASSERT_EQ(1, auth.request.size());
EXPECT_EQ("https:
auth.request[0].first);
EXPECT_EQ(
"grant_type=refresh_token&client_id=a&client_secret=b&refresh_token=c",
auth.request[0].second);
EXPECT_EQ(auth.time + absl::Seconds(456), result->expiration);
EXPECT_EQ("abc", result->token);
}
EXPECT_FALSE(auth.IsExpired());
EXPECT_TRUE(auth.IsValid());
auth.time += absl::Seconds(600);
{
auto result = auth.GetToken();
EXPECT_EQ(2, auth.idx);
EXPECT_TRUE(result.ok()) << result.status();
ASSERT_EQ(2, auth.request.size());
EXPECT_EQ("https:
auth.request[1].first);
EXPECT_EQ(
"grant_type=refresh_token&client_id=a&client_secret=b&refresh_token=c",
auth.request[1].second);
EXPECT_EQ(auth.time + absl::Seconds(456), result->expiration);
EXPECT_EQ("abc", result->token);
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/oauth2/oauth2_auth_provider.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/oauth2/oauth2_auth_provider_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
6fadef9d-22c1-478a-82f1-11ef5eabfa35 | cpp | tensorflow/tensorflow | symbolic_shapes | tensorflow/core/grappler/utils/symbolic_shapes.cc | tensorflow/core/grappler/utils/symbolic_shapes_test.cc | #include "tensorflow/core/grappler/utils/symbolic_shapes.h"
#include <unordered_map>
#include "tensorflow/core/util/bcast.h"
namespace tensorflow {
namespace grappler {
namespace {
// Copies the dimension sizes of `shape` into a BCast-compatible vector.
// Negative (unknown/symbolic) sizes are passed through unchanged.
BCast::Vec ShapeDims(const TensorShapeProto& shape) {
  BCast::Vec sizes;
  sizes.reserve(shape.dim_size());
  for (const auto& dim : shape.dim()) {
    sizes.push_back(dim.size());
  }
  return sizes;
}
}
// A dimension is "known" when its size is a concrete non-negative value.
bool IsKnown(const TensorShapeProto::Dim& dim) { return dim.size() >= 0; }
// Sizes <= -2 encode symbolic dimensions: the value is unknown, but two
// dimensions carrying the same negative id are known to be equal.
bool IsKnownSymbolically(const TensorShapeProto::Dim& dim) {
  return dim.size() <= -2;
}
// -1 marks a fully unknown dimension with no symbolic identity.
bool IsUnknown(const TensorShapeProto::Dim& dim) { return dim.size() == -1; }
bool ShapeIsSymbolicallyDefined(const TensorShapeProto& shape) {
return !shape.unknown_rank() &&
std::all_of(
shape.dim().begin(), shape.dim().end(),
[](const TensorShapeProto::Dim& dim) { return !IsUnknown(dim); });
}
bool ShapeIsSymbolicallyDefined(const OpInfo::TensorProperties& properties) {
return ShapeIsSymbolicallyDefined(properties.shape());
}
// Returns the rank of `shape`, or -1 when the rank itself is unknown.
int Rank(const TensorShapeProto& shape) {
  return shape.unknown_rank() ? -1 : shape.dim_size();
}
// Returns the total element count of `shape`, or -1 when the rank or any
// dimension size is unknown (negative).
int64_t NumCoefficients(const TensorShapeProto& shape) {
  if (shape.unknown_rank()) {
    return -1;
  }
  int64_t product = 1;
  for (const auto& dim : shape.dim()) {
    const int64_t size = dim.size();
    if (size < 0) {
      return -1;
    }
    product *= size;
  }
  return product;
}
// Two shapes are symbolically equal iff both ranks are known and equal, no
// dimension on either side is fully unknown (-1), and every dimension pair
// has identical sizes (symbolic dims match only on the same negative id).
bool ShapesSymbolicallyEqual(const TensorShapeProto& left,
                             const TensorShapeProto& right) {
  if (left.unknown_rank() || right.unknown_rank()) return false;
  const int rank = left.dim_size();
  if (rank != right.dim_size()) return false;
  for (int i = 0; i < rank; ++i) {
    const auto& ldim = left.dim(i);
    const auto& rdim = right.dim(i);
    if (IsUnknown(ldim) || IsUnknown(rdim)) return false;
    if (ldim.size() != rdim.size()) return false;
  }
  return true;
}
// Convenience overload comparing the shapes of two tensor properties.
bool ShapesSymbolicallyEqual(const OpInfo::TensorProperties& left,
                             const OpInfo::TensorProperties& right) {
  return ShapesSymbolicallyEqual(left.shape(), right.shape());
}
// Returns true if the two shapes can be broadcast together. Both shapes must
// be symbolically defined; an unknown rank or any fully-unknown (-1)
// dimension makes the answer false. Symbolic (negative) sizes are passed to
// BCast as-is, so matching negative ids broadcast like equal concrete sizes.
bool ShapesBroadcastable(const TensorShapeProto& left,
                         const TensorShapeProto& right) {
  if (!ShapeIsSymbolicallyDefined(left) || !ShapeIsSymbolicallyDefined(right)) {
    return false;
  }
  // NOTE(review): the `false` presumably disables BCast's fewer-dims
  // optimization so symbolic ids are not collapsed — confirm against BCast.
  BCast bcast(ShapeDims(left), ShapeDims(right),
              false);
  return bcast.IsValid();
}
// Convenience overload broadcasting the shapes of two tensor properties.
bool ShapesBroadcastable(const OpInfo::TensorProperties& left,
                         const OpInfo::TensorProperties& right) {
  return ShapesBroadcastable(left.shape(), right.shape());
}
// Computes the broadcast of `left` and `right` into `*output_shape`.
// Returns false (leaving `*output_shape` untouched) when either input is not
// symbolically defined or the shapes are not broadcastable; returns true and
// overwrites `*output_shape` otherwise.
bool ShapeAfterBroadcast(const TensorShapeProto& left,
                         const TensorShapeProto& right,
                         TensorShapeProto* output_shape) {
  if (!ShapeIsSymbolicallyDefined(left) || !ShapeIsSymbolicallyDefined(right)) {
    return false;
  }
  BCast bcast(ShapeDims(left), ShapeDims(right),
              false);
  if (!bcast.IsValid()) {
    return false;
  }
  // Reset the output before filling it with the broadcast dimensions, which
  // may include symbolic (negative) sizes carried through from the inputs.
  output_shape->set_unknown_rank(false);
  output_shape->clear_dim();
  for (const auto& dim : bcast.output_shape()) {
    output_shape->add_dim()->set_size(dim);
  }
  return true;
}
// Returns true if the tensor described by `left` is provably smaller (fewer
// elements) than the one described by `right`.
// Known sizes are multiplied into a defined-size product per side. Symbolic
// dimensions (size <= -2) are tallied per symbolic id so occurrences shared
// by both sides cancel; fully unknown dimensions (-1) each get a fresh id and
// therefore never cancel.
bool CompareSymbolicallyShapedTensorSizes(const TensorShapeProto& left,
                                          const TensorShapeProto& right) {
  if (left.unknown_rank() || right.unknown_rank()) {
    return false;
  }
  int64_t left_defined_size = 1;
  int64_t right_defined_size = 1;
  std::unordered_map<int64_t, int64_t> left_unknown_dims;
  std::unordered_map<int64_t, int64_t> right_unknown_dims;
  // Positive ids are handed out to -1 dims; symbolic dims keep their own
  // negative id, so the two id spaces never collide.
  int64_t unknown_dim_id = 1;
  // Accumulates the product of known sizes and tallies unknown/symbolic dims
  // for one shape.
  auto process_dimensions =
      [&unknown_dim_id](const TensorShapeProto& shape, int64* defined_size,
                        std::unordered_map<int64, int64>* unknown_dims) {
        for (int i = 0; i < shape.dim_size(); ++i) {
          const auto& dim = shape.dim(i);
          int64_t dim_size = dim.size();
          if (dim_size > 0) {
            *defined_size *= dim_size;
          } else if (IsUnknown(dim)) {
            ++(*unknown_dims)[unknown_dim_id++];
          } else if (IsKnownSymbolically(dim)) {
            ++(*unknown_dims)[dim_size];
          }
        }
      };
  process_dimensions(left, &left_defined_size, &left_unknown_dims);
  process_dimensions(right, &right_defined_size, &right_unknown_dims);
  // Cancel symbolic factors that appear on both sides (by co-occurrence
  // count per id).
  std::set<int64_t> unknown_dims;
  for (const auto& el : left_unknown_dims) unknown_dims.insert(el.first);
  for (const auto& el : right_unknown_dims) unknown_dims.insert(el.first);
  for (int64_t unknown_dim : unknown_dims) {
    int64_t co_occurrence = std::min(left_unknown_dims[unknown_dim],
                                     right_unknown_dims[unknown_dim]);
    left_unknown_dims[unknown_dim] -= co_occurrence;
    right_unknown_dims[unknown_dim] -= co_occurrence;
  }
  // Count the unknown factors that survived cancellation on each side.
  int64_t left_unbalanced_unknown_dims = 0;
  int64_t right_unbalanced_unknown_dims = 0;
  for (const auto& el : left_unknown_dims)
    left_unbalanced_unknown_dims += el.second;
  for (const auto& el : right_unknown_dims)
    right_unbalanced_unknown_dims += el.second;
  if (left_unbalanced_unknown_dims == 0 && right_unbalanced_unknown_dims == 0) {
    // All unknown factors cancelled: compare the defined products directly.
    return left_defined_size < right_defined_size;
  }
  // Left has no leftover unknown factors and a product no larger than
  // right's, while right still has extra unknown factors; the extra factors
  // are treated as non-shrinking, so left is provably no larger.
  if (left_defined_size <= right_defined_size &&
      left_unbalanced_unknown_dims == 0 && right_unbalanced_unknown_dims > 0) {
    return true;
  }
  return false;
}
// Convenience overload comparing the shapes of two tensor properties.
bool CompareSymbolicallyShapedTensorSizes(
    const OpInfo::TensorProperties& left,
    const OpInfo::TensorProperties& right) {
  return CompareSymbolicallyShapedTensorSizes(left.shape(), right.shape());
}
// Returns numerator-elements / denominator-elements when the ratio can be
// determined, or -1 otherwise. Symbolic dimensions must cancel one-for-one
// between numerator and denominator (multiset matching); any fully-unknown
// (-1) dimension, unknown rank, zero denominator, or leftover symbolic
// factor yields -1.
int64_t ComputeSizeRatio(const TensorShapeProto& numerator,
                         const TensorShapeProto& denominator) {
  if (numerator.unknown_rank() || denominator.unknown_rank()) {
    return -1;
  }
  // Symbolic ids from the numerator awaiting cancellation by the denominator.
  std::multiset<int> symbolic_dims;
  int64_t num = 1;
  for (const auto& dim : numerator.dim()) {
    if (dim.size() == -1) {
      return -1;
    } else if (dim.size() < -1) {
      symbolic_dims.insert(dim.size());
    } else {
      num *= dim.size();
    }
  }
  int64_t denom = 1;
  for (const auto& dim : denominator.dim()) {
    if (dim.size() == -1) {
      return -1;
    } else if (dim.size() < -1) {
      // Each symbolic denominator dim must consume a matching numerator dim.
      auto it = symbolic_dims.find(dim.size());
      if (it == symbolic_dims.end()) {
        return -1;
      }
      symbolic_dims.erase(it);
    } else {
      denom *= dim.size();
    }
  }
  if (denom == 0) {
    return -1;
  }
  if (!symbolic_dims.empty()) {
    return -1;
  }
  // NOTE(review): integer division — a non-divisible num/denom pair silently
  // truncates; presumably callers only compare compatible shapes. Confirm.
  return num / denom;
}
}
} | #include "tensorflow/core/grappler/utils/symbolic_shapes.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
// Test fixture with helpers to build TensorShapeProtos:
//  - MakeUnknown(): a shape of unknown rank.
//  - MakeShape({...}): a shape with the given dim sizes; negative values
//    encode unknown (-1) or symbolic (<= -2) dims, matching the convention
//    of the code under test.
class SymbolicShapesTest : public ::testing::Test {
 protected:
  TensorShapeProto MakeUnknown() {
    TensorShapeProto shape;
    shape.set_unknown_rank(true);
    return shape;
  }
  TensorShapeProto MakeShape(std::vector<int> dims) {
    TensorShapeProto shape;
    for (int dim_size : dims) {
      TensorShapeProto::Dim dim;
      dim.set_size(dim_size);
      *shape.add_dim() = dim;
    }
    return shape;
  }
};
// Lets the tests below write `lhs < rhs` for "lhs is provably smaller".
bool operator<(const TensorShapeProto& lhs, const TensorShapeProto& rhs) {
  return CompareSymbolicallyShapedTensorSizes(lhs, rhs);
}
// Unknown rank and -1 dims are not symbolically defined; concrete and
// symbolic (-2) dims are.
TEST_F(SymbolicShapesTest, ShapeIsSymbolicallyDefined) {
  EXPECT_FALSE(ShapeIsSymbolicallyDefined(MakeUnknown()));
  EXPECT_FALSE(ShapeIsSymbolicallyDefined(MakeShape({-1, 2})));
  EXPECT_TRUE(ShapeIsSymbolicallyDefined(MakeShape({1, 2})));
  EXPECT_TRUE(ShapeIsSymbolicallyDefined(MakeShape({-2, 2})));
}
// Symbolic equality: -1 dims never compare equal (even to themselves);
// symbolic dims are equal only when they share the same id.
TEST_F(SymbolicShapesTest, ShapesSymbolicallyEqual) {
  EXPECT_FALSE(ShapesSymbolicallyEqual(MakeUnknown(), MakeUnknown()));
  EXPECT_FALSE(ShapesSymbolicallyEqual(MakeShape({-1, 2}), MakeShape({-1, 2})));
  EXPECT_FALSE(ShapesSymbolicallyEqual(MakeShape({-2, 2}), MakeShape({-3, 2})));
  EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({1, 2}), MakeShape({1, 2})));
  EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, 2}), MakeShape({-2, 2})));
}
// Broadcastability and the resulting output shape, including symbolic dims:
// identical symbolic ids behave like equal sizes; distinct ids broadcast only
// against 1s.
TEST_F(SymbolicShapesTest, ShapesBroadcastable) {
  EXPECT_FALSE(ShapesBroadcastable(MakeUnknown(), MakeUnknown()));
  EXPECT_FALSE(ShapesBroadcastable(MakeShape({-2}), MakeShape({1, -3})));
  EXPECT_FALSE(ShapesBroadcastable(MakeShape({-1, 2}), MakeShape({-1, 2})));
  EXPECT_FALSE(ShapesBroadcastable(MakeShape({-2, 2}), MakeShape({-3, 2})));
  EXPECT_FALSE(ShapesBroadcastable(MakeShape({-2, 4}), MakeShape({-2, 8})));
  EXPECT_TRUE(ShapesBroadcastable(MakeShape({1, 2}), MakeShape({1, 2})));
  EXPECT_TRUE(ShapesBroadcastable(MakeShape({-2, 2}), MakeShape({-2, 2})));
  EXPECT_TRUE(ShapesBroadcastable(MakeShape({-2, 32}), MakeShape({-2, 1})));
  EXPECT_TRUE(ShapesBroadcastable(MakeShape({-2, 1}), MakeShape({1, -2})));
  EXPECT_TRUE(ShapesBroadcastable(MakeShape({-2, 1}), MakeShape({1, -3})));
  EXPECT_TRUE(ShapesBroadcastable(MakeShape({-3}), MakeShape({-2, -3})));
  TensorShapeProto output_shape;
  EXPECT_TRUE(
      ShapeAfterBroadcast(MakeShape({1, 2}), MakeShape({1, 2}), &output_shape));
  EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({1, 2}), output_shape));
  EXPECT_TRUE(ShapeAfterBroadcast(MakeShape({-2, 2}), MakeShape({-2, 2}),
                                  &output_shape));
  EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, 2}), output_shape));
  EXPECT_TRUE(ShapeAfterBroadcast(MakeShape({-2, 32}), MakeShape({-2, 1}),
                                  &output_shape));
  EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, 32}), output_shape));
  EXPECT_TRUE(ShapeAfterBroadcast(MakeShape({-2, 1}), MakeShape({1, -2}),
                                  &output_shape));
  EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, -2}), output_shape));
  EXPECT_TRUE(ShapeAfterBroadcast(MakeShape({-2, 1}), MakeShape({1, -3}),
                                  &output_shape));
  EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, -3}), output_shape));
  EXPECT_TRUE(
      ShapeAfterBroadcast(MakeShape({-3}), MakeShape({-2, -3}), &output_shape));
  EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, -3}), output_shape));
}
// "Provably smaller" comparisons; -1 dims are never comparable, matching
// symbolic ids cancel across the two sides.
TEST_F(SymbolicShapesTest, CompareSymbolicallyShapedTensorSizes) {
  EXPECT_TRUE(MakeShape({1, 1, 32}) < MakeShape({32, 32}));
  EXPECT_TRUE(MakeShape({1, 32, 32}) < MakeShape({2048}));
  EXPECT_TRUE(MakeShape({1, -2, 32}) < MakeShape({-2, 32, 32}));
  EXPECT_TRUE(MakeShape({1, 32, 32}) < MakeShape({-2, 32, 32}));
  EXPECT_TRUE(MakeShape({1, 32, 32}) < MakeShape({-1, 32, 32}));
  EXPECT_TRUE(MakeShape({1, -2, 32}) < MakeShape({-2, -2, 32}));
  EXPECT_FALSE(MakeShape({1, -2, 32}) < MakeShape({-3, 32, 32}));
  EXPECT_FALSE(MakeShape({1, -1, 32}) < MakeShape({1, -1, 32}));
  EXPECT_FALSE(MakeShape({1, -1, 32}) < MakeShape({-1, -1, 32}));
  EXPECT_FALSE(MakeShape({-1, -1, 32}) < MakeShape({1, -1, 32}));
}
// Rank and element counts; any unknown dim or unknown rank makes the
// coefficient count (and, for unknown rank, the rank) -1.
TEST_F(SymbolicShapesTest, RankAndNumCoeff) {
  EXPECT_EQ(2, Rank(MakeShape({32, 32})));
  EXPECT_EQ(32 * 32, NumCoefficients(MakeShape({32, 32})));
  EXPECT_EQ(2, Rank(MakeShape({-2, 32})));
  EXPECT_EQ(-1, NumCoefficients(MakeShape({-2, 32})));
  TensorShapeProto shape;
  shape.set_unknown_rank(true);
  EXPECT_EQ(-1, Rank(shape));
  EXPECT_EQ(-1, NumCoefficients(shape));
}
// Size ratios: symbolic dims must cancel exactly; -1 dims, leftover symbolic
// factors, or a zero denominator yield -1.
TEST_F(SymbolicShapesTest, SizeRatio) {
  EXPECT_EQ(16, ComputeSizeRatio(MakeShape({32, 32}), MakeShape({32, 2})));
  EXPECT_EQ(16, ComputeSizeRatio(MakeShape({-2, 32}), MakeShape({-2, 2})));
  EXPECT_EQ(16,
            ComputeSizeRatio(MakeShape({-2, -2, 32}), MakeShape({-2, 2, -2})));
  EXPECT_EQ(-1,
            ComputeSizeRatio(MakeShape({-2, -2, 32}), MakeShape({-2, 2, 2})));
  EXPECT_EQ(-1,
            ComputeSizeRatio(MakeShape({-2, 2, 32}), MakeShape({-2, 2, -2})));
  EXPECT_EQ(-1, ComputeSizeRatio(MakeShape({-2, -2}), MakeShape({-2, 2})));
  EXPECT_EQ(-1, ComputeSizeRatio(MakeShape({-2, 32}), MakeShape({-2, -2})));
  EXPECT_EQ(1, ComputeSizeRatio(MakeShape({-2, -3}), MakeShape({-3, -2})));
  EXPECT_EQ(-1, ComputeSizeRatio(MakeShape({-1, 32}), MakeShape({-2, 2})));
  EXPECT_EQ(-1, ComputeSizeRatio(MakeShape({-1, 32}), MakeShape({-2, 0})));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/symbolic_shapes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/symbolic_shapes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e715a858-471c-4f6e-a0ed-8541b362a1b1 | cpp | tensorflow/tensorflow | transform_utils | tensorflow/tools/graph_transforms/transform_utils.cc | tensorflow/tools/graph_transforms/transform_utils_test.cc | #include "tensorflow/tools/graph_transforms/transform_utils.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
namespace graph_transforms {
namespace {
inline bool IsMerge(const NodeDef& node_def) {
return node_def.op() == "Merge" || node_def.op() == "RefMerge" ||
node_def.op() == "_XlaMerge";
}
void RecordMatchedNodes(const NodeMatch& match,
std::set<string>* matched_nodes) {
matched_nodes->insert(match.node.name());
for (const NodeMatch& input_match : match.inputs) {
RecordMatchedNodes(input_match, matched_nodes);
}
}
// Convenience wrapper that hashes an entire string with Hash64.
inline uint64 Hash64String(const string& input) {
  return Hash64(input.data(), input.size());
}
}
// Flattens the tree of matched nodes into a vector, breadth-first starting
// from the head of the match, skipping nodes already seen so each NodeDef
// appears at most once.
void MatchedNodesAsArray(const NodeMatch& match, std::vector<NodeDef>* result) {
  std::set<string> found_nodes;
  std::vector<NodeMatch> current_matches = {match};
  while (!current_matches.empty()) {
    std::vector<NodeMatch> next_matches;
    for (const NodeMatch& current_match : current_matches) {
      // De-duplicate: a node can be an input to several matched nodes.
      if (found_nodes.count(current_match.node.name())) {
        continue;
      }
      found_nodes.insert(current_match.node.name());
      result->push_back(current_match.node);
      for (const NodeMatch& input_match : current_match.inputs) {
        next_matches.push_back(input_match);
      }
    }
    current_matches = next_matches;
  }
}
// Builds a lookup table from node name to NodeDef pointer. If two nodes
// share a name, the later one in the graph wins.
void MapNamesToNodes(const GraphDef& graph_def,
                     std::map<string, const NodeDef*>* result) {
  for (const NodeDef& graph_node : graph_def.node()) {
    result->insert_or_assign(graph_node.name(), &graph_node);
  }
}
void MapNodesToOutputs(const GraphDef& graph_def,
std::map<string, std::vector<const NodeDef*>>* result) {
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(graph_def, &node_map);
for (const NodeDef& node : graph_def.node()) {
for (const string& input : node.input()) {
string input_node_name = NodeNameFromInput(input);
(*result)[input_node_name].push_back(&node);
}
}
}
// Splits an input reference like "^name:2" into a control prefix ("^" or
// ""), the bare node name, and a port suffix (":2" or ""). Only the first
// ":"-delimited section after the name is kept as the suffix.
void NodeNamePartsFromInput(const string& input_name, string* prefix,
                            string* node_name, string* suffix) {
  std::vector<string> input_parts = str_util::Split(input_name, ':');
  if (input_parts.size() < 2) {
    *suffix = "";
  } else {
    *suffix = ":" + input_parts[1];
  }
  StringPiece node_name_piece(input_parts[0]);
  // A leading "^" marks a control dependency rather than a data edge.
  if (absl::ConsumePrefix(&node_name_piece, "^")) {
    *prefix = "^";
  } else {
    *prefix = "";
  }
  *node_name = string(node_name_piece);
}
// Returns the bare node name from an input reference, discarding any "^"
// control prefix and ":port" suffix.
string NodeNameFromInput(const string& input_name) {
  string prefix, node_name, suffix;
  NodeNamePartsFromInput(input_name, &prefix, &node_name, &suffix);
  return node_name;
}
// Normalizes an input reference so that equivalent forms compare equal: a
// missing port is made explicit as ":0", and any "^" control prefix is kept.
string CanonicalInputName(const string& input_name) {
  string prefix, node_name, suffix;
  NodeNamePartsFromInput(input_name, &prefix, &node_name, &suffix);
  return prefix + node_name + (suffix.empty() ? ":0" : suffix);
}
// Computes a fingerprint of a NodeDef covering its op, name, inputs (in
// canonical form), device, and attributes. Attribute names are sorted first
// so map iteration order cannot affect the result.
uint64 HashNodeDef(const NodeDef& node) {
  uint64 hash = Hash64String(node.op());
  hash = Hash64Combine(hash, Hash64String(node.name()));
  for (const string& input : node.input()) {
    // Canonicalize so that "foo" and "foo:0" hash identically.
    hash = Hash64Combine(hash, Hash64String(CanonicalInputName(input)));
  }
  hash = Hash64Combine(hash, Hash64String(node.device()));
  std::vector<string> attr_names;
  attr_names.reserve(node.attr().size());
  for (const auto& attr : node.attr()) {
    attr_names.push_back(attr.first);
  }
  std::sort(attr_names.begin(), attr_names.end());
  string attr_serialized;
  for (const string& attr_name : attr_names) {
    auto attr = node.attr().at(attr_name);
    // NOTE(review): proto serialization is not guaranteed byte-stable across
    // library versions; assumed stable within a single binary — confirm if
    // hashes are ever persisted.
    attr.SerializeToString(&attr_serialized);
    hash = Hash64Combine(hash, Hash64String(attr_serialized));
  }
  return hash;
}
void AddNodeInput(const string& input_name, NodeDef* node) {
*(node->mutable_input()->Add()) = input_name;
}
// Copies the attribute stored under `source_key` in `source` into `dest`
// under `dest_key`. CHECK-fails if the source attribute is missing.
void CopyNodeAttr(const NodeDef& source, const string& source_key,
                  const string& dest_key, NodeDef* dest) {
  CHECK_NE(0, source.attr().count(source_key))
      << "No key '" << source_key << "' found in " << source.DebugString();
  (*(dest->mutable_attr()))[dest_key] = source.attr().at(source_key);
}
// Deserializes the tensor stored in the node's `key` attribute.
// CHECK-fails if the proto cannot be converted back into a Tensor.
Tensor GetNodeTensorAttr(const NodeDef& node, const string& key) {
  TensorProto tensor_proto = node.attr().at(key).tensor();
  Tensor tensor;
  CHECK(tensor.FromProto(tensor_proto));
  return tensor;
}
// Copies into `output_graph_def` only those nodes for which `selector`
// returns true. Inputs that referred to removed nodes are left dangling;
// callers are responsible for keeping the result consistent.
void FilterGraphDef(const GraphDef& input_graph_def,
                    std::function<bool(const NodeDef&)> selector,
                    GraphDef* output_graph_def) {
  output_graph_def->mutable_node()->Clear();
  for (const NodeDef& candidate : input_graph_def.node()) {
    if (!selector(candidate)) {
      continue;
    }
    *(output_graph_def->add_node()) = candidate;
  }
}
// Copies the graph while erasing the named attributes from every node.
// Attributes that a node doesn't have are silently ignored.
void RemoveAttributes(const GraphDef& input_graph_def,
                      const std::vector<string>& attributes,
                      GraphDef* output_graph_def) {
  output_graph_def->mutable_node()->Clear();
  for (const NodeDef& node : input_graph_def.node()) {
    NodeDef* stripped_node = output_graph_def->add_node();
    *stripped_node = node;
    for (const string& attr_name : attributes) {
      stripped_node->mutable_attr()->erase(attr_name);
    }
  }
}
// Topologically sorts the graph so that every node appears after its inputs.
// Merge nodes are special-cased: they become ready after one data input plus
// all control inputs, matching executor semantics. Returns InvalidArgument
// if an input refers to an unknown node or if a cycle prevents a full sort.
// Fix: the cycle-warning LOG statements previously ran the function name,
// count, and message together without separators.
Status SortByExecutionOrder(const GraphDef& input_graph_def,
                            GraphDef* output_graph_def) {
  const int num_nodes = input_graph_def.node_size();
  // Indices of nodes whose inputs are all satisfied.
  std::vector<int> ready;
  // Remaining unsatisfied input count per node.
  std::vector<int> pending_count;
  pending_count.reserve(num_nodes);
  // For each node index, the indices of the nodes consuming its outputs.
  std::vector<gtl::InlinedVector<int, 4>> outputs(num_nodes);
  std::map<string, int> name_index;
  for (int i = 0; i < input_graph_def.node_size(); ++i) {
    const NodeDef& node(input_graph_def.node(i));
    name_index[node.name()] = i;
  }
  for (int n = 0; n < num_nodes; ++n) {
    const NodeDef& node_def(input_graph_def.node(n));
    if (IsMerge(node_def)) {
      // A Merge fires once any single data input arrives, so only count the
      // control edges plus one data edge.
      int32_t num_control_edges = 0;
      for (int i = 0; i < node_def.input_size(); ++i) {
        if (absl::StartsWith(node_def.input(i), "^")) {
          num_control_edges++;
        }
      }
      pending_count.push_back(num_control_edges + 1);
    } else {
      pending_count.push_back(node_def.input_size());
    }
    if (node_def.input_size() == 0) {
      // Source nodes are immediately ready.
      ready.push_back(n);
      continue;
    }
    for (int i = 0; i < node_def.input_size(); ++i) {
      const string& input_name = node_def.input(i);
      const string& input_node_name = NodeNameFromInput(input_name);
      if (!name_index.count(input_node_name)) {
        return errors::InvalidArgument("Node '", node_def.name(),
                                       "': Unknown input node '",
                                       node_def.input(i), "'");
      }
      outputs[name_index[input_node_name]].push_back(n);
    }
  }
  int processed = 0;
  output_graph_def->Clear();
  // Emit nodes in topological order, releasing consumers as their pending
  // counts reach zero.
  while (!ready.empty()) {
    int o = ready.back();
    ready.pop_back();
    ++processed;
    const NodeDef& node_def(input_graph_def.node(o));
    *output_graph_def->mutable_node()->Add() = node_def;
    for (size_t i = 0; i < outputs[o].size(); ++i) {
      const int output = outputs[o][i];
      pending_count[output]--;
      if (pending_count[output] == 0) {
        ready.push_back(output);
      }
    }
  }
  if (processed < num_nodes) {
    LOG(WARNING) << "IN " << __func__ << " " << (num_nodes - processed)
                 << " NODES IN A CYCLE";
    for (int64_t i = 0; i < num_nodes; i++) {
      if (pending_count[i] != 0) {
        LOG(WARNING) << "PENDING: " << SummarizeNodeDef(input_graph_def.node(i))
                     << " WITH PENDING COUNT = " << pending_count[i];
      }
    }
    return errors::InvalidArgument(num_nodes - processed, " nodes in a cycle");
  }
  return OkStatus();
}
// Renders the pattern as "{op, {child1,child2,...,}}" for debugging.
string OpTypePattern::DebugString() const {
  string out = "{";
  out += op;
  out += ", {";
  for (const OpTypePattern& child : inputs) {
    out += child.DebugString();
    out += ",";
  }
  out += "}}";
  return out;
}
// Renders the match as "{node_debug, {input1,input2,...,}}" for debugging.
string NodeMatch::DebugString() const {
  string out = "{" + node.DebugString() + ", {";
  for (const NodeMatch& sub_match : inputs) {
    out += sub_match.DebugString();
    out += ",";
  }
  out += "}}";
  return out;
}
// Makes an execution-order-sorted copy of the graph and indexes its nodes by
// name. Sort errors are deliberately ignored; on failure graph_def_ holds
// only the nodes emitted before the error was detected.
GraphMatcher::GraphMatcher(const GraphDef& graph_def) {
  SortByExecutionOrder(graph_def, &graph_def_).IgnoreError();
  MapNamesToNodes(graph_def_, &node_map_);
}
// Scans the (execution-ordered) graph and appends every non-overlapping
// subgraph matching `pattern` to `matches`. Once a node participates in a
// match it is excluded from later candidate matches.
Status GraphMatcher::GetOpTypeMatches(const OpTypePattern& pattern,
                                      std::vector<NodeMatch>* matches) {
  std::set<string> matched_nodes;
  for (const NodeDef& node : graph_def_.node()) {
    // Skip any nodes that are already part of a previous match.
    if (matched_nodes.count(node.name())) {
      continue;
    }
    NodeMatch match;
    if (DoesOpTypeMatch(node, pattern, matched_nodes, &match)) {
      RecordMatchedNodes(match, &matched_nodes);
      matches->push_back(match);
    }
  }
  return OkStatus();
}
bool GraphMatcher::DoesOpTypeMatch(
const NodeDef& node, const OpTypePattern& pattern,
const std::set<string>& previously_matched_nodes, NodeMatch* match) {
VLOG(1) << "Looking at node " << node.DebugString();
VLOG(1) << "pattern=" << pattern.DebugString();
VLOG(1) << "match=" << match->DebugString();
if (previously_matched_nodes.count(node.name())) {
VLOG(1) << "node " << node.name() << " has been previously matched";
return false;
}
bool pattern_matched = false;
if (pattern.op == "*") {
pattern_matched = true;
} else {
std::vector<string> pattern_ops = str_util::Split(pattern.op, '|');
for (const string& pattern_op : pattern_ops) {
if (node.op() == pattern_op) {
pattern_matched = true;
}
}
}
if (!pattern_matched) {
VLOG(1) << "node.op() != pattern.op()";
return false;
}
match->node = node;
std::vector<string> non_control_inputs;
for (const string& input : node.input()) {
if (!input.empty() && (input[0] != '^')) {
non_control_inputs.push_back(input);
}
}
if (pattern.inputs.empty()) {
return true;
}
if (non_control_inputs.size() != pattern.inputs.size()) {
VLOG(1) << "non_control_inputs.size() != pattern.inputs.size()";
return false;
}
for (int i = 0; i < pattern.inputs.size(); ++i) {
const string& input_node_name = NodeNameFromInput(non_control_inputs[i]);
const NodeDef& input_node = *(node_map_[input_node_name]);
const OpTypePattern& input_pattern = pattern.inputs[i];
match->inputs.push_back(NodeMatch());
NodeMatch* input_match = &(match->inputs.back());
if (!DoesOpTypeMatch(input_node, input_pattern, previously_matched_nodes,
input_match)) {
return false;
}
}
return true;
}
// Finds every non-overlapping occurrence of `pattern` in the input graph and
// asks `node_generator` to produce replacement nodes for each match. The
// generator is told which matched nodes receive inputs from outside the
// match (`input_nodes`) and which feed nodes outside the match
// (`output_nodes`); unless options.allow_inconsistencies is set, it must
// re-emit every output node name or the original match is kept unchanged.
// Unmatched nodes are copied through verbatim.
Status ReplaceMatchingOpTypes(
    const GraphDef& input_graph_def, const OpTypePattern& pattern,
    const std::function<Status(const NodeMatch&, const std::set<string>&,
                               const std::set<string>&, std::vector<NodeDef>*)>&
        node_generator,
    const ReplaceMatchingOpTypesOptions& options, GraphDef* output_graph_def) {
  // Find all the nodes that match the pattern.
  GraphMatcher matcher(input_graph_def);
  std::vector<NodeMatch> matches;
  TF_RETURN_IF_ERROR(matcher.GetOpTypeMatches(pattern, &matches));
  // Index the matches by head-node name, and record every matched node so we
  // can tell matched from unmatched nodes below.
  std::set<string> matched_nodes;
  std::map<string, const NodeMatch*> matches_by_head_name;
  for (const NodeMatch& match : matches) {
    matches_by_head_name[match.node.name()] = &match;
    RecordMatchedNodes(match, &matched_nodes);
  }
  std::map<string, std::vector<const NodeDef*>> outputs_map;
  MapNodesToOutputs(input_graph_def, &outputs_map);
  output_graph_def->Clear();
  for (const NodeDef& input_node : input_graph_def.node()) {
    if (matches_by_head_name.count(input_node.name())) {
      // This node is the head of a match: run the replacement.
      const NodeMatch* match = matches_by_head_name[input_node.name()];
      std::vector<NodeDef> matched_nodes_array;
      MatchedNodesAsArray(*match, &matched_nodes_array);
      std::set<string> matched_nodes_lookup;
      for (const NodeDef& matched_node : matched_nodes_array) {
        matched_nodes_lookup.insert(matched_node.name());
      }
      // Work out which matched nodes touch the rest of the graph: nodes with
      // inputs from outside the match, and nodes consumed outside the match.
      std::set<string> input_nodes;
      std::set<string> output_nodes;
      for (const NodeDef& matched_node : matched_nodes_array) {
        for (const string& input_name : matched_node.input()) {
          string input_node_name = NodeNameFromInput(input_name);
          if (!matched_nodes_lookup.count(input_node_name)) {
            input_nodes.insert(matched_node.name());
          }
        }
        if (outputs_map.count(matched_node.name())) {
          for (const NodeDef* dependent_node :
               outputs_map[matched_node.name()]) {
            if (!matched_nodes_lookup.count(dependent_node->name())) {
              output_nodes.insert(matched_node.name());
            }
          }
        }
      }
      // Ask the client function for the replacement subgraph.
      std::vector<NodeDef> new_nodes;
      TF_RETURN_IF_ERROR(
          node_generator(*match, input_nodes, output_nodes, &new_nodes));
      std::set<string> new_node_names;
      for (const NodeDef& new_node : new_nodes) {
        new_node_names.insert(new_node.name());
      }
      // Unless inconsistencies are allowed, every node that outside consumers
      // depend on must still exist after the replacement.
      bool abort_replacement = false;
      if (!options.allow_inconsistencies) {
        for (const string& expected_output : output_nodes) {
          if (!new_node_names.count(expected_output)) {
            LOG(WARNING) << "Expected " << expected_output
                         << " to be preserved.";
            abort_replacement = true;
          }
        }
      }
      if (abort_replacement) {
        // Fall back to copying the original matched nodes untouched.
        LOG(WARNING) << "Generator function didn't preserve needed nodes, "
                     << "copying old replacements back in instead.";
        std::vector<NodeDef> old_nodes;
        MatchedNodesAsArray(*match, &old_nodes);
        for (const NodeDef& old_node : old_nodes) {
          NodeDef* added_node = output_graph_def->mutable_node()->Add();
          *added_node = old_node;
        }
      } else {
        for (const NodeDef& new_node : new_nodes) {
          NodeDef* added_node = output_graph_def->mutable_node()->Add();
          *added_node = new_node;
        }
      }
    } else if (!matched_nodes.count(input_node.name())) {
      // Not part of any match: copy through unchanged.
      NodeDef* added_node = output_graph_def->mutable_node()->Add();
      *added_node = input_node;
    } else {
      // Matched but not a head node: the head's replacement covers it, so
      // emit nothing here.
    }
  }
  return OkStatus();
}
// Rewrites node inputs according to `inputs_to_rename`. A source name ending
// in ":*" matches every port of that node while keeping the port and any "^"
// prefix; otherwise names are compared in canonical (explicit-port) form.
// Renames are applied transitively (a->b, b->c maps a to c) with cycle
// detection. Inputs of nodes listed in `nodes_to_ignore` are left unchanged.
Status RenameNodeInputs(const GraphDef& input_graph_def,
                        const std::map<string, string>& inputs_to_rename,
                        const std::unordered_set<string>& nodes_to_ignore,
                        GraphDef* output_graph_def) {
  // Group the rename rules by the bare node name they apply to, so each
  // input only needs to consult the rules for its own producer.
  std::map<string, std::vector<std::pair<string, string>>>
      canonical_inputs_to_rename;
  for (const auto& input_to_rename : inputs_to_rename) {
    canonical_inputs_to_rename[NodeNameFromInput(input_to_rename.first)]
        .push_back({input_to_rename.first, input_to_rename.second});
  }
  output_graph_def->Clear();
  for (const NodeDef& node : input_graph_def.node()) {
    NodeDef* new_node = output_graph_def->mutable_node()->Add();
    *new_node = node;
    new_node->mutable_input()->Clear();
    for (const string& input_name : node.input()) {
      std::set<string> already_visited;
      string new_input_name = input_name;
      // Follow chained renames until no rule applies.
      while (
          canonical_inputs_to_rename.count(NodeNameFromInput(new_input_name))) {
        string input_node_name = NodeNameFromInput(new_input_name);
        if (already_visited.count(input_node_name)) {
          return errors::InvalidArgument(
              "RenameNodeInputs argument contains a cycle for ",
              input_node_name);
        }
        already_visited.insert(input_node_name);
        if (nodes_to_ignore.count(node.name())) {
          break;
        }
        bool any_match_found = false;
        for (const std::pair<string, string>& input_to_rename :
             canonical_inputs_to_rename.at(input_node_name)) {
          const string& source_name = input_to_rename.first;
          const string& dest_name = input_to_rename.second;
          bool is_match;
          string match_name;
          if (str_util::EndsWith(source_name, ":*")) {
            // Wildcard port: rename the node but preserve prefix and port.
            is_match = true;
            string prefix;
            string unused_node_name;
            string suffix;
            NodeNamePartsFromInput(new_input_name, &prefix, &unused_node_name,
                                   &suffix);
            match_name = prefix + dest_name + suffix;
          } else {
            // Exact match on the canonical (explicit-port) form.
            is_match = (CanonicalInputName(source_name) ==
                        CanonicalInputName(new_input_name));
            match_name = dest_name;
          }
          if (is_match) {
            new_input_name = match_name;
            any_match_found = true;
          }
        }
        if (!any_match_found) {
          break;
        }
      }
      *(new_node->mutable_input()->Add()) = new_input_name;
    }
  }
  return OkStatus();
}
void CopyOriginalMatch(const NodeMatch& match,
std::vector<NodeDef>* new_nodes) {
std::vector<NodeDef> old_nodes;
MatchedNodesAsArray(match, &old_nodes);
for (const NodeDef& old_node : old_nodes) {
new_nodes->push_back(old_node);
}
}
// Returns the process-wide registry mapping transform names to functions.
// Function-local static: initialized on first use (thread-safe since C++11).
TransformRegistry* GetTransformRegistry() {
  static TransformRegistry transform_registry;
  return &transform_registry;
}
// Appends a (consumer name, missing producer name) pair for every input edge
// that refers to a node not present in the graph.
void FindInvalidInputs(const GraphDef& graph_def,
                       std::vector<std::pair<string, string>>* invalid_inputs) {
  std::map<string, const NodeDef*> node_map;
  MapNamesToNodes(graph_def, &node_map);
  for (const NodeDef& node : graph_def.node()) {
    for (const string& input : node.input()) {
      const string input_node = NodeNameFromInput(input);
      if (node_map.count(input_node) == 0) {
        invalid_inputs->emplace_back(node.name(), input_node);
      }
    }
  }
}
// Returns OK if every input in the graph refers to an existing node;
// otherwise logs each dangling reference and returns an Internal error.
Status IsGraphValid(const GraphDef& graph_def) {
  std::vector<std::pair<string, string>> invalid_inputs;
  FindInvalidInputs(graph_def, &invalid_inputs);
  if (!invalid_inputs.empty()) {
    std::map<string, const NodeDef*> node_map;
    MapNamesToNodes(graph_def, &node_map);
    for (const std::pair<string, string>& invalid_input : invalid_inputs) {
      LOG(ERROR) << "Invalid input " << invalid_input.second << " for node "
                 << invalid_input.first << " - "
                 << node_map[invalid_input.first]->DebugString();
    }
    return errors::Internal(
        "Invalid graph with inputs referring to nonexistent nodes");
  }
  return OkStatus();
}
// Looks up the op definition for `node_def` in the global op registry and
// fills in the node's concrete input and output data types.
Status GetInOutTypes(const NodeDef& node_def, DataTypeVector* inputs,
                     DataTypeVector* outputs) {
  const OpDef* op_def;
  TF_RETURN_IF_ERROR(OpRegistry::Global()->LookUpOpDef(node_def.op(), &op_def));
  TF_RETURN_IF_ERROR(InOutTypesForNode(node_def, *op_def, inputs, outputs));
  return OkStatus();
}
// Parses a comma-separated list of dimension sizes (e.g. "1,224,224,3") into
// a TensorShape. Returns InvalidArgument if the string is empty or any
// element fails to parse as a 64-bit integer.
// Fix: the parse-failure message previously read "Could parse as shape",
// dropping the "not".
Status TensorShapeFromString(const string& shape_string, TensorShape* result) {
  if (shape_string.empty()) {
    return errors::InvalidArgument("Specified shape is empty.");
  }
  std::vector<string> dims_as_str = str_util::Split(shape_string, ",");
  std::vector<int64_t> dims;
  dims.reserve(dims_as_str.size());
  for (const string& dim : dims_as_str) {
    int64_t tmp;
    if (strings::safe_strto64(dim, &tmp)) {
      dims.push_back(tmp);
    } else {
      return errors::InvalidArgument("Could not parse as shape: '",
                                     shape_string, "'");
    }
  }
  *result = TensorShape(dims);
  return OkStatus();
}
// Returns how many values were supplied for the parameter `name`, or zero if
// it was never specified.
int TransformFuncContext::CountParameters(const string& name) const {
  const auto it = params.find(name);
  return (it == params.end()) ? 0 : static_cast<int>(it->second.size());
}
// Reads the single occurrence of the string parameter `name`, falling back
// to `default_value` when it is absent. Returns InvalidArgument if it was
// supplied more than once.
Status TransformFuncContext::GetOneStringParameter(const string& name,
                                                   const string& default_value,
                                                   string* result) const {
  const int params_count = CountParameters(name);
  if (params_count == 0) {
    *result = default_value;
    return OkStatus();
  }
  if (params_count > 1) {
    return errors::InvalidArgument("Expected a single '", name,
                                   "' parameter, but found ", params_count,
                                   " occurrences");
  }
  *result = params.at(name).at(0);
  return OkStatus();
}
// Reads the single occurrence of `name` as an int32, using `default_value`
// when absent. Returns InvalidArgument if the parameter is repeated or does
// not parse as a number.
Status TransformFuncContext::GetOneInt32Parameter(const string& name,
                                                  int32_t default_value,
                                                  int32* result) const {
  const int params_count = CountParameters(name);
  if (params_count == 0) {
    *result = default_value;
    return OkStatus();
  }
  // Delegates repetition checking to GetOneStringParameter.
  string string_value;
  TF_RETURN_IF_ERROR(GetOneStringParameter(name, "", &string_value));
  if (!strings::safe_strto32(StringPiece(string_value), result)) {
    return errors::InvalidArgument("Couldn't interpret the ", name,
                                   " argument as a number:", string_value);
  }
  return OkStatus();
}
// Reads the single occurrence of `name` as an int64, using `default_value`
// when absent. Returns InvalidArgument if the parameter is repeated or does
// not parse as a number.
Status TransformFuncContext::GetOneInt64Parameter(const string& name,
                                                  int64_t default_value,
                                                  int64_t* result) const {
  const int params_count = CountParameters(name);
  if (params_count == 0) {
    *result = default_value;
    return OkStatus();
  }
  // Delegates repetition checking to GetOneStringParameter.
  string string_value;
  TF_RETURN_IF_ERROR(GetOneStringParameter(name, "", &string_value));
  if (!strings::safe_strto64(StringPiece(string_value), result)) {
    return errors::InvalidArgument("Couldn't interpret the ", name,
                                   " argument as a number:", string_value);
  }
  return OkStatus();
}
// Reads the single occurrence of `name` as a float, using `default_value`
// when absent. Returns InvalidArgument if the parameter is repeated or does
// not parse as a floating-point number.
Status TransformFuncContext::GetOneFloatParameter(const string& name,
                                                  float default_value,
                                                  float* result) const {
  const int params_count = CountParameters(name);
  if (params_count == 0) {
    *result = default_value;
    return OkStatus();
  }
  // Delegates repetition checking to GetOneStringParameter.
  string string_value;
  TF_RETURN_IF_ERROR(GetOneStringParameter(name, "", &string_value));
  if (!strings::safe_strtof(string_value.c_str(), result)) {
    return errors::InvalidArgument(
        "Couldn't interpret the ", name,
        " argument as a float number:", string_value);
  }
  return OkStatus();
}
// Reads the single occurrence of `name` as a boolean, using `default_value`
// when absent. Accepts "true"/"1" and "false"/"0"; anything else (including
// a repeated parameter) yields InvalidArgument.
Status TransformFuncContext::GetOneBoolParameter(const string& name,
                                                 bool default_value,
                                                 bool* result) const {
  const int params_count = CountParameters(name);
  if (params_count == 0) {
    *result = default_value;
    return OkStatus();
  }
  string string_value;
  TF_RETURN_IF_ERROR(GetOneStringParameter(name, "", &string_value));
  const bool is_true = (string_value == "true") || (string_value == "1");
  const bool is_false = (string_value == "false") || (string_value == "0");
  if (!is_true && !is_false) {
    return errors::InvalidArgument("Couldn't interpret the ", name,
                                   " argument as a boolean:", string_value,
                                   " (expected true, false, 0 or 1)");
  }
  *result = is_true;
  return OkStatus();
}
}
} | #include "tensorflow/tools/graph_transforms/transform_utils.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace graph_transforms {
class TransformUtilsTest : public ::testing::Test {
protected:
// Builds a five-node graph (two Consts, Add, Placeholder, Mul) and checks
// that MapNamesToNodes indexes exactly those nodes by name.
void TestMapNamesToNodes() {
  auto root = tensorflow::Scope::NewRootScope();
  using namespace ::tensorflow::ops;
  const int width = 100;
  Tensor a_data(DT_FLOAT, TensorShape({width}));
  test::FillIota<float>(&a_data, 1.0f);
  Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
  Tensor b_data(DT_FLOAT, TensorShape({width}));
  test::FillIota<float>(&b_data, 1.0f);
  Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));
  Output add = Add(root.WithOpName("add"), a_const, b_const);
  Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
  Output mul = Mul(root.WithOpName("output"), add, placeholder);
  GraphDef graph_def;
  TF_ASSERT_OK(root.ToGraphDef(&graph_def));
  std::map<string, const NodeDef*> node_map;
  MapNamesToNodes(graph_def, &node_map);
  EXPECT_EQ(1, node_map.count("a"));
  EXPECT_EQ(1, node_map.count("b"));
  EXPECT_EQ(1, node_map.count("add"));
  EXPECT_EQ(1, node_map.count("placeholder"));
  EXPECT_EQ(1, node_map.count("output"));
  EXPECT_EQ(0, node_map.count("no_such_node"));
}
// Checks that MapNodesToOutputs records, for each node, exactly the nodes
// consuming its outputs, and that sink nodes have no entry.
void TestMapNodesToOutputs() {
  auto root = tensorflow::Scope::NewRootScope();
  using namespace ::tensorflow::ops;
  const int width = 100;
  Tensor a_data(DT_FLOAT, TensorShape({width}));
  test::FillIota<float>(&a_data, 1.0f);
  Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
  Tensor b_data(DT_FLOAT, TensorShape({width}));
  test::FillIota<float>(&b_data, 1.0f);
  Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));
  Output add = Add(root.WithOpName("add"), a_const, b_const);
  Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
  Output mul = Mul(root.WithOpName("output"), add, placeholder);
  GraphDef graph_def;
  TF_ASSERT_OK(root.ToGraphDef(&graph_def));
  std::map<string, std::vector<const NodeDef*>> outputs_map;
  MapNodesToOutputs(graph_def, &outputs_map);
  EXPECT_EQ(1, outputs_map.count("a"));
  EXPECT_EQ(1, outputs_map["a"].size());
  EXPECT_EQ("add", outputs_map["a"][0]->name());
  EXPECT_EQ(1, outputs_map.count("b"));
  EXPECT_EQ(1, outputs_map["b"].size());
  EXPECT_EQ("add", outputs_map["b"][0]->name());
  EXPECT_EQ(1, outputs_map.count("add"));
  EXPECT_EQ(1, outputs_map["add"].size());
  EXPECT_EQ("output", outputs_map["add"][0]->name());
  EXPECT_EQ(1, outputs_map.count("placeholder"));
  EXPECT_EQ(1, outputs_map["placeholder"].size());
  EXPECT_EQ("output", outputs_map["placeholder"][0]->name());
  // "output" feeds nothing, so it has no entry at all.
  EXPECT_EQ(0, outputs_map.count("output"));
  EXPECT_EQ(0, outputs_map.count("no_such_node"));
}
// Exercises NodeNamePartsFromInput on plain names, names with slashes, port
// suffixes, control ("^") prefixes, and both combined.
void TestNodeNamePartsFromInput() {
  string prefix;
  string node_name;
  string suffix;
  NodeNamePartsFromInput("some_node_name", &prefix, &node_name, &suffix);
  EXPECT_EQ("", prefix);
  EXPECT_EQ("some_node_name", node_name);
  EXPECT_EQ("", suffix);
  // Slashes are part of the name, not separators.
  NodeNamePartsFromInput("some_node_name/with/slashes", &prefix, &node_name,
                         &suffix);
  EXPECT_EQ("", prefix);
  EXPECT_EQ("some_node_name/with/slashes", node_name);
  EXPECT_EQ("", suffix);
  NodeNamePartsFromInput("some_node_name:0", &prefix, &node_name, &suffix);
  EXPECT_EQ("", prefix);
  EXPECT_EQ("some_node_name", node_name);
  EXPECT_EQ(":0", suffix);
  NodeNamePartsFromInput("^some_node_name", &prefix, &node_name, &suffix);
  EXPECT_EQ("^", prefix);
  EXPECT_EQ("some_node_name", node_name);
  EXPECT_EQ("", suffix);
  NodeNamePartsFromInput("^some_node_name:99", &prefix, &node_name, &suffix);
  EXPECT_EQ("^", prefix);
  EXPECT_EQ("some_node_name", node_name);
  EXPECT_EQ(":99", suffix);
}
// NodeNameFromInput strips "^" prefixes and ":port" suffixes.
void TestNodeNameFromInput() {
  EXPECT_EQ("node_name", NodeNameFromInput("node_name"));
  EXPECT_EQ("node_name", NodeNameFromInput("node_name:0"));
  EXPECT_EQ("node_name", NodeNameFromInput("^node_name"));
  EXPECT_EQ("node_name", NodeNameFromInput("^node_name:42"));
}
// CanonicalInputName makes the port explicit (":0") and keeps "^" prefixes.
void TestCanonicalInputName() {
  EXPECT_EQ("node_name:0", CanonicalInputName("node_name"));
  EXPECT_EQ("node_name:0", CanonicalInputName("node_name:0"));
  EXPECT_EQ("^node_name:0", CanonicalInputName("^node_name"));
  EXPECT_EQ("^node_name:42", CanonicalInputName("^node_name:42"));
}
// AddNodeInput appends the given string to the node's input list.
void TestAddNodeInput() {
  NodeDef node;
  AddNodeInput("foo", &node);
  EXPECT_EQ("foo", node.input(0));
}
// CopyNodeAttr copies an attribute value under a new key on the destination.
void TestCopyNodeAttr() {
  NodeDef node;
  auto mutable_attr = node.mutable_attr();
  (*mutable_attr)["foo"].set_i(3);
  NodeDef copied_node;
  CopyNodeAttr(node, "foo", "bar", &copied_node);
  EXPECT_EQ(3, copied_node.attr().at("bar").i());
}
// SetNodeAttr stores int and string values under the requested keys.
void TestSetNodeAttr() {
  NodeDef node;
  int32_t value_i = 32;
  SetNodeAttr("foo", value_i, &node);
  EXPECT_EQ(32, node.attr().at("foo").i());
  string value_s = "some_value";
  SetNodeAttr("bar", value_s, &node);
  EXPECT_EQ("some_value", node.attr().at("bar").s());
}
// SetNodeTensorAttr builds a tensor attribute from a shape and a value list
// that round-trips through TensorProto with dtype, shape, and data intact.
void TestSetNodeTensorAttr() {
  NodeDef node;
  SetNodeTensorAttr<int32>("foo", {3, 1}, {1, 2, 3}, &node);
  TensorProto tensor_proto = node.attr().at("foo").tensor();
  Tensor tensor;
  CHECK(tensor.FromProto(tensor_proto));
  EXPECT_EQ(DT_INT32, tensor.dtype());
  EXPECT_EQ(3, tensor.shape().dim_size(0));
  EXPECT_EQ(1, tensor.shape().dim_size(1));
  EXPECT_EQ(1, tensor.flat<int32>()(0));
  EXPECT_EQ(2, tensor.flat<int32>()(1));
  EXPECT_EQ(3, tensor.flat<int32>()(2));
}
// SetNodeTensorAttr with a Tensor argument round-trips the tensor exactly.
void TestSetNodeTensorAttrWithTensor() {
  NodeDef node;
  Tensor input_tensor(DT_INT32, {4, 5});
  test::FillIota<int32>(&input_tensor, 1);
  SetNodeTensorAttr<int32>("foo", input_tensor, &node);
  TensorProto tensor_proto = node.attr().at("foo").tensor();
  Tensor tensor;
  CHECK(tensor.FromProto(tensor_proto));
  test::ExpectTensorEqual<int32>(input_tensor, tensor);
}
// GetNodeTensorAttr reconstructs the Tensor stored in a TensorProto attr.
void TestGetNodeTensorAttr() {
  NodeDef node;
  Tensor input_tensor(DT_INT32, {4, 5});
  test::FillIota<int32>(&input_tensor, 1);
  TensorProto tensor_proto;
  input_tensor.AsProtoTensorContent(&tensor_proto);
  SetNodeAttr("foo", tensor_proto, &node);
  Tensor result = GetNodeTensorAttr(node, "foo");
  test::ExpectTensorEqual<int32>(input_tensor, result);
}
// FilterGraphDef drops exactly the nodes rejected by the selector predicate.
void TestFilterGraphDef() {
  auto root = tensorflow::Scope::NewRootScope();
  using namespace ::tensorflow::ops;
  const int width = 100;
  Tensor a_data(DT_FLOAT, TensorShape({width}));
  test::FillIota<float>(&a_data, 1.0f);
  Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
  Tensor b_data(DT_FLOAT, TensorShape({width}));
  test::FillIota<float>(&b_data, 1.0f);
  Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));
  Output add = Add(root.WithOpName("add"), a_const, b_const);
  Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
  Output mul = Mul(root.WithOpName("output"), add, placeholder);
  Output remove_me = Add(root.WithOpName("remove_me"), mul, add);
  GraphDef graph_def;
  TF_ASSERT_OK(root.ToGraphDef(&graph_def));
  GraphDef result_graph_def;
  FilterGraphDef(
      graph_def,
      [](const NodeDef& node) { return (node.name() != "remove_me"); },
      &result_graph_def);
  std::map<string, const NodeDef*> node_map;
  MapNamesToNodes(result_graph_def, &node_map);
  EXPECT_EQ(1, node_map.count("a"));
  EXPECT_EQ(1, node_map.count("b"));
  EXPECT_EQ(1, node_map.count("add"));
  EXPECT_EQ(1, node_map.count("placeholder"));
  EXPECT_EQ(1, node_map.count("output"));
  EXPECT_EQ(0, node_map.count("remove_me"));
}
// RemoveAttributes erases the named attribute ("dtype") from every node.
void TestRemoveAttributes() {
  auto root = tensorflow::Scope::NewRootScope();
  using namespace ::tensorflow::ops;
  Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
  GraphDef graph_def;
  TF_ASSERT_OK(root.ToGraphDef(&graph_def));
  GraphDef result_graph_def;
  RemoveAttributes(graph_def, {"dtype"}, &result_graph_def);
  std::map<string, const NodeDef*> node_map;
  MapNamesToNodes(result_graph_def, &node_map);
  const NodeDef* removed_placeholder = node_map["placeholder"];
  EXPECT_EQ(nullptr,
            tensorflow::AttrSlice(*removed_placeholder).Find("dtype"));
}
// Exercises GraphMatcher::GetOpTypeMatches over single-op patterns, nested
// patterns, absent ops, a full-graph pattern, "*" wildcards, and "|"
// alternation, on a Const/Const -> Add -> Mul <- Placeholder graph.
void TestGetOpTypeMatches() {
  auto root = tensorflow::Scope::NewRootScope();
  using namespace ::tensorflow::ops;
  const int width = 100;
  Tensor a_data(DT_FLOAT, TensorShape({width}));
  test::FillIota<float>(&a_data, 1.0f);
  Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
  Tensor b_data(DT_FLOAT, TensorShape({width}));
  test::FillIota<float>(&b_data, 1.0f);
  Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));
  Output add = Add(root.WithOpName("add"), a_const, b_const);
  Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
  Output mul = Mul(root.WithOpName("output"), add, placeholder);
  GraphDef graph_def;
  TF_ASSERT_OK(root.ToGraphDef(&graph_def));
  GraphMatcher matcher(graph_def);
  // A single-op pattern matches both Const nodes.
  std::vector<NodeMatch> const_matches;
  TF_ASSERT_OK(matcher.GetOpTypeMatches({"Const"}, &const_matches));
  EXPECT_EQ(2, const_matches.size());
  for (const NodeMatch& match : const_matches) {
    EXPECT_EQ("Const", match.node.op());
    EXPECT_TRUE(("a" == match.node.name()) || ("b" == match.node.name()))
        << "match.node.name()=" << match.node.name();
  }
  std::vector<NodeMatch> add_matches;
  TF_ASSERT_OK(matcher.GetOpTypeMatches({"Add"}, &add_matches));
  EXPECT_EQ(1, add_matches.size());
  EXPECT_EQ("Add", add_matches[0].node.op());
  EXPECT_EQ("add", add_matches[0].node.name());
  // A nested pattern captures the Add and both of its Const inputs.
  std::vector<NodeMatch> add_child_matches;
  TF_ASSERT_OK(matcher.GetOpTypeMatches({"Add", {{"Const"}, {"Const"}}},
                                        &add_child_matches));
  EXPECT_EQ(1, add_child_matches.size());
  EXPECT_EQ("Add", add_child_matches[0].node.op());
  EXPECT_EQ("add", add_child_matches[0].node.name());
  EXPECT_EQ(2, add_child_matches[0].inputs.size());
  for (const NodeMatch& match : add_child_matches[0].inputs) {
    EXPECT_EQ("Const", match.node.op());
    EXPECT_TRUE(("a" == match.node.name()) || ("b" == match.node.name()))
        << "match.node.name()=" << match.node.name();
  }
  // An op that doesn't occur in the graph yields no matches.
  std::vector<NodeMatch> no_such_matches;
  TF_ASSERT_OK(matcher.GetOpTypeMatches({"NoSuch"}, &no_such_matches));
  EXPECT_EQ(0, no_such_matches.size());
  // A pattern covering the whole graph binds every node in tree order.
  std::vector<NodeMatch> all_matches;
  TF_ASSERT_OK(matcher.GetOpTypeMatches(
      {"Mul", {{"Add", {{"Const"}, {"Const"}}}, {"Placeholder"}}},
      &all_matches));
  EXPECT_EQ(1, all_matches.size());
  EXPECT_EQ("Mul", all_matches[0].node.op());
  EXPECT_EQ("output", all_matches[0].node.name());
  EXPECT_EQ(2, all_matches[0].inputs.size());
  EXPECT_EQ("Add", all_matches[0].inputs[0].node.op());
  EXPECT_EQ("add", all_matches[0].inputs[0].node.name());
  EXPECT_EQ(2, all_matches[0].inputs[0].inputs.size());
  EXPECT_EQ("Const", all_matches[0].inputs[0].inputs[0].node.op());
  EXPECT_EQ("a", all_matches[0].inputs[0].inputs[0].node.name());
  EXPECT_EQ(0, all_matches[0].inputs[0].inputs[0].inputs.size());
  EXPECT_EQ("Const", all_matches[0].inputs[0].inputs[1].node.op());
  EXPECT_EQ("b", all_matches[0].inputs[0].inputs[1].node.name());
  EXPECT_EQ(0, all_matches[0].inputs[0].inputs[1].inputs.size());
  EXPECT_EQ("Placeholder", all_matches[0].inputs[1].node.op());
  EXPECT_EQ("placeholder", all_matches[0].inputs[1].node.name());
  EXPECT_EQ(0, all_matches[0].inputs[1].inputs.size());
  // "*" matches any op; the first two-input node in execution order is Add.
  std::vector<NodeMatch> wildcard_matches;
  TF_ASSERT_OK(
      matcher.GetOpTypeMatches({"*", {{"*"}, {"*"}}}, &wildcard_matches));
  EXPECT_EQ(1, wildcard_matches.size());
  EXPECT_EQ("Add", wildcard_matches[0].node.op());
  EXPECT_EQ("Const", wildcard_matches[0].inputs[0].node.op());
  EXPECT_EQ("a", wildcard_matches[0].inputs[0].node.name());
  EXPECT_EQ("Const", wildcard_matches[0].inputs[1].node.op());
  EXPECT_EQ("b", wildcard_matches[0].inputs[1].node.name());
  // "Add|Mul" alternation matches both the Add and the Mul.
  std::vector<NodeMatch> or_matches;
  TF_ASSERT_OK(matcher.GetOpTypeMatches({"Add|Mul"}, &or_matches));
  EXPECT_EQ(2, or_matches.size());
  EXPECT_EQ("Add", or_matches[0].node.op());
  EXPECT_EQ("add", or_matches[0].node.name());
  EXPECT_EQ("Mul", or_matches[1].node.op());
  EXPECT_EQ("output", or_matches[1].node.name());
}
// Checks that a DAG (the same Const feeding both Add inputs) matches a tree
// pattern, with the shared node appearing in both input slots.
void TestGetOpTypeMatchesDAG() {
  auto root = tensorflow::Scope::NewRootScope();
  using namespace ::tensorflow::ops;
  const int width = 100;
  Tensor a_data(DT_FLOAT, TensorShape({width}));
  test::FillIota<float>(&a_data, 1.0f);
  Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
  Output add = Add(root.WithOpName("add"), a_const, a_const);
  Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
  Output mul = Mul(root.WithOpName("output"), add, placeholder);
  GraphDef graph_def;
  TF_ASSERT_OK(root.ToGraphDef(&graph_def));
  GraphMatcher matcher(graph_def);
  std::vector<NodeMatch> add_matches;
  TF_ASSERT_OK(matcher.GetOpTypeMatches({"Add", {{"Const"}, {"Const"}}},
                                        &add_matches));
  EXPECT_EQ(1, add_matches.size());
  EXPECT_EQ("Add", add_matches[0].node.op());
  EXPECT_EQ("add", add_matches[0].node.name());
  EXPECT_EQ("Const", add_matches[0].inputs[0].node.op());
  EXPECT_EQ("a", add_matches[0].inputs[0].node.name());
  EXPECT_EQ("Const", add_matches[0].inputs[1].node.op());
  EXPECT_EQ("a", add_matches[0].inputs[1].node.name());
}
void TestReplaceMatchingOpTypes() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
const int width = 10;
Tensor a_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&a_data, 1.0f);
Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
Tensor b_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&b_data, 1.0f);
Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));
Output add = Add(root.WithOpName("add"), a_const, b_const);
Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
Output mul = Mul(root.WithOpName("output"), add, placeholder);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
GraphDef replaced_graph_def;
TF_ASSERT_OK(ReplaceMatchingOpTypes(
graph_def, {"*"},
[](const NodeMatch& match, const std::set<string>& input_nodes,
const std::set<string>& output_nodes,
std::vector<NodeDef>* new_nodes) {
NodeDef original_copy;
original_copy = match.node;
const string original_name = match.node.name();
original_copy.set_name(original_name + "_before_identity");
new_nodes->push_back(original_copy);
NodeDef identity_node;
identity_node.set_op("Identity");
identity_node.set_name(original_name);
*(identity_node.mutable_input()->Add()) = original_copy.name();
new_nodes->push_back(identity_node);
return OkStatus();
},
{}, &replaced_graph_def));
EXPECT_EQ(10, replaced_graph_def.node_size());
for (const NodeDef& node : replaced_graph_def.node()) {
if (node.name() == "output") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ("output_before_identity", node.input(0));
} else if (node.name() == "output_before_identity") {
EXPECT_EQ("Mul", node.op());
EXPECT_EQ("add", node.input(0));
EXPECT_EQ("placeholder", node.input(1));
} else if (node.name() == "placeholder") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ("placeholder_before_identity", node.input(0));
} else if (node.name() == "placeholder_before_identity") {
EXPECT_EQ("Placeholder", node.op());
} else if (node.name() == "add") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ("add_before_identity", node.input(0));
} else if (node.name() == "add_before_identity") {
EXPECT_EQ("Add", node.op());
EXPECT_EQ("a", node.input(0));
EXPECT_EQ("b", node.input(1));
} else if (node.name() == "a") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ("a_before_identity", node.input(0));
} else if (node.name() == "a_before_identity") {
EXPECT_EQ("Const", node.op());
} else if (node.name() == "b") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ("b_before_identity", node.input(0));
} else if (node.name() == "b_before_identity") {
EXPECT_EQ("Const", node.op());
} else {
EXPECT_EQ(true, false) << "Unexpected node name found: " << node.name();
}
}
}
void TestMatchedNodesAsArray() {
NodeMatch fourth;
fourth.node.set_name("fourth");
NodeMatch second;
second.node.set_name("second");
second.inputs.push_back(fourth);
NodeMatch third;
third.node.set_name("third");
third.inputs.push_back(fourth);
NodeMatch first;
first.node.set_name("first");
first.inputs.push_back(second);
first.inputs.push_back(third);
std::vector<NodeDef> result;
MatchedNodesAsArray(first, &result);
EXPECT_EQ(4, result.size());
EXPECT_EQ("first", result[0].name());
EXPECT_EQ("second", result[1].name());
EXPECT_EQ("third", result[2].name());
EXPECT_EQ("fourth", result[3].name());
}
void TestRenameNodeInputs() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
const int width = 10;
Tensor a_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&a_data, 1.0f);
Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
Tensor b_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&b_data, 1.0f);
Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));
Output add = Add(root.WithOpName("add"), a_const, a_const);
Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
Output mul = Mul(root.WithOpName("output"), add, placeholder);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
GraphDef renamed_graph_def;
TF_ASSERT_OK(RenameNodeInputs(graph_def, {{"a", "b"}},
std::unordered_set<string>(),
&renamed_graph_def));
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(renamed_graph_def, &node_map);
EXPECT_EQ("b", node_map.at("add")->input(0));
EXPECT_EQ("b", node_map.at("add")->input(1));
}
void TestRenameNodeInputsWithRedirects() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
const int width = 10;
Tensor a_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&a_data, 1.0f);
Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
Tensor b_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&b_data, 1.0f);
Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));
Tensor c_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&c_data, 1.0f);
Output c_const = Const(root.WithOpName("c"), Input::Initializer(c_data));
Output add = Add(root.WithOpName("add"), a_const, b_const);
Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
Output mul = Mul(root.WithOpName("output"), add, placeholder);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
GraphDef renamed_graph_def;
TF_ASSERT_OK(RenameNodeInputs(
graph_def, {{"a", "f"}, {"f", "e"}, {"e", "d"}, {"d", "c"}},
std::unordered_set<string>(), &renamed_graph_def));
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(renamed_graph_def, &node_map);
EXPECT_EQ("c", node_map.at("add")->input(0));
EXPECT_EQ("b", node_map.at("add")->input(1));
}
void TestRenameNodeInputsWithCycle() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
const int width = 10;
Tensor a_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&a_data, 1.0f);
Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
Tensor b_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&b_data, 1.0f);
Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));
Tensor c_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&c_data, 1.0f);
Output c_const = Const(root.WithOpName("c"), Input::Initializer(c_data));
Output add = Add(root.WithOpName("add"), a_const, b_const);
Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
Output mul = Mul(root.WithOpName("output"), add, placeholder);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
GraphDef renamed_graph_def;
Status rename_status =
RenameNodeInputs(graph_def, {{"a", "d"}, {"d", "a"}},
std::unordered_set<string>(), &renamed_graph_def);
EXPECT_FALSE(rename_status.ok());
}
void TestRenameNodeInputsWithWildcard() {
auto root = tensorflow::Scope::DisabledShapeInferenceScope();
using namespace ::tensorflow::ops;
const int width = 10;
Tensor a_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&a_data, 1.0f);
Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
QuantizeV2 quantize_a(root.WithOpName("quantize_a"), a_const, a_const,
a_const, DT_QUINT8,
QuantizeV2::Attrs().Mode("MIN_FIRST"));
Tensor b_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&b_data, 1.0f);
Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));
QuantizeV2 quantize_b(root.WithOpName("quantize_b"), b_const, b_const,
b_const, DT_QUINT8,
QuantizeV2::Attrs().Mode("MIN_FIRST"));
Output add = Add(root.WithOpName("add"), quantize_a.output_min,
quantize_a.output_max);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
GraphDef renamed_graph_def;
TF_ASSERT_OK(RenameNodeInputs(graph_def, {{"quantize_a:*", "quantize_b"}},
std::unordered_set<string>(),
&renamed_graph_def));
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(renamed_graph_def, &node_map);
EXPECT_EQ("quantize_b:1", node_map.at("add")->input(0));
EXPECT_EQ("quantize_b:2", node_map.at("add")->input(1));
}
void TestRenameNodeInputsWithIgnores() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
const int width = 10;
Tensor a_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&a_data, 1.0f);
Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
Tensor b_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&b_data, 1.0f);
Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));
Output add = Add(root.WithOpName("add"), a_const, a_const);
Output add2 = Add(root.WithOpName("add2"), a_const, a_const);
Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
Output mul = Mul(root.WithOpName("mul"), add, placeholder);
Output mul2 = Mul(root.WithOpName("output"), mul, add2);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
GraphDef renamed_graph_def;
TF_ASSERT_OK(RenameNodeInputs(graph_def, {{"a", "b"}}, {"add2"},
&renamed_graph_def));
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(renamed_graph_def, &node_map);
EXPECT_EQ("b", node_map.at("add")->input(0));
EXPECT_EQ("b", node_map.at("add")->input(1));
EXPECT_EQ("a", node_map.at("add2")->input(0));
EXPECT_EQ("a", node_map.at("add2")->input(1));
}
void TestFindInvalidInputs() {
GraphDef graph_def;
NodeDef* mul_node = graph_def.mutable_node()->Add();
mul_node->set_op("Mul");
mul_node->set_name("mul_node");
*(mul_node->mutable_input()->Add()) = "add_node1";
*(mul_node->mutable_input()->Add()) = "add_node2:0";
*(mul_node->mutable_input()->Add()) = "^const_node1:0";
NodeDef* add_node1 = graph_def.mutable_node()->Add();
add_node1->set_op("Add");
add_node1->set_name("add_node1");
*(add_node1->mutable_input()->Add()) = "missing_input1";
*(add_node1->mutable_input()->Add()) = "const_node1:0";
*(add_node1->mutable_input()->Add()) = "missing_input2";
NodeDef* add_node2 = graph_def.mutable_node()->Add();
add_node2->set_op("Add");
add_node2->set_name("add_node2");
*(add_node2->mutable_input()->Add()) = "missing_input3";
*(add_node2->mutable_input()->Add()) = "const_node1:0";
*(add_node2->mutable_input()->Add()) = "^const_node2";
NodeDef* const_node1 = graph_def.mutable_node()->Add();
const_node1->set_op("Const");
const_node1->set_name("const_node1");
NodeDef* const_node2 = graph_def.mutable_node()->Add();
const_node2->set_op("Const");
const_node2->set_name("const_node2");
std::vector<std::pair<string, string>> invalid_inputs;
FindInvalidInputs(graph_def, &invalid_inputs);
EXPECT_EQ(3, invalid_inputs.size());
for (const std::pair<string, string>& invalid_input : invalid_inputs) {
EXPECT_TRUE((invalid_input.first == "add_node1") ||
(invalid_input.first == "add_node2"));
if (invalid_input.first == "add_node1") {
EXPECT_TRUE((invalid_input.second == "missing_input1") ||
(invalid_input.second == "missing_input2"))
<< invalid_input.second;
} else if (invalid_input.first == "add_node2") {
EXPECT_EQ("missing_input3", invalid_input.second);
}
}
}
void TestIsGraphValid() {
GraphDef invalid_graph_def;
NodeDef* mul_node = invalid_graph_def.mutable_node()->Add();
mul_node->set_op("Mul");
mul_node->set_name("mul_node");
*(mul_node->mutable_input()->Add()) = "add_node1";
*(mul_node->mutable_input()->Add()) = "add_node2:0";
*(mul_node->mutable_input()->Add()) = "^const_node1:0";
NodeDef* add_node1 = invalid_graph_def.mutable_node()->Add();
add_node1->set_op("Add");
add_node1->set_name("add_node1");
*(add_node1->mutable_input()->Add()) = "missing_input1";
*(add_node1->mutable_input()->Add()) = "const_node1:0";
*(add_node1->mutable_input()->Add()) = "missing_input2";
NodeDef* add_node2 = invalid_graph_def.mutable_node()->Add();
add_node2->set_op("Add");
add_node2->set_name("add_node2");
*(add_node2->mutable_input()->Add()) = "missing_input3";
*(add_node2->mutable_input()->Add()) = "const_node1:0";
*(add_node2->mutable_input()->Add()) = "^const_node2";
NodeDef* const_node1 = invalid_graph_def.mutable_node()->Add();
const_node1->set_op("Const");
const_node1->set_name("const_node1");
NodeDef* const_node2 = invalid_graph_def.mutable_node()->Add();
const_node2->set_op("Const");
const_node2->set_name("const_node2");
EXPECT_FALSE(IsGraphValid(invalid_graph_def).ok());
GraphDef valid_graph_def;
NodeDef* const_node3 = valid_graph_def.mutable_node()->Add();
const_node3->set_op("Const");
const_node3->set_name("const_node2");
EXPECT_TRUE(IsGraphValid(valid_graph_def).ok());
}
void TestGetInOutTypes() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
const int width = 20;
Tensor float_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&float_data, 1.0f);
Output float_const =
Const(root.WithOpName("float_const"), Input::Initializer(float_data));
Tensor int_data(DT_INT32, TensorShape({width}));
test::FillIota<int32>(&int_data, 1);
Output int_const =
Const(root.WithOpName("int_const"), Input::Initializer(int_data));
Output float_relu = Relu(root.WithOpName("float_relu"), float_const);
Output int_relu = Relu(root.WithOpName("int_relu"), int_const);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(graph_def, &node_map);
const NodeDef* float_const_def = node_map.at("float_const");
DataTypeVector float_const_inputs;
DataTypeVector float_const_outputs;
TF_EXPECT_OK(GetInOutTypes(*float_const_def, &float_const_inputs,
&float_const_outputs));
ASSERT_EQ(0, float_const_inputs.size());
ASSERT_EQ(1, float_const_outputs.size());
EXPECT_EQ(DT_FLOAT, float_const_outputs[0]);
const NodeDef* int_const_def = node_map.at("int_const");
DataTypeVector int_const_inputs;
DataTypeVector int_const_outputs;
TF_EXPECT_OK(
GetInOutTypes(*int_const_def, &int_const_inputs, &int_const_outputs));
ASSERT_EQ(0, int_const_inputs.size());
ASSERT_EQ(1, int_const_outputs.size());
EXPECT_EQ(DT_INT32, int_const_outputs[0]);
const NodeDef* float_relu_def = node_map.at("float_relu");
DataTypeVector float_relu_inputs;
DataTypeVector float_relu_outputs;
TF_EXPECT_OK(GetInOutTypes(*float_relu_def, &float_relu_inputs,
&float_relu_outputs));
ASSERT_EQ(1, float_relu_inputs.size());
EXPECT_EQ(DT_FLOAT, float_relu_inputs[0]);
ASSERT_EQ(1, float_relu_outputs.size());
EXPECT_EQ(DT_FLOAT, float_relu_outputs[0]);
const NodeDef* int_relu_def = node_map.at("int_relu");
DataTypeVector int_relu_inputs;
DataTypeVector int_relu_outputs;
TF_EXPECT_OK(
GetInOutTypes(*int_relu_def, &int_relu_inputs, &int_relu_outputs));
ASSERT_EQ(1, int_relu_inputs.size());
EXPECT_EQ(DT_INT32, int_relu_inputs[0]);
ASSERT_EQ(1, int_relu_outputs.size());
EXPECT_EQ(DT_INT32, int_relu_outputs[0]);
}
void TestCopyOriginalMatch() {
NodeDef a;
a.set_op("Relu");
a.set_name("a");
AddNodeInput("b", &a);
NodeDef b;
b.set_op("Const");
b.set_name("b");
NodeMatch b_match;
b_match.node = b;
NodeMatch a_match;
a_match.node = a;
a_match.inputs.push_back(b_match);
std::vector<NodeDef> new_nodes;
CopyOriginalMatch(a_match, &new_nodes);
EXPECT_EQ(2, new_nodes.size());
EXPECT_EQ("a", new_nodes[0].name());
EXPECT_EQ("Relu", new_nodes[0].op());
EXPECT_EQ("b", new_nodes[1].name());
EXPECT_EQ("Const", new_nodes[1].op());
}
void TestHashNodeDef() {
using namespace ::tensorflow::ops;
const int width = 10;
auto a_root = tensorflow::Scope::NewRootScope();
Tensor a_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&a_data, 1.0f);
Output a_const = Const(a_root.WithOpName("a"), Input::Initializer(a_data));
GraphDef a_graph_def;
TF_ASSERT_OK(a_root.ToGraphDef(&a_graph_def));
const NodeDef& a_node_def = a_graph_def.node(0);
auto b_root = tensorflow::Scope::NewRootScope();
Tensor b_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&b_data, 1.0f);
Output b_const = Const(b_root.WithOpName("a"), Input::Initializer(b_data));
GraphDef b_graph_def;
TF_ASSERT_OK(b_root.ToGraphDef(&b_graph_def));
const NodeDef& b_node_def = b_graph_def.node(0);
auto c_root = tensorflow::Scope::NewRootScope();
Tensor c_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&c_data, 2.0f);
Output c_const = Const(c_root.WithOpName("a"), Input::Initializer(c_data));
GraphDef c_graph_def;
TF_ASSERT_OK(c_root.ToGraphDef(&c_graph_def));
const NodeDef& c_node_def = c_graph_def.node(0);
auto d_root = tensorflow::Scope::NewRootScope();
Tensor d_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&d_data, 1.0f);
Output d_const = Const(d_root.WithOpName("d"), Input::Initializer(d_data));
GraphDef d_graph_def;
TF_ASSERT_OK(d_root.ToGraphDef(&d_graph_def));
const NodeDef& d_node_def = d_graph_def.node(0);
auto e_root = tensorflow::Scope::NewRootScope();
Tensor e_data(DT_INT32, TensorShape({width}));
test::FillIota<int32>(&e_data, 1);
Output e_const = Const(e_root.WithOpName("a"), Input::Initializer(e_data));
GraphDef e_graph_def;
TF_ASSERT_OK(e_root.ToGraphDef(&e_graph_def));
const NodeDef& e_node_def = e_graph_def.node(0);
auto f_root = tensorflow::Scope::NewRootScope();
Tensor f_data(DT_FLOAT, TensorShape({width - 1}));
test::FillIota<float>(&f_data, 1.0f);
Output f_const = Const(f_root.WithOpName("a"), Input::Initializer(f_data));
GraphDef f_graph_def;
TF_ASSERT_OK(f_root.ToGraphDef(&f_graph_def));
const NodeDef& f_node_def = f_graph_def.node(0);
auto g_root = tensorflow::Scope::NewRootScope();
Tensor g_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&g_data, 1);
Output g_const = Const(g_root.WithOpName("a").WithDevice("some_device"),
Input::Initializer(g_data));
GraphDef g_graph_def;
TF_ASSERT_OK(g_root.ToGraphDef(&g_graph_def));
const NodeDef& g_node_def = g_graph_def.node(0);
NodeDef relu1_node_def;
relu1_node_def.set_op("Relu");
relu1_node_def.set_name("a");
relu1_node_def.add_input("foo");
NodeDef relu2_node_def;
relu2_node_def.set_op("Relu");
relu2_node_def.set_name("a");
relu2_node_def.add_input("bar");
EXPECT_EQ(HashNodeDef(a_node_def), HashNodeDef(b_node_def));
EXPECT_NE(HashNodeDef(a_node_def), HashNodeDef(c_node_def));
EXPECT_NE(HashNodeDef(a_node_def), HashNodeDef(d_node_def));
EXPECT_NE(HashNodeDef(a_node_def), HashNodeDef(e_node_def));
EXPECT_NE(HashNodeDef(a_node_def), HashNodeDef(f_node_def));
EXPECT_NE(HashNodeDef(a_node_def), HashNodeDef(g_node_def));
EXPECT_NE(HashNodeDef(a_node_def), HashNodeDef(relu1_node_def));
EXPECT_NE(HashNodeDef(relu1_node_def), HashNodeDef(relu2_node_def));
}
void TestCountParameters() {
TransformFuncContext context;
context.params.insert({"foo", {"a", "b"}});
context.params.insert({"bar", {"c"}});
EXPECT_EQ(2, context.CountParameters("foo"));
EXPECT_EQ(1, context.CountParameters("bar"));
EXPECT_EQ(0, context.CountParameters("not_present"));
}
void TestGetOneStringParameter() {
TransformFuncContext context;
context.params.insert({"foo", {"a", "b"}});
context.params.insert({"bar", {"c"}});
string value;
TF_EXPECT_OK(context.GetOneStringParameter("bar", "d", &value));
EXPECT_EQ("c", value);
EXPECT_FALSE(context.GetOneStringParameter("foo", "d", &value).ok());
TF_EXPECT_OK(context.GetOneStringParameter("not_present", "d", &value));
EXPECT_EQ("d", value);
}
void TestGetOneInt32Parameter() {
TransformFuncContext context;
context.params.insert({"foo", {"10", "20"}});
context.params.insert({"bar", {"-23"}});
context.params.insert({"not_a_number", {"not_numerical"}});
context.params.insert({"float", {"-23.232323"}});
int32_t value;
TF_EXPECT_OK(context.GetOneInt32Parameter("bar", 0, &value));
EXPECT_EQ(-23, value);
EXPECT_FALSE(context.GetOneInt32Parameter("foo", 0, &value).ok());
TF_EXPECT_OK(context.GetOneInt32Parameter("not_present", 10, &value));
EXPECT_EQ(10, value);
EXPECT_FALSE(context.GetOneInt32Parameter("not_a_number", 0, &value).ok());
EXPECT_FALSE(context.GetOneInt32Parameter("float", 0, &value).ok());
}
void TestGetOneInt64Parameter() {
TransformFuncContext context;
context.params.insert({"foo", {"10", "20"}});
context.params.insert({"bar", {"-23"}});
context.params.insert({"not_a_number", {"not_numerical"}});
context.params.insert({"float", {"-23.232323"}});
int64_t value;
TF_EXPECT_OK(context.GetOneInt64Parameter("bar", 0, &value));
EXPECT_EQ(-23, value);
EXPECT_FALSE(context.GetOneInt64Parameter("foo", 0, &value).ok());
TF_EXPECT_OK(context.GetOneInt64Parameter("not_present", 10, &value));
EXPECT_EQ(10, value);
EXPECT_FALSE(context.GetOneInt64Parameter("not_a_number", 0, &value).ok());
EXPECT_FALSE(context.GetOneInt64Parameter("float", 0, &value).ok());
}
void TestGetOneFloatParameter() {
TransformFuncContext context;
context.params.insert({"foo", {"10.0", "20.0"}});
context.params.insert({"bar", {"-23.2323"}});
context.params.insert({"not_a_number", {"not_numerical"}});
float value;
TF_EXPECT_OK(context.GetOneFloatParameter("bar", 0, &value));
EXPECT_NEAR(-23.2323f, value, 1e-5f);
EXPECT_FALSE(context.GetOneFloatParameter("foo", 0, &value).ok());
TF_EXPECT_OK(context.GetOneFloatParameter("not_present", 10.5f, &value));
EXPECT_NEAR(10.5f, value, 1e-5f);
EXPECT_FALSE(context.GetOneFloatParameter("not_a_number", 0, &value).ok());
}
void TestGetOneBoolParameter() {
TransformFuncContext context;
context.params.insert({"foo", {"true", "false"}});
context.params.insert({"true", {"true"}});
context.params.insert({"false", {"false"}});
context.params.insert({"one", {"1"}});
context.params.insert({"zero", {"0"}});
context.params.insert({"not_a_bool", {"not_boolean"}});
bool value;
EXPECT_FALSE(context.GetOneBoolParameter("foo", 0, &value).ok());
value = false;
TF_EXPECT_OK(context.GetOneBoolParameter("true", false, &value));
EXPECT_TRUE(value);
value = true;
TF_EXPECT_OK(context.GetOneBoolParameter("false", true, &value));
EXPECT_FALSE(value);
value = false;
TF_EXPECT_OK(context.GetOneBoolParameter("one", false, &value));
EXPECT_TRUE(value);
value = true;
TF_EXPECT_OK(context.GetOneBoolParameter("zero", true, &value));
EXPECT_FALSE(value);
EXPECT_FALSE(context.GetOneBoolParameter("not_a_bool", false, &value).ok());
value = false;
TF_EXPECT_OK(context.GetOneBoolParameter("not_present", true, &value));
EXPECT_TRUE(value);
}
};
TEST_F(TransformUtilsTest, TestMapNamesToNodes) { TestMapNamesToNodes(); }
TEST_F(TransformUtilsTest, TestMapNodesToOutputs) { TestMapNodesToOutputs(); }
TEST_F(TransformUtilsTest, TestNodeNamePartsFromInput) {
TestNodeNamePartsFromInput();
}
TEST_F(TransformUtilsTest, TestCanonicalInputName) { TestCanonicalInputName(); }
TEST_F(TransformUtilsTest, TestAddNodeInput) { TestAddNodeInput(); }
TEST_F(TransformUtilsTest, TestCopyNodeAttr) { TestCopyNodeAttr(); }
TEST_F(TransformUtilsTest, TestSetNodeAttr) { TestSetNodeAttr(); }
TEST_F(TransformUtilsTest, TestSetNodeTensorAttr) { TestSetNodeTensorAttr(); }
TEST_F(TransformUtilsTest, TestSetNodeTensorAttrWithTensor) {
TestSetNodeTensorAttrWithTensor();
}
TEST_F(TransformUtilsTest, TestGetNodeTensorAttr) { TestGetNodeTensorAttr(); }
TEST_F(TransformUtilsTest, TestNodeNameFromInput) { TestNodeNameFromInput(); }
TEST_F(TransformUtilsTest, TestFilterGraphDef) { TestFilterGraphDef(); }
TEST_F(TransformUtilsTest, TestRemoveAttributes) { TestRemoveAttributes(); }
TEST_F(TransformUtilsTest, TestGetOpTypeMatches) { TestGetOpTypeMatches(); }
TEST_F(TransformUtilsTest, TestGetOpTypeMatchesDAG) {
TestGetOpTypeMatchesDAG();
}
TEST_F(TransformUtilsTest, TestReplaceMatchingOpTypes) {
TestReplaceMatchingOpTypes();
}
TEST_F(TransformUtilsTest, TestMatchedNodesAsArray) {
TestMatchedNodesAsArray();
}
TEST_F(TransformUtilsTest, TestRenameNodeInputs) { TestRenameNodeInputs(); }
TEST_F(TransformUtilsTest, TestRenameNodeInputsWithRedirects) {
TestRenameNodeInputsWithRedirects();
}
TEST_F(TransformUtilsTest, TestRenameNodeInputsWithCycle) {
TestRenameNodeInputsWithCycle();
}
TEST_F(TransformUtilsTest, TestRenameNodeInputsWithWildcard) {
TestRenameNodeInputsWithWildcard();
}
TEST_F(TransformUtilsTest, TestRenameNodeInputsWithIgnores) {
TestRenameNodeInputsWithIgnores();
}
TEST_F(TransformUtilsTest, TestFindInvalidInputs) { TestFindInvalidInputs(); }
TEST_F(TransformUtilsTest, TestIsGraphValid) { TestIsGraphValid(); }
TEST_F(TransformUtilsTest, TestGetInOutTypes) { TestGetInOutTypes(); }
TEST_F(TransformUtilsTest, TestCopyOriginalMatch) { TestCopyOriginalMatch(); }
TEST_F(TransformUtilsTest, TestHashNodeDef) { TestHashNodeDef(); }
TEST_F(TransformUtilsTest, TestCountParameters) { TestCountParameters(); }
TEST_F(TransformUtilsTest, TestGetOneStringParameter) {
TestGetOneStringParameter();
}
TEST_F(TransformUtilsTest, TestGetOneInt32Parameter) {
TestGetOneInt32Parameter();
}
TEST_F(TransformUtilsTest, TestGetOneInt64Parameter) {
TestGetOneInt64Parameter();
}
TEST_F(TransformUtilsTest, TestGetOneFloatParameter) {
TestGetOneFloatParameter();
}
TEST_F(TransformUtilsTest, TestGetOneBoolParameter) {
TestGetOneBoolParameter();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/transform_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/transform_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
35cd33cd-4013-4952-895a-f99341d3a88c | cpp | tensorflow/tensorflow | tile | tensorflow/compiler/tf2tensorrt/convert/ops/tile.cc | tensorflow/lite/delegates/gpu/cl/kernels/tile_test.cc | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/layer_utils.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
class ConvertTile : public OpConverterBase<ConvertTile> {
public:
explicit ConvertTile(const OpConverterParams *params)
: OpConverterBase<ConvertTile>(
params,
{DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}) {}
static constexpr std::array<InputArgSpec, 2> InputSpec() {
return std::array<InputArgSpec, 2>{
InputArgSpec::Create("input_tensor", TrtInputArg::kBoth),
InputArgSpec::Create("weight", TrtInputArg::kBoth)};
}
Status Validate() {
const auto ¶ms = *this->params_;
const auto &inputs = params.inputs;
const auto &repl = inputs.at(1);
if (params.use_implicit_batch && repl.is_tensor()) {
return errors::InvalidArgument(
"Conversion for Tile is not implemented for multipliers "
"passed as a tensor in implicit batch mode.");
}
nvinfer1::DataType dtype;
const int *multiplies;
if (repl.is_weights()) {
TFTRT_CHECK_SHAPE_TENSOR(repl.weights().GetTensor());
dtype = repl.weights().TrtDType();
multiplies = repl.weights().GetPointer<int>();
} else {
dtype = repl.tensor()->getType();
multiplies = nullptr;
}
const auto &node = params.node_def;
TF_RETURN_IF_ERROR(check_type(dtype, nvinfer1::DataType::kINT32, node, 1));
const auto dims = inputs.at(0).GetTrtDims();
const auto nb_dims =
dims.nbDims +
(params.use_implicit_batch && inputs.at(0).is_tensor() ? 1 : 0);
if (multiplies) {
const int mult_numb = repl.weights().count();
if (mult_numb != nb_dims) {
return errors::InvalidArgument(
"The length of the replication vector (", mult_numb,
") of the Tile operation in '", node.name(),
"' is expected to be equal to the rank of the input vector (",
nb_dims, ").");
}
if (std::any_of(multiplies, multiplies + nb_dims,
[](int i) { return i <= 0; })) {
const auto &mul = absl::StrJoin(multiplies, multiplies + nb_dims, ", ");
return errors::InvalidArgument(
"All replications of the Tile operation in '", node.name(),
"' should be positive, got (", mul, ").");
}
if (params.use_implicit_batch && multiplies[0] > 1) {
return errors::Unimplemented(
"The Tile operation along the batch dimension in '", node.name(),
"' is not implemented.");
}
} else {
const auto &repl_dims = repl.GetTrtDims();
if (repl_dims.nbDims != 1) {
return errors::InvalidArgument(
"When replications are defined as a tensor, that tensor must be "
"1-dimensional. Got ",
repl_dims.nbDims, "-dimensional tensor.");
}
if (repl_dims.d[0] >= 0 && repl_dims.d[0] != nb_dims) {
return errors::InvalidArgument(
"When replications are defined as a tensor, "
"the number of its elements (",
repl_dims.d[0], ") must be equal to the rank of the input tensor (",
nb_dims, ").");
}
}
return OkStatus();
}
Status Convert() {
const auto ¶ms = *this->params_;
const auto &inputs = params.inputs;
auto *converter = params.converter;
auto *network = converter->network();
const auto &tensor = inputs.at(0);
const auto &replics = inputs.at(1);
const auto dims = tensor.GetTrtDims();
const auto nb_dims = dims.nbDims;
nvinfer1::Dims output_size{nb_dims, {1}};
bool dynamic_flag = replics.is_tensor() || !HasStaticShape(dims);
if (!dynamic_flag) {
const auto dim_offset =
params.use_implicit_batch && tensor.is_tensor() ? 1 : 0;
const auto *input_size = dims.d;
const int *pReplics = replics.weights().GetPointer<int>() + dim_offset;
for (int i = 0; i < nb_dims; i++)
output_size.d[i] = pReplics[i] * input_size[i];
}
StatusOr<TRTNetworkBuilder> builder;
if (tensor.is_weights() || (dynamic_flag && replics.is_weights())) {
builder =
TRTNetworkBuilder::Create(converter->network(), params.weight_store);
TRT_ENSURE_OK(builder);
}
ITensorProxyPtr input_tensor;
if (tensor.is_weights()) {
StatusOr<nvinfer1::IConstantLayer *> weights_const =
builder->WeightsToConstant(tensor.weights().GetTrtWeights(), dims);
TRT_ENSURE_PTR_OK(weights_const);
input_tensor = (*weights_const)->getOutput(0);
} else {
input_tensor = tensor.tensor();
}
auto &input_trt_tensor = *input_tensor->trt_tensor();
nvinfer1::ITensor *target_shape = nullptr;
if (dynamic_flag) {
nvinfer1::ITensor *mult;
if (replics.is_weights()) {
StatusOr<nvinfer1::IConstantLayer *> weights_const =
builder->WeightsToConstant(replics.weights().GetTrtWeights(),
replics.GetTrtDims());
TRT_ENSURE_PTR_OK(weights_const);
mult = (*weights_const)->getOutput(0);
} else {
const ITensorProxyPtr multiplies = replics.tensor()->trt_tensor();
mult = multiplies->trt_tensor();
}
nvinfer1::ITensor *shape =
network->addShape(input_trt_tensor)->getOutput(0);
target_shape = network
->addElementWise(*shape, *mult,
nvinfer1::ElementWiseOperation::kPROD)
->getOutput(0);
}
nvinfer1::Dims start{nb_dims, {}};
DimsAdapter stride(std::vector<int>(nb_dims, 1));
auto layer = network->addSlice(input_trt_tensor, start, output_size,
stride.AsTrtDims());
layer->setMode(nvinfer1::SliceMode::kWRAP);
if (target_shape) layer->setInput(2, *target_shape);
converter->SetLayerName(layer, params.node_def.name(), "to_tile");
ITensorProxyPtr output_tensor = layer->getOutput(0);
if (tensor.is_weights() && params.use_implicit_batch) {
DimsAdapter adap(output_tensor->getDimensions());
TF_RETURN_IF_ERROR(adap.RemoveBatchDimension());
TF_RETURN_IF_ERROR(PrepareTensorForShape(
params.converter, TRT_TensorOrWeights(output_tensor),
adap.AsTrtDims(), false, &output_tensor, params.node_def));
}
AddOutput(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
};
REGISTER_DEFAULT_TRT_OP_CONVERTER(MakeConverterFunction<ConvertTile>(), "Tile");
}
}
}
#endif | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/tile_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST_F(OpenCLOperationTest, TileChannels) {
auto status = TileChannelsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, TileChannelsX4) {
auto status = TileChannelsX4Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, TileWidth) {
auto status = TileWidthTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, TileHeight) {
auto status = TileHeightTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, TileHWC) {
auto status = TileHWCTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/ops/tile.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/tile_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
df840d78-1120-4a2f-b520-503efce89ecf | cpp | google/googletest | gmock-function-mocker | googlemock/include/gmock/gmock-function-mocker.h | googlemock/test/gmock-function-mocker_test.cc | #ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_FUNCTION_MOCKER_H_
#define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_FUNCTION_MOCKER_H_
#include <cstddef>
#include <type_traits>
#include <utility>
#include "gmock/gmock-spec-builders.h"
#include "gmock/internal/gmock-internal-utils.h"
#include "gmock/internal/gmock-pp.h"
namespace testing {
namespace internal {
// An alias that simply names T. Wrapping a signature such as Ret(Args...)
// in identity_t forces the macro machinery to treat it as a single type.
template <typename T>
using identity_t = T;
// Applies the cv- and ref-qualification of `Pattern` to a mock object so
// that ref-qualified mock methods (e.g. `const &&` overloads) can invoke
// gmock_* helpers on a `*this` with matching qualification.
template <typename Pattern>
struct ThisRefAdjuster {
  // AdjustT<T> is T decorated with Pattern's qualifiers:
  //   Pattern = const U&  -> const T&,   Pattern = const U&& -> const T&&,
  //   Pattern = U&        -> T&,         Pattern = U&&       -> T&&.
  template <typename T>
  using AdjustT = typename std::conditional<
      std::is_const<typename std::remove_reference<Pattern>::type>::value,
      typename std::conditional<std::is_lvalue_reference<Pattern>::value,
                                const T&, const T&&>::type,
      typename std::conditional<std::is_lvalue_reference<Pattern>::value, T&,
                                T&&>::type>::type;

  // Casts `mock` to the adjusted reference type. The const_cast is safe:
  // when Pattern is const-qualified, AdjustT re-adds the const.
  template <typename MockType>
  static AdjustT<MockType> Adjust(const MockType& mock) {
    return static_cast<AdjustT<MockType>>(const_cast<MockType&>(mock));
  }
};
// Returns true iff the NUL-terminated string `a` is a prefix of the
// NUL-terminated string `b`.
constexpr bool PrefixOf(const char* a, const char* b) {
  while (*a != 0) {
    if (*a != *b) return false;
    ++a;
    ++b;
  }
  return true;
}
// Returns true iff the string literal `prefix` is a prefix of the string
// literal `str`. N and M include the terminating NUL.
template <size_t N, size_t M>
constexpr bool StartsWith(const char (&prefix)[N], const char (&str)[M]) {
  if (N > M) return false;
  for (size_t i = 0; prefix[i] != 0; ++i) {
    if (prefix[i] != str[i]) return false;
  }
  return true;
}
// Returns true iff the string literal `suffix` is a suffix of the string
// literal `str`. N and M include the terminating NUL.
template <size_t N, size_t M>
constexpr bool EndsWith(const char (&suffix)[N], const char (&str)[M]) {
  if (N > M) return false;
  const char* tail = str + (M - N);
  for (size_t i = 0; suffix[i] != 0; ++i) {
    if (suffix[i] != tail[i]) return false;
  }
  return true;
}
// Returns true iff the string literals `a` and `b` are equal (same array
// size and identical characters up to the first NUL in `a`).
template <size_t N, size_t M>
constexpr bool Equals(const char (&a)[N], const char (&b)[M]) {
  if (N != M) return false;
  for (size_t i = 0; a[i] != 0; ++i) {
    if (a[i] != b[i]) return false;
  }
  return true;
}
// Returns true iff `spec` is one of the specification-modifier tokens
// accepted by MOCK_METHOD's fourth argument: const, override, final,
// noexcept, noexcept(...), ref(&), ref(&&) or Calltype(...).
template <size_t N>
constexpr bool ValidateSpec(const char (&spec)[N]) {
  if (internal::Equals("const", spec)) return true;
  if (internal::Equals("override", spec)) return true;
  if (internal::Equals("final", spec)) return true;
  if (internal::Equals("noexcept", spec)) return true;
  if (internal::StartsWith("noexcept(", spec) &&
      internal::EndsWith(")", spec)) {
    return true;
  }
  if (internal::Equals("ref(&)", spec)) return true;
  if (internal::Equals("ref(&&)", spec)) return true;
  return internal::StartsWith("Calltype(", spec) &&
         internal::EndsWith(")", spec);
}
}
using internal::FunctionMocker;
}
// Defines a mock method in a mock class. Usage:
//   MOCK_METHOD(ReturnType, MethodName, (Args...));
//   MOCK_METHOD(ReturnType, MethodName, (Args...), (Specs...));
// where the optional fourth argument lists modifiers such as const,
// override, final, noexcept, noexcept(...), ref(&), ref(&&), Calltype(...).
// Dispatches on the number of macro arguments via GMOCK_PP_VARIADIC_CALL;
// arities other than 3 or 4 expand to a static_assert diagnostic. The
// warning pragmas suppress Clang's -Wunused-member-function for the
// generated gmock_* helper overloads.
#define MOCK_METHOD(...) \
  GMOCK_INTERNAL_WARNING_PUSH() \
  GMOCK_INTERNAL_WARNING_CLANG(ignored, "-Wunused-member-function") \
  GMOCK_PP_VARIADIC_CALL(GMOCK_INTERNAL_MOCK_METHOD_ARG_, __VA_ARGS__) \
  GMOCK_INTERNAL_WARNING_POP()
#define GMOCK_INTERNAL_MOCK_METHOD_ARG_1(...) \
GMOCK_INTERNAL_WRONG_ARITY(__VA_ARGS__)
#define GMOCK_INTERNAL_MOCK_METHOD_ARG_2(...) \
GMOCK_INTERNAL_WRONG_ARITY(__VA_ARGS__)
#define GMOCK_INTERNAL_MOCK_METHOD_ARG_3(_Ret, _MethodName, _Args) \
GMOCK_INTERNAL_MOCK_METHOD_ARG_4(_Ret, _MethodName, _Args, ())
#define GMOCK_INTERNAL_MOCK_METHOD_ARG_4(_Ret, _MethodName, _Args, _Spec) \
GMOCK_INTERNAL_ASSERT_PARENTHESIS(_Args); \
GMOCK_INTERNAL_ASSERT_PARENTHESIS(_Spec); \
GMOCK_INTERNAL_ASSERT_VALID_SIGNATURE( \
GMOCK_PP_NARG0 _Args, GMOCK_INTERNAL_SIGNATURE(_Ret, _Args)); \
GMOCK_INTERNAL_ASSERT_VALID_SPEC(_Spec) \
GMOCK_INTERNAL_MOCK_METHOD_IMPL( \
GMOCK_PP_NARG0 _Args, _MethodName, GMOCK_INTERNAL_HAS_CONST(_Spec), \
GMOCK_INTERNAL_HAS_OVERRIDE(_Spec), GMOCK_INTERNAL_HAS_FINAL(_Spec), \
GMOCK_INTERNAL_GET_NOEXCEPT_SPEC(_Spec), \
GMOCK_INTERNAL_GET_CALLTYPE_SPEC(_Spec), \
GMOCK_INTERNAL_GET_REF_SPEC(_Spec), \
(GMOCK_INTERNAL_SIGNATURE(_Ret, _Args)))
#define GMOCK_INTERNAL_MOCK_METHOD_ARG_5(...) \
GMOCK_INTERNAL_WRONG_ARITY(__VA_ARGS__)
#define GMOCK_INTERNAL_MOCK_METHOD_ARG_6(...) \
GMOCK_INTERNAL_WRONG_ARITY(__VA_ARGS__)
#define GMOCK_INTERNAL_MOCK_METHOD_ARG_7(...) \
GMOCK_INTERNAL_WRONG_ARITY(__VA_ARGS__)
#define GMOCK_INTERNAL_WRONG_ARITY(...) \
static_assert( \
false, \
"MOCK_METHOD must be called with 3 or 4 arguments. _Ret, " \
"_MethodName, _Args and optionally _Spec. _Args and _Spec must be " \
"enclosed in parentheses. If _Ret is a type with unprotected commas, " \
"it must also be enclosed in parentheses.")
#define GMOCK_INTERNAL_ASSERT_PARENTHESIS(_Tuple) \
static_assert( \
GMOCK_PP_IS_ENCLOSED_PARENS(_Tuple), \
GMOCK_PP_STRINGIZE(_Tuple) " should be enclosed in parentheses.")
#define GMOCK_INTERNAL_ASSERT_VALID_SIGNATURE(_N, ...) \
static_assert( \
std::is_function<__VA_ARGS__>::value, \
"Signature must be a function type, maybe return type contains " \
"unprotected comma."); \
static_assert( \
::testing::tuple_size<typename ::testing::internal::Function< \
__VA_ARGS__>::ArgumentTuple>::value == _N, \
"This method does not take " GMOCK_PP_STRINGIZE( \
_N) " arguments. Parenthesize all types with unprotected commas.")
#define GMOCK_INTERNAL_ASSERT_VALID_SPEC(_Spec) \
GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_ASSERT_VALID_SPEC_ELEMENT, ~, _Spec)
// The core expansion for one mocked method. It emits three members:
//  1) The method itself (with the requested const/ref/noexcept/override/
//     final/calltype decorations), which registers owner and name on the
//     FunctionMocker and forwards all arguments to it.
//  2) Two gmock_<MethodName> overloads used by ON_CALL/EXPECT_CALL: the
//     first takes one Matcher per parameter; the second (taking
//     WithoutMatchers and a function-type tag) supports the
//     EXPECT_CALL(mock, Method) spelling without explicit matchers by
//     delegating through ThisRefAdjuster with A<T>() matchers.
//  3) The mutable FunctionMocker data member (named via GMOCK_MOCKER_)
//     that stores expectations; mutable so const methods can record calls.
#define GMOCK_INTERNAL_MOCK_METHOD_IMPL(_N, _MethodName, _Constness, \
                                        _Override, _Final, _NoexceptSpec, \
                                        _CallType, _RefSpec, _Signature) \
  typename ::testing::internal::Function<GMOCK_PP_REMOVE_PARENS( \
      _Signature)>::Result \
  GMOCK_INTERNAL_EXPAND(_CallType) \
  _MethodName(GMOCK_PP_REPEAT(GMOCK_INTERNAL_PARAMETER, _Signature, _N)) \
  GMOCK_PP_IF(_Constness, const, ) \
  _RefSpec _NoexceptSpec GMOCK_PP_IF(_Override, override, ) \
  GMOCK_PP_IF(_Final, final, ) { \
    GMOCK_MOCKER_(_N, _Constness, _MethodName) \
        .SetOwnerAndName(this, #_MethodName); \
    return GMOCK_MOCKER_(_N, _Constness, _MethodName) \
        .Invoke(GMOCK_PP_REPEAT(GMOCK_INTERNAL_FORWARD_ARG, _Signature, _N)); \
  } \
  ::testing::MockSpec<GMOCK_PP_REMOVE_PARENS(_Signature)> gmock_##_MethodName( \
      GMOCK_PP_REPEAT(GMOCK_INTERNAL_MATCHER_PARAMETER, _Signature, _N)) \
      GMOCK_PP_IF(_Constness, const, ) _RefSpec { \
    GMOCK_MOCKER_(_N, _Constness, _MethodName).RegisterOwner(this); \
    return GMOCK_MOCKER_(_N, _Constness, _MethodName) \
        .With(GMOCK_PP_REPEAT(GMOCK_INTERNAL_MATCHER_ARGUMENT, , _N)); \
  } \
  ::testing::MockSpec<GMOCK_PP_REMOVE_PARENS(_Signature)> gmock_##_MethodName( \
      const ::testing::internal::WithoutMatchers&, \
      GMOCK_PP_IF(_Constness, const, )::testing::internal::Function< \
          GMOCK_PP_REMOVE_PARENS(_Signature)>*) const _RefSpec _NoexceptSpec { \
    return ::testing::internal::ThisRefAdjuster<GMOCK_PP_IF( \
        _Constness, const, ) int _RefSpec>::Adjust(*this) \
        .gmock_##_MethodName(GMOCK_PP_REPEAT( \
            GMOCK_INTERNAL_A_MATCHER_ARGUMENT, _Signature, _N)); \
  } \
  mutable ::testing::FunctionMocker<GMOCK_PP_REMOVE_PARENS(_Signature)> \
      GMOCK_MOCKER_(_N, _Constness, _MethodName)
#define GMOCK_INTERNAL_EXPAND(...) __VA_ARGS__
#define GMOCK_INTERNAL_HAS_CONST(_Tuple) \
GMOCK_PP_HAS_COMMA(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_DETECT_CONST, ~, _Tuple))
#define GMOCK_INTERNAL_HAS_OVERRIDE(_Tuple) \
GMOCK_PP_HAS_COMMA( \
GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_DETECT_OVERRIDE, ~, _Tuple))
#define GMOCK_INTERNAL_HAS_FINAL(_Tuple) \
GMOCK_PP_HAS_COMMA(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_DETECT_FINAL, ~, _Tuple))
#define GMOCK_INTERNAL_GET_NOEXCEPT_SPEC(_Tuple) \
GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_NOEXCEPT_SPEC_IF_NOEXCEPT, ~, _Tuple)
#define GMOCK_INTERNAL_NOEXCEPT_SPEC_IF_NOEXCEPT(_i, _, _elem) \
GMOCK_PP_IF( \
GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_NOEXCEPT(_i, _, _elem)), \
_elem, )
#define GMOCK_INTERNAL_GET_CALLTYPE_SPEC(_Tuple) \
GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_CALLTYPE_SPEC_IF_CALLTYPE, ~, _Tuple)
#define GMOCK_INTERNAL_CALLTYPE_SPEC_IF_CALLTYPE(_i, _, _elem) \
GMOCK_PP_IF( \
GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_CALLTYPE(_i, _, _elem)), \
GMOCK_PP_CAT(GMOCK_INTERNAL_UNPACK_, _elem), )
#define GMOCK_INTERNAL_GET_REF_SPEC(_Tuple) \
GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_REF_SPEC_IF_REF, ~, _Tuple)
#define GMOCK_INTERNAL_REF_SPEC_IF_REF(_i, _, _elem) \
GMOCK_PP_IF(GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_REF(_i, _, _elem)), \
GMOCK_PP_CAT(GMOCK_INTERNAL_UNPACK_, _elem), )
#ifdef GMOCK_INTERNAL_STRICT_SPEC_ASSERT
#define GMOCK_INTERNAL_ASSERT_VALID_SPEC_ELEMENT(_i, _, _elem) \
static_assert( \
::testing::internal::ValidateSpec(GMOCK_PP_STRINGIZE(_elem)), \
"Token \'" GMOCK_PP_STRINGIZE( \
_elem) "\' cannot be recognized as a valid specification " \
"modifier. Is a ',' missing?");
#else
#define GMOCK_INTERNAL_ASSERT_VALID_SPEC_ELEMENT(_i, _, _elem) \
static_assert( \
(GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_CONST(_i, _, _elem)) + \
GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_OVERRIDE(_i, _, _elem)) + \
GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_FINAL(_i, _, _elem)) + \
GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_NOEXCEPT(_i, _, _elem)) + \
GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_REF(_i, _, _elem)) + \
GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_CALLTYPE(_i, _, _elem))) == 1, \
GMOCK_PP_STRINGIZE( \
_elem) " cannot be recognized as a valid specification modifier.");
#endif
// Token-detection helpers: each GMOCK_INTERNAL_DETECT_X(_i, _, _elem)
// pastes _elem onto GMOCK_INTERNAL_DETECT_X_I_. When _elem is exactly the
// token X, the paste names a macro that expands to a lone comma, which the
// caller detects with GMOCK_PP_HAS_COMMA; for any other token the paste is
// a non-macro identifier and no comma appears.
#define GMOCK_INTERNAL_DETECT_CONST(_i, _, _elem) \
  GMOCK_PP_CAT(GMOCK_INTERNAL_DETECT_CONST_I_, _elem)
#define GMOCK_INTERNAL_DETECT_CONST_I_const ,
#define GMOCK_INTERNAL_DETECT_OVERRIDE(_i, _, _elem) \
  GMOCK_PP_CAT(GMOCK_INTERNAL_DETECT_OVERRIDE_I_, _elem)
#define GMOCK_INTERNAL_DETECT_OVERRIDE_I_override ,
#define GMOCK_INTERNAL_DETECT_FINAL(_i, _, _elem) \
  GMOCK_PP_CAT(GMOCK_INTERNAL_DETECT_FINAL_I_, _elem)
#define GMOCK_INTERNAL_DETECT_FINAL_I_final ,
#define GMOCK_INTERNAL_DETECT_NOEXCEPT(_i, _, _elem) \
  GMOCK_PP_CAT(GMOCK_INTERNAL_DETECT_NOEXCEPT_I_, _elem)
#define GMOCK_INTERNAL_DETECT_NOEXCEPT_I_noexcept ,
#define GMOCK_INTERNAL_DETECT_REF(_i, _, _elem) \
  GMOCK_PP_CAT(GMOCK_INTERNAL_DETECT_REF_I_, _elem)
#define GMOCK_INTERNAL_DETECT_REF_I_ref ,
// UNPACK_ref extracts the qualifier from ref(&) / ref(&&); UNPACK_Calltype
// extracts the calling convention from Calltype(...).
#define GMOCK_INTERNAL_UNPACK_ref(x) x
#define GMOCK_INTERNAL_DETECT_CALLTYPE(_i, _, _elem) \
  GMOCK_PP_CAT(GMOCK_INTERNAL_DETECT_CALLTYPE_I_, _elem)
#define GMOCK_INTERNAL_DETECT_CALLTYPE_I_Calltype ,
#define GMOCK_INTERNAL_UNPACK_Calltype(...) __VA_ARGS__
#define GMOCK_INTERNAL_SIGNATURE(_Ret, _Args) \
::testing::internal::identity_t<GMOCK_PP_IF(GMOCK_PP_IS_BEGIN_PARENS(_Ret), \
GMOCK_PP_REMOVE_PARENS, \
GMOCK_PP_IDENTITY)(_Ret)>( \
GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_GET_TYPE, _, _Args))
#define GMOCK_INTERNAL_GET_TYPE(_i, _, _elem) \
GMOCK_PP_COMMA_IF(_i) \
GMOCK_PP_IF(GMOCK_PP_IS_BEGIN_PARENS(_elem), GMOCK_PP_REMOVE_PARENS, \
GMOCK_PP_IDENTITY) \
(_elem)
#define GMOCK_INTERNAL_PARAMETER(_i, _Signature, _) \
GMOCK_PP_COMMA_IF(_i) \
GMOCK_INTERNAL_ARG_O(_i, GMOCK_PP_REMOVE_PARENS(_Signature)) \
gmock_a##_i
#define GMOCK_INTERNAL_FORWARD_ARG(_i, _Signature, _) \
GMOCK_PP_COMMA_IF(_i) \
::std::forward<GMOCK_INTERNAL_ARG_O( \
_i, GMOCK_PP_REMOVE_PARENS(_Signature))>(gmock_a##_i)
#define GMOCK_INTERNAL_MATCHER_PARAMETER(_i, _Signature, _) \
GMOCK_PP_COMMA_IF(_i) \
GMOCK_INTERNAL_MATCHER_O(_i, GMOCK_PP_REMOVE_PARENS(_Signature)) \
gmock_a##_i
#define GMOCK_INTERNAL_MATCHER_ARGUMENT(_i, _1, _2) \
GMOCK_PP_COMMA_IF(_i) \
gmock_a##_i
#define GMOCK_INTERNAL_A_MATCHER_ARGUMENT(_i, _Signature, _) \
GMOCK_PP_COMMA_IF(_i) \
::testing::A<GMOCK_INTERNAL_ARG_O(_i, GMOCK_PP_REMOVE_PARENS(_Signature))>()
#define GMOCK_INTERNAL_ARG_O(_i, ...) \
typename ::testing::internal::Function<__VA_ARGS__>::template Arg<_i>::type
#define GMOCK_INTERNAL_MATCHER_O(_i, ...) \
const ::testing::Matcher<typename ::testing::internal::Function< \
__VA_ARGS__>::template Arg<_i>::type>&
#define MOCK_METHOD0(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 0, __VA_ARGS__)
#define MOCK_METHOD1(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 1, __VA_ARGS__)
#define MOCK_METHOD2(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 2, __VA_ARGS__)
#define MOCK_METHOD3(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 3, __VA_ARGS__)
#define MOCK_METHOD4(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 4, __VA_ARGS__)
#define MOCK_METHOD5(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 5, __VA_ARGS__)
#define MOCK_METHOD6(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 6, __VA_ARGS__)
#define MOCK_METHOD7(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 7, __VA_ARGS__)
#define MOCK_METHOD8(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 8, __VA_ARGS__)
#define MOCK_METHOD9(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 9, __VA_ARGS__)
#define MOCK_METHOD10(m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(, , m, 10, __VA_ARGS__)
#define MOCK_CONST_METHOD0(m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(const, , m, 0, __VA_ARGS__)
#define MOCK_CONST_METHOD1(m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(const, , m, 1, __VA_ARGS__)
#define MOCK_CONST_METHOD2(m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(const, , m, 2, __VA_ARGS__)
#define MOCK_CONST_METHOD3(m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(const, , m, 3, __VA_ARGS__)
#define MOCK_CONST_METHOD4(m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(const, , m, 4, __VA_ARGS__)
#define MOCK_CONST_METHOD5(m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(const, , m, 5, __VA_ARGS__)
#define MOCK_CONST_METHOD6(m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(const, , m, 6, __VA_ARGS__)
#define MOCK_CONST_METHOD7(m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(const, , m, 7, __VA_ARGS__)
#define MOCK_CONST_METHOD8(m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(const, , m, 8, __VA_ARGS__)
#define MOCK_CONST_METHOD9(m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(const, , m, 9, __VA_ARGS__)
#define MOCK_CONST_METHOD10(m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(const, , m, 10, __VA_ARGS__)
#define MOCK_METHOD0_T(m, ...) MOCK_METHOD0(m, __VA_ARGS__)
#define MOCK_METHOD1_T(m, ...) MOCK_METHOD1(m, __VA_ARGS__)
#define MOCK_METHOD2_T(m, ...) MOCK_METHOD2(m, __VA_ARGS__)
#define MOCK_METHOD3_T(m, ...) MOCK_METHOD3(m, __VA_ARGS__)
#define MOCK_METHOD4_T(m, ...) MOCK_METHOD4(m, __VA_ARGS__)
#define MOCK_METHOD5_T(m, ...) MOCK_METHOD5(m, __VA_ARGS__)
#define MOCK_METHOD6_T(m, ...) MOCK_METHOD6(m, __VA_ARGS__)
#define MOCK_METHOD7_T(m, ...) MOCK_METHOD7(m, __VA_ARGS__)
#define MOCK_METHOD8_T(m, ...) MOCK_METHOD8(m, __VA_ARGS__)
#define MOCK_METHOD9_T(m, ...) MOCK_METHOD9(m, __VA_ARGS__)
#define MOCK_METHOD10_T(m, ...) MOCK_METHOD10(m, __VA_ARGS__)
#define MOCK_CONST_METHOD0_T(m, ...) MOCK_CONST_METHOD0(m, __VA_ARGS__)
#define MOCK_CONST_METHOD1_T(m, ...) MOCK_CONST_METHOD1(m, __VA_ARGS__)
#define MOCK_CONST_METHOD2_T(m, ...) MOCK_CONST_METHOD2(m, __VA_ARGS__)
#define MOCK_CONST_METHOD3_T(m, ...) MOCK_CONST_METHOD3(m, __VA_ARGS__)
#define MOCK_CONST_METHOD4_T(m, ...) MOCK_CONST_METHOD4(m, __VA_ARGS__)
#define MOCK_CONST_METHOD5_T(m, ...) MOCK_CONST_METHOD5(m, __VA_ARGS__)
#define MOCK_CONST_METHOD6_T(m, ...) MOCK_CONST_METHOD6(m, __VA_ARGS__)
#define MOCK_CONST_METHOD7_T(m, ...) MOCK_CONST_METHOD7(m, __VA_ARGS__)
#define MOCK_CONST_METHOD8_T(m, ...) MOCK_CONST_METHOD8(m, __VA_ARGS__)
#define MOCK_CONST_METHOD9_T(m, ...) MOCK_CONST_METHOD9(m, __VA_ARGS__)
#define MOCK_CONST_METHOD10_T(m, ...) MOCK_CONST_METHOD10(m, __VA_ARGS__)
#define MOCK_METHOD0_WITH_CALLTYPE(ct, m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 0, __VA_ARGS__)
#define MOCK_METHOD1_WITH_CALLTYPE(ct, m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 1, __VA_ARGS__)
#define MOCK_METHOD2_WITH_CALLTYPE(ct, m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 2, __VA_ARGS__)
#define MOCK_METHOD3_WITH_CALLTYPE(ct, m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 3, __VA_ARGS__)
#define MOCK_METHOD4_WITH_CALLTYPE(ct, m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 4, __VA_ARGS__)
#define MOCK_METHOD5_WITH_CALLTYPE(ct, m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 5, __VA_ARGS__)
#define MOCK_METHOD6_WITH_CALLTYPE(ct, m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 6, __VA_ARGS__)
#define MOCK_METHOD7_WITH_CALLTYPE(ct, m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 7, __VA_ARGS__)
#define MOCK_METHOD8_WITH_CALLTYPE(ct, m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 8, __VA_ARGS__)
#define MOCK_METHOD9_WITH_CALLTYPE(ct, m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 9, __VA_ARGS__)
#define MOCK_METHOD10_WITH_CALLTYPE(ct, m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 10, __VA_ARGS__)
#define MOCK_CONST_METHOD0_WITH_CALLTYPE(ct, m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 0, __VA_ARGS__)
#define MOCK_CONST_METHOD1_WITH_CALLTYPE(ct, m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 1, __VA_ARGS__)
#define MOCK_CONST_METHOD2_WITH_CALLTYPE(ct, m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 2, __VA_ARGS__)
#define MOCK_CONST_METHOD3_WITH_CALLTYPE(ct, m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 3, __VA_ARGS__)
#define MOCK_CONST_METHOD4_WITH_CALLTYPE(ct, m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 4, __VA_ARGS__)
#define MOCK_CONST_METHOD5_WITH_CALLTYPE(ct, m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 5, __VA_ARGS__)
#define MOCK_CONST_METHOD6_WITH_CALLTYPE(ct, m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 6, __VA_ARGS__)
#define MOCK_CONST_METHOD7_WITH_CALLTYPE(ct, m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 7, __VA_ARGS__)
#define MOCK_CONST_METHOD8_WITH_CALLTYPE(ct, m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 8, __VA_ARGS__)
#define MOCK_CONST_METHOD9_WITH_CALLTYPE(ct, m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 9, __VA_ARGS__)
#define MOCK_CONST_METHOD10_WITH_CALLTYPE(ct, m, ...) \
GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 10, __VA_ARGS__)
#define MOCK_METHOD0_T_WITH_CALLTYPE(ct, m, ...) \
MOCK_METHOD0_WITH_CALLTYPE(ct, m, __VA_ARGS__)
#define MOCK_METHOD1_T_WITH_CALLTYPE(ct, m, ...) \
MOCK_METHOD1_WITH_CALLTYPE(ct, m, __VA_ARGS__)
#define MOCK_METHOD2_T_WITH_CALLTYPE(ct, m, ...) \
MOCK_METHOD2_WITH_CALLTYPE(ct, m, __VA_ARGS__)
#define MOCK_METHOD3_T_WITH_CALLTYPE(ct, m, ...) \
MOCK_METHOD3_WITH_CALLTYPE(ct, m, __VA_ARGS__)
#define MOCK_METHOD4_T_WITH_CALLTYPE(ct, m, ...) \
MOCK_METHOD4_WITH_CALLTYPE(ct, m, __VA_ARGS__)
#define MOCK_METHOD5_T_WITH_CALLTYPE(ct, m, ...) \
MOCK_METHOD5_WITH_CALLTYPE(ct, m, __VA_ARGS__)
#define MOCK_METHOD6_T_WITH_CALLTYPE(ct, m, ...) \
MOCK_METHOD6_WITH_CALLTYPE(ct, m, __VA_ARGS__)
#define MOCK_METHOD7_T_WITH_CALLTYPE(ct, m, ...) \
MOCK_METHOD7_WITH_CALLTYPE(ct, m, __VA_ARGS__)
#define MOCK_METHOD8_T_WITH_CALLTYPE(ct, m, ...) \
MOCK_METHOD8_WITH_CALLTYPE(ct, m, __VA_ARGS__)
#define MOCK_METHOD9_T_WITH_CALLTYPE(ct, m, ...) \
MOCK_METHOD9_WITH_CALLTYPE(ct, m, __VA_ARGS__)
#define MOCK_METHOD10_T_WITH_CALLTYPE(ct, m, ...) \
MOCK_METHOD10_WITH_CALLTYPE(ct, m, __VA_ARGS__)
#define MOCK_CONST_METHOD0_T_WITH_CALLTYPE(ct, m, ...) \
MOCK_CONST_METHOD0_WITH_CALLTYPE(ct, m, __VA_ARGS__)
#define MOCK_CONST_METHOD1_T_WITH_CALLTYPE(ct, m, ...) \
MOCK_CONST_METHOD1_WITH_CALLTYPE(ct, m, __VA_ARGS__)
#define MOCK_CONST_METHOD2_T_WITH_CALLTYPE(ct, m, ...) \
MOCK_CONST_METHOD2_WITH_CALLTYPE(ct, m, __VA_ARGS__)
#define MOCK_CONST_METHOD3_T_WITH_CALLTYPE(ct, m, ...) \
MOCK_CONST_METHOD3_WITH_CALLTYPE(ct, m, __VA_ARGS__)
#define MOCK_CONST_METHOD4_T_WITH_CALLTYPE(ct, m, ...) \
MOCK_CONST_METHOD4_WITH_CALLTYPE(ct, m, __VA_ARGS__)
#define MOCK_CONST_METHOD5_T_WITH_CALLTYPE(ct, m, ...) \
MOCK_CONST_METHOD5_WITH_CALLTYPE(ct, m, __VA_ARGS__)
#define MOCK_CONST_METHOD6_T_WITH_CALLTYPE(ct, m, ...) \
MOCK_CONST_METHOD6_WITH_CALLTYPE(ct, m, __VA_ARGS__)
#define MOCK_CONST_METHOD7_T_WITH_CALLTYPE(ct, m, ...) \
MOCK_CONST_METHOD7_WITH_CALLTYPE(ct, m, __VA_ARGS__)
#define MOCK_CONST_METHOD8_T_WITH_CALLTYPE(ct, m, ...) \
MOCK_CONST_METHOD8_WITH_CALLTYPE(ct, m, __VA_ARGS__)
#define MOCK_CONST_METHOD9_T_WITH_CALLTYPE(ct, m, ...) \
MOCK_CONST_METHOD9_WITH_CALLTYPE(ct, m, __VA_ARGS__)
#define MOCK_CONST_METHOD10_T_WITH_CALLTYPE(ct, m, ...) \
MOCK_CONST_METHOD10_WITH_CALLTYPE(ct, m, __VA_ARGS__)
#define GMOCK_INTERNAL_MOCK_METHODN(constness, ct, Method, args_num, ...) \
GMOCK_INTERNAL_ASSERT_VALID_SIGNATURE( \
args_num, ::testing::internal::identity_t<__VA_ARGS__>); \
GMOCK_INTERNAL_MOCK_METHOD_IMPL( \
args_num, Method, GMOCK_PP_NARG0(constness), 0, 0, , ct, , \
(::testing::internal::identity_t<__VA_ARGS__>))
// Names the FunctionMocker data member for a mocked method. Arity,
// constness and the current line number are folded into the identifier so
// that overloads (and const/non-const pairs) get distinct members.
#define GMOCK_MOCKER_(arity, constness, Method) \
  GTEST_CONCAT_TOKEN_(gmock##constness##arity##_##Method##_, __LINE__)
#endif | #include "gmock/gmock-function-mocker.h"
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4503)
#ifdef GTEST_OS_WINDOWS
#include <objbase.h>
#endif
#include <functional>
#include <map>
#include <string>
#include <type_traits>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
namespace testing {
namespace gmock_function_mocker_test {
using testing::_;
using testing::A;
using testing::An;
using testing::AnyNumber;
using testing::Const;
using testing::DoDefault;
using testing::Eq;
using testing::Lt;
using testing::MockFunction;
using testing::Ref;
using testing::Return;
using testing::ReturnRef;
using testing::TypedEq;
// A copyable type whose "copy" constructor is a template accepting any
// argument type; verifies that mocked methods can take such types by
// const reference without ambiguity.
template <typename T>
class TemplatedCopyable {
 public:
  TemplatedCopyable() = default;

  template <typename U>
  TemplatedCopyable(const U& other) {}  // NOLINT: intentionally implicit.
};
// Abstract interface whose methods cover the cases MOCK_METHOD must handle:
// several arities, const/reference parameters, overloads on argument count,
// argument type and constness, comma-containing types, function-pointer
// returns, ref-qualified methods and (on Windows) STDMETHODCALLTYPE.
class FooInterface {
 public:
  virtual ~FooInterface() = default;

  // Basic arities, from nullary up to ten parameters.
  virtual void VoidReturning(int x) = 0;
  virtual int Nullary() = 0;
  virtual bool Unary(int x) = 0;
  virtual long Binary(short x, int y) = 0;
  virtual int Decimal(bool b, char c, short d, int e, long f,
                      float g, double h, unsigned i, char* j,
                      const std::string& k) = 0;

  // Parameter qualification.
  virtual bool TakesNonConstReference(int& n) = 0;
  virtual std::string TakesConstReference(const int& n) = 0;
  virtual bool TakesConst(const int x) = 0;

  // Overload-resolution cases: on arity, on argument type, on constness.
  virtual int OverloadedOnArgumentNumber() = 0;
  virtual int OverloadedOnArgumentNumber(int n) = 0;
  virtual int OverloadedOnArgumentType(int n) = 0;
  virtual char OverloadedOnArgumentType(char c) = 0;
  virtual int OverloadedOnConstness() = 0;
  virtual char OverloadedOnConstness() const = 0;

  // Tricky parameter and return types.
  virtual int TypeWithHole(int (*func)()) = 0;
  virtual int TypeWithComma(const std::map<int, std::string>& a_map) = 0;
  virtual int TypeWithTemplatedCopyCtor(const TemplatedCopyable<int>&) = 0;
  virtual int (*ReturnsFunctionPointer1(int))(bool) = 0;
  using fn_ptr = int (*)(bool);
  virtual fn_ptr ReturnsFunctionPointer2(int) = 0;

  // Ref-qualified methods, including an overload set covering all four
  // const/ref combinations.
  virtual int RefQualifiedConstRef() const& = 0;
  virtual int RefQualifiedConstRefRef() const&& = 0;
  virtual int RefQualifiedRef() & = 0;
  virtual int RefQualifiedRefRef() && = 0;
  virtual int RefQualifiedOverloaded() const& = 0;
  virtual int RefQualifiedOverloaded() const&& = 0;
  virtual int RefQualifiedOverloaded() & = 0;
  virtual int RefQualifiedOverloaded() && = 0;

#ifdef GTEST_OS_WINDOWS
  // Methods using the COM STDMETHODCALLTYPE calling convention.
  STDMETHOD_(int, CTNullary)() = 0;
  STDMETHOD_(bool, CTUnary)(int x) = 0;
  STDMETHOD_(int, CTDecimal)
  (bool b, char c, short d, int e, long f,
   float g, double h, unsigned i, char* j, const std::string& k) = 0;
  STDMETHOD_(char, CTConst)(int x) const = 0;
#endif
};
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4373)
class MockFoo : public FooInterface {
public:
MockFoo() = default;
MOCK_METHOD(void, VoidReturning, (int n));
MOCK_METHOD(int, Nullary, ());
MOCK_METHOD(bool, Unary, (int));
MOCK_METHOD(long, Binary, (short, int));
MOCK_METHOD(int, Decimal,
(bool, char, short, int, long, float,
double, unsigned, char*, const std::string& str),
(override));
MOCK_METHOD(bool, TakesNonConstReference, (int&));
MOCK_METHOD(std::string, TakesConstReference, (const int&));
MOCK_METHOD(bool, TakesConst, (const int));
MOCK_METHOD((std::map<int, std::string>), ReturnTypeWithComma, (), ());
MOCK_METHOD((std::map<int, std::string>), ReturnTypeWithComma, (int),
(const));
MOCK_METHOD(int, OverloadedOnArgumentNumber, ());
MOCK_METHOD(int, OverloadedOnArgumentNumber, (int));
MOCK_METHOD(int, OverloadedOnArgumentType, (int));
MOCK_METHOD(char, OverloadedOnArgumentType, (char));
MOCK_METHOD(int, OverloadedOnConstness, (), (override));
MOCK_METHOD(char, OverloadedOnConstness, (), (override, const));
MOCK_METHOD(int, TypeWithHole, (int (*)()), ());
MOCK_METHOD(int, TypeWithComma, ((const std::map<int, std::string>&)));
MOCK_METHOD(int, TypeWithTemplatedCopyCtor,
(const TemplatedCopyable<int>&));
MOCK_METHOD(int (*)(bool), ReturnsFunctionPointer1, (int), ());
MOCK_METHOD(fn_ptr, ReturnsFunctionPointer2, (int), ());
#ifdef GTEST_OS_WINDOWS
MOCK_METHOD(int, CTNullary, (), (Calltype(STDMETHODCALLTYPE)));
MOCK_METHOD(bool, CTUnary, (int), (Calltype(STDMETHODCALLTYPE)));
MOCK_METHOD(int, CTDecimal,
(bool b, char c, short d, int e, long f, float g, double h,
unsigned i, char* j, const std::string& k),
(Calltype(STDMETHODCALLTYPE)));
MOCK_METHOD(char, CTConst, (int), (const, Calltype(STDMETHODCALLTYPE)));
MOCK_METHOD((std::map<int, std::string>), CTReturnTypeWithComma, (),
(Calltype(STDMETHODCALLTYPE)));
#endif
MOCK_METHOD(int, RefQualifiedConstRef, (), (const, ref(&), override));
MOCK_METHOD(int, RefQualifiedConstRefRef, (), (const, ref(&&), override));
MOCK_METHOD(int, RefQualifiedRef, (), (ref(&), override));
MOCK_METHOD(int, RefQualifiedRefRef, (), (ref(&&), override));
MOCK_METHOD(int, RefQualifiedOverloaded, (), (const, ref(&), override));
MOCK_METHOD(int, RefQualifiedOverloaded, (), (const, ref(&&), override));
MOCK_METHOD(int, RefQualifiedOverloaded, (), (ref(&), override));
MOCK_METHOD(int, RefQualifiedOverloaded, (), (ref(&&), override));
private:
MockFoo(const MockFoo&) = delete;
MockFoo& operator=(const MockFoo&) = delete;
};
class LegacyMockFoo : public FooInterface {
public:
LegacyMockFoo() = default;
MOCK_METHOD1(VoidReturning, void(int n));
MOCK_METHOD0(Nullary, int());
MOCK_METHOD1(Unary, bool(int));
MOCK_METHOD2(Binary, long(short, int));
MOCK_METHOD10(Decimal, int(bool, char, short, int, long, float,
double, unsigned, char*, const std::string& str));
MOCK_METHOD1(TakesNonConstReference, bool(int&));
MOCK_METHOD1(TakesConstReference, std::string(const int&));
MOCK_METHOD1(TakesConst, bool(const int));
MOCK_METHOD0(ReturnTypeWithComma, std::map<int, std::string>());
MOCK_CONST_METHOD1(ReturnTypeWithComma,
std::map<int, std::string>(int));
MOCK_METHOD0(OverloadedOnArgumentNumber, int());
MOCK_METHOD1(OverloadedOnArgumentNumber, int(int));
MOCK_METHOD1(OverloadedOnArgumentType, int(int));
MOCK_METHOD1(OverloadedOnArgumentType, char(char));
MOCK_METHOD0(OverloadedOnConstness, int());
MOCK_CONST_METHOD0(OverloadedOnConstness, char());
MOCK_METHOD1(TypeWithHole, int(int (*)()));
MOCK_METHOD1(TypeWithComma,
int(const std::map<int, std::string>&));
MOCK_METHOD1(TypeWithTemplatedCopyCtor,
int(const TemplatedCopyable<int>&));
MOCK_METHOD1(ReturnsFunctionPointer1, int (*(int))(bool));
MOCK_METHOD1(ReturnsFunctionPointer2, fn_ptr(int));
#ifdef GTEST_OS_WINDOWS
MOCK_METHOD0_WITH_CALLTYPE(STDMETHODCALLTYPE, CTNullary, int());
MOCK_METHOD1_WITH_CALLTYPE(STDMETHODCALLTYPE, CTUnary, bool(int));
MOCK_METHOD10_WITH_CALLTYPE(STDMETHODCALLTYPE, CTDecimal,
int(bool b, char c, short d, int e,
long f, float g, double h,
unsigned i, char* j, const std::string& k));
MOCK_CONST_METHOD1_WITH_CALLTYPE(STDMETHODCALLTYPE, CTConst,
char(int));
MOCK_METHOD0_WITH_CALLTYPE(STDMETHODCALLTYPE, CTReturnTypeWithComma,
std::map<int, std::string>());
#endif
int RefQualifiedConstRef() const& override { return 0; }
int RefQualifiedConstRefRef() const&& override { return 0; }
int RefQualifiedRef() & override { return 0; }
int RefQualifiedRefRef() && override { return 0; }
int RefQualifiedOverloaded() const& override { return 0; }
int RefQualifiedOverloaded() const&& override { return 0; }
int RefQualifiedOverloaded() & override { return 0; }
int RefQualifiedOverloaded() && override { return 0; }
private:
LegacyMockFoo(const LegacyMockFoo&) = delete;
LegacyMockFoo& operator=(const LegacyMockFoo&) = delete;
};
GTEST_DISABLE_MSC_WARNINGS_POP_()
template <class T>
class FunctionMockerTest : public testing::Test {
protected:
FunctionMockerTest() : foo_(&mock_foo_) {}
FooInterface* const foo_;
T mock_foo_;
};
using FunctionMockerTestTypes = ::testing::Types<MockFoo, LegacyMockFoo>;
TYPED_TEST_SUITE(FunctionMockerTest, FunctionMockerTestTypes);
TYPED_TEST(FunctionMockerTest, MocksVoidFunction) {
EXPECT_CALL(this->mock_foo_, VoidReturning(Lt(100)));
this->foo_->VoidReturning(0);
}
TYPED_TEST(FunctionMockerTest, MocksNullaryFunction) {
EXPECT_CALL(this->mock_foo_, Nullary())
.WillOnce(DoDefault())
.WillOnce(Return(1));
EXPECT_EQ(0, this->foo_->Nullary());
EXPECT_EQ(1, this->foo_->Nullary());
}
TYPED_TEST(FunctionMockerTest, MocksUnaryFunction) {
EXPECT_CALL(this->mock_foo_, Unary(Eq(2))).Times(2).WillOnce(Return(true));
EXPECT_TRUE(this->foo_->Unary(2));
EXPECT_FALSE(this->foo_->Unary(2));
}
TYPED_TEST(FunctionMockerTest, MocksBinaryFunction) {
EXPECT_CALL(this->mock_foo_, Binary(2, _)).WillOnce(Return(3));
EXPECT_EQ(3, this->foo_->Binary(2, 1));
}
TYPED_TEST(FunctionMockerTest, MocksDecimalFunction) {
EXPECT_CALL(this->mock_foo_,
Decimal(true, 'a', 0, 0, 1L, A<float>(), Lt(100), 5U, NULL, "hi"))
.WillOnce(Return(5));
EXPECT_EQ(5, this->foo_->Decimal(true, 'a', 0, 0, 1, 0, 0, 5, nullptr, "hi"));
}
TYPED_TEST(FunctionMockerTest, MocksFunctionWithNonConstReferenceArgument) {
int a = 0;
EXPECT_CALL(this->mock_foo_, TakesNonConstReference(Ref(a)))
.WillOnce(Return(true));
EXPECT_TRUE(this->foo_->TakesNonConstReference(a));
}
TYPED_TEST(FunctionMockerTest, MocksFunctionWithConstReferenceArgument) {
int a = 0;
EXPECT_CALL(this->mock_foo_, TakesConstReference(Ref(a)))
.WillOnce(Return("Hello"));
EXPECT_EQ("Hello", this->foo_->TakesConstReference(a));
}
TYPED_TEST(FunctionMockerTest, MocksFunctionWithConstArgument) {
EXPECT_CALL(this->mock_foo_, TakesConst(Lt(10))).WillOnce(DoDefault());
EXPECT_FALSE(this->foo_->TakesConst(5));
}
TYPED_TEST(FunctionMockerTest, MocksFunctionsOverloadedOnArgumentNumber) {
EXPECT_CALL(this->mock_foo_, OverloadedOnArgumentNumber())
.WillOnce(Return(1));
EXPECT_CALL(this->mock_foo_, OverloadedOnArgumentNumber(_))
.WillOnce(Return(2));
EXPECT_EQ(2, this->foo_->OverloadedOnArgumentNumber(1));
EXPECT_EQ(1, this->foo_->OverloadedOnArgumentNumber());
}
TYPED_TEST(FunctionMockerTest, MocksFunctionsOverloadedOnArgumentType) {
EXPECT_CALL(this->mock_foo_, OverloadedOnArgumentType(An<int>()))
.WillOnce(Return(1));
EXPECT_CALL(this->mock_foo_, OverloadedOnArgumentType(TypedEq<char>('a')))
.WillOnce(Return('b'));
EXPECT_EQ(1, this->foo_->OverloadedOnArgumentType(0));
EXPECT_EQ('b', this->foo_->OverloadedOnArgumentType('a'));
}
TYPED_TEST(FunctionMockerTest, MocksFunctionsOverloadedOnConstnessOfThis) {
EXPECT_CALL(this->mock_foo_, OverloadedOnConstness());
EXPECT_CALL(Const(this->mock_foo_), OverloadedOnConstness())
.WillOnce(Return('a'));
EXPECT_EQ(0, this->foo_->OverloadedOnConstness());
EXPECT_EQ('a', Const(*this->foo_).OverloadedOnConstness());
}
TYPED_TEST(FunctionMockerTest, MocksReturnTypeWithComma) {
const std::map<int, std::string> a_map;
EXPECT_CALL(this->mock_foo_, ReturnTypeWithComma()).WillOnce(Return(a_map));
EXPECT_CALL(this->mock_foo_, ReturnTypeWithComma(42)).WillOnce(Return(a_map));
EXPECT_EQ(a_map, this->mock_foo_.ReturnTypeWithComma());
EXPECT_EQ(a_map, this->mock_foo_.ReturnTypeWithComma(42));
}
TYPED_TEST(FunctionMockerTest, MocksTypeWithTemplatedCopyCtor) {
EXPECT_CALL(this->mock_foo_, TypeWithTemplatedCopyCtor(_))
.WillOnce(Return(true));
EXPECT_TRUE(this->foo_->TypeWithTemplatedCopyCtor(TemplatedCopyable<int>()));
}
#ifdef GTEST_OS_WINDOWS
TYPED_TEST(FunctionMockerTest, MocksNullaryFunctionWithCallType) {
EXPECT_CALL(this->mock_foo_, CTNullary())
.WillOnce(Return(-1))
.WillOnce(Return(0));
EXPECT_EQ(-1, this->foo_->CTNullary());
EXPECT_EQ(0, this->foo_->CTNullary());
}
TYPED_TEST(FunctionMockerTest, MocksUnaryFunctionWithCallType) {
EXPECT_CALL(this->mock_foo_, CTUnary(Eq(2)))
.Times(2)
.WillOnce(Return(true))
.WillOnce(Return(false));
EXPECT_TRUE(this->foo_->CTUnary(2));
EXPECT_FALSE(this->foo_->CTUnary(2));
}
TYPED_TEST(FunctionMockerTest, MocksDecimalFunctionWithCallType) {
EXPECT_CALL(this->mock_foo_, CTDecimal(true, 'a', 0, 0, 1L, A<float>(),
Lt(100), 5U, NULL, "hi"))
.WillOnce(Return(10));
EXPECT_EQ(10, this->foo_->CTDecimal(true, 'a', 0, 0, 1, 0, 0, 5, NULL, "hi"));
}
TYPED_TEST(FunctionMockerTest, MocksFunctionsConstFunctionWithCallType) {
EXPECT_CALL(Const(this->mock_foo_), CTConst(_)).WillOnce(Return('a'));
EXPECT_EQ('a', Const(*this->foo_).CTConst(0));
}
TYPED_TEST(FunctionMockerTest, MocksReturnTypeWithCommaAndCallType) {
const std::map<int, std::string> a_map;
EXPECT_CALL(this->mock_foo_, CTReturnTypeWithComma()).WillOnce(Return(a_map));
EXPECT_EQ(a_map, this->mock_foo_.CTReturnTypeWithComma());
}
#endif
TEST(FunctionMockerTest, RefQualified) {
MockFoo mock_foo;
EXPECT_CALL(mock_foo, RefQualifiedConstRef).WillOnce(Return(1));
EXPECT_CALL(std::move(mock_foo),
RefQualifiedConstRefRef)
.WillOnce(Return(2));
EXPECT_CALL(mock_foo, RefQualifiedRef).WillOnce(Return(3));
EXPECT_CALL(std::move(mock_foo),
RefQualifiedRefRef)
.WillOnce(Return(4));
EXPECT_CALL(static_cast<const MockFoo&>(mock_foo), RefQualifiedOverloaded())
.WillOnce(Return(5));
EXPECT_CALL(static_cast<const MockFoo&&>(mock_foo), RefQualifiedOverloaded())
.WillOnce(Return(6));
EXPECT_CALL(static_cast<MockFoo&>(mock_foo), RefQualifiedOverloaded())
.WillOnce(Return(7));
EXPECT_CALL(static_cast<MockFoo&&>(mock_foo), RefQualifiedOverloaded())
.WillOnce(Return(8));
EXPECT_EQ(mock_foo.RefQualifiedConstRef(), 1);
EXPECT_EQ(std::move(mock_foo).RefQualifiedConstRefRef(), 2);
EXPECT_EQ(mock_foo.RefQualifiedRef(), 3);
EXPECT_EQ(std::move(mock_foo).RefQualifiedRefRef(), 4);
EXPECT_EQ(std::cref(mock_foo).get().RefQualifiedOverloaded(), 5);
EXPECT_EQ(std::move(std::cref(mock_foo).get())
.RefQualifiedOverloaded(),
6);
EXPECT_EQ(mock_foo.RefQualifiedOverloaded(), 7);
EXPECT_EQ(std::move(mock_foo).RefQualifiedOverloaded(), 8);
}
// Minimal mock with the modern MOCK_METHOD spelling; non-copyable so that
// accidental copies of the mock fail to compile.
class MockB {
 public:
  MockB() = default;
  MOCK_METHOD(void, DoB, ());
 private:
  MockB(const MockB&) = delete;
  MockB& operator=(const MockB&) = delete;
};
// Same mock declared with the legacy MOCK_METHOD0 macro; the typed test
// below runs identical checks against both spellings.
class LegacyMockB {
 public:
  LegacyMockB() = default;
  MOCK_METHOD0(DoB, void());
 private:
  LegacyMockB(const LegacyMockB&) = delete;
  LegacyMockB& operator=(const LegacyMockB&) = delete;
};
// Typed-test fixture parameterized over the two mock spellings.
template <typename T>
class ExpectCallTest : public ::testing::Test {};
using ExpectCallTestTypes = ::testing::Types<MockB, LegacyMockB>;
TYPED_TEST_SUITE(ExpectCallTest, ExpectCallTestTypes);
// A mock method with no EXPECT_CALL may be invoked zero, one, or many times;
// each scope destroys the mock to trigger expectation verification.
TYPED_TEST(ExpectCallTest, UnmentionedFunctionCanBeCalledAnyNumberOfTimes) {
  { TypeParam b; }
  {
    TypeParam b;
    b.DoB();
  }
  {
    TypeParam b;
    b.DoB();
    b.DoB();
  }
}
// Abstract stack interface used to exercise mocking of class templates.
template <typename T>
class StackInterface {
 public:
  virtual ~StackInterface() = default;
  virtual void Push(const T& value) = 0;
  virtual void Pop() = 0;
  virtual int GetSize() const = 0;
  virtual const T& GetTop() const = 0;
};
// Template mock using MOCK_METHOD; also covers qualifier lists (final,
// const, override) and return types containing commas (extra parentheses
// keep the comma out of the macro argument list).
template <typename T>
class MockStack : public StackInterface<T> {
 public:
  MockStack() = default;
  MOCK_METHOD(void, Push, (const T& elem), ());
  MOCK_METHOD(void, Pop, (), (final));
  MOCK_METHOD(int, GetSize, (), (const, override));
  MOCK_METHOD(const T&, GetTop, (), (const));
  MOCK_METHOD((std::map<int, int>), ReturnTypeWithComma, (), ());
  MOCK_METHOD((std::map<int, int>), ReturnTypeWithComma, (int), (const));
 private:
  MockStack(const MockStack&) = delete;
  MockStack& operator=(const MockStack&) = delete;
};
// Same template mock declared with the legacy _T macro family.
template <typename T>
class LegacyMockStack : public StackInterface<T> {
 public:
  LegacyMockStack() = default;
  MOCK_METHOD1_T(Push, void(const T& elem));
  MOCK_METHOD0_T(Pop, void());
  MOCK_CONST_METHOD0_T(GetSize, int());
  MOCK_CONST_METHOD0_T(GetTop, const T&());
  MOCK_METHOD0_T(ReturnTypeWithComma, std::map<int, int>());
  MOCK_CONST_METHOD1_T(ReturnTypeWithComma, std::map<int, int>(int));
 private:
  LegacyMockStack(const LegacyMockStack&) = delete;
  LegacyMockStack& operator=(const LegacyMockStack&) = delete;
};
// Fixture parameterized over both template-mock spellings.
template <typename T>
class TemplateMockTest : public ::testing::Test {};
using TemplateMockTestTypes =
    ::testing::Types<MockStack<int>, LegacyMockStack<int>>;
TYPED_TEST_SUITE(TemplateMockTest, TemplateMockTestTypes);
// Drives a push/pop sequence through the template mock; GetSize returns
// 0 -> 1 -> 0 across the three calls, and GetTop returns a reference bound
// to a local int.
TYPED_TEST(TemplateMockTest, Works) {
  TypeParam mock;
  EXPECT_CALL(mock, GetSize())
      .WillOnce(Return(0))
      .WillOnce(Return(1))
      .WillOnce(Return(0));
  EXPECT_CALL(mock, Push(_));
  int n = 5;
  EXPECT_CALL(mock, GetTop()).WillOnce(ReturnRef(n));
  EXPECT_CALL(mock, Pop()).Times(AnyNumber());
  EXPECT_EQ(0, mock.GetSize());
  mock.Push(5);
  EXPECT_EQ(1, mock.GetSize());
  EXPECT_EQ(5, mock.GetTop());
  mock.Pop();
  EXPECT_EQ(0, mock.GetSize());
}
// Exercises both overloads (nullary and unary) of the comma-return-type
// method on the template mock.
TYPED_TEST(TemplateMockTest, MethodWithCommaInReturnTypeWorks) {
  TypeParam mock;
  const std::map<int, int> a_map;
  EXPECT_CALL(mock, ReturnTypeWithComma()).WillOnce(Return(a_map));
  EXPECT_CALL(mock, ReturnTypeWithComma(1)).WillOnce(Return(a_map));
  EXPECT_EQ(a_map, mock.ReturnTypeWithComma());
  EXPECT_EQ(a_map, mock.ReturnTypeWithComma(1));
}
#ifdef GTEST_OS_WINDOWS
// Windows-only: the same stack interface declared with the STDMETHODCALLTYPE
// calling convention (COM-style STDMETHOD_ macros).
template <typename T>
class StackInterfaceWithCallType {
 public:
  virtual ~StackInterfaceWithCallType() {}
  STDMETHOD_(void, Push)(const T& value) = 0;
  STDMETHOD_(void, Pop)() = 0;
  STDMETHOD_(int, GetSize)() const = 0;
  STDMETHOD_(const T&, GetTop)() const = 0;
};
// Template mock using MOCK_METHOD with a Calltype(...) qualifier.
template <typename T>
class MockStackWithCallType : public StackInterfaceWithCallType<T> {
 public:
  MockStackWithCallType() {}
  MOCK_METHOD(void, Push, (const T& elem),
              (Calltype(STDMETHODCALLTYPE), override));
  MOCK_METHOD(void, Pop, (), (Calltype(STDMETHODCALLTYPE), override));
  MOCK_METHOD(int, GetSize, (), (Calltype(STDMETHODCALLTYPE), override, const));
  MOCK_METHOD(const T&, GetTop, (),
              (Calltype(STDMETHODCALLTYPE), override, const));
 private:
  MockStackWithCallType(const MockStackWithCallType&) = delete;
  MockStackWithCallType& operator=(const MockStackWithCallType&) = delete;
};
// Same mock with the legacy _WITH_CALLTYPE macro family.
template <typename T>
class LegacyMockStackWithCallType : public StackInterfaceWithCallType<T> {
 public:
  LegacyMockStackWithCallType() {}
  MOCK_METHOD1_T_WITH_CALLTYPE(STDMETHODCALLTYPE, Push, void(const T& elem));
  MOCK_METHOD0_T_WITH_CALLTYPE(STDMETHODCALLTYPE, Pop, void());
  MOCK_CONST_METHOD0_T_WITH_CALLTYPE(STDMETHODCALLTYPE, GetSize, int());
  MOCK_CONST_METHOD0_T_WITH_CALLTYPE(STDMETHODCALLTYPE, GetTop, const T&());
 private:
  LegacyMockStackWithCallType(const LegacyMockStackWithCallType&) = delete;
  LegacyMockStackWithCallType& operator=(const LegacyMockStackWithCallType&) =
      delete;
};
// Fixture parameterized over both calltype mock spellings.
template <typename T>
class TemplateMockTestWithCallType : public ::testing::Test {};
using TemplateMockTestWithCallTypeTypes =
    ::testing::Types<MockStackWithCallType<int>,
                     LegacyMockStackWithCallType<int>>;
TYPED_TEST_SUITE(TemplateMockTestWithCallType,
                 TemplateMockTestWithCallTypeTypes);
// Same push/pop scenario as TemplateMockTest.Works, but through the
// STDMETHODCALLTYPE mocks.
TYPED_TEST(TemplateMockTestWithCallType, Works) {
  TypeParam mock;
  EXPECT_CALL(mock, GetSize())
      .WillOnce(Return(0))
      .WillOnce(Return(1))
      .WillOnce(Return(0));
  EXPECT_CALL(mock, Push(_));
  int n = 5;
  EXPECT_CALL(mock, GetTop()).WillOnce(ReturnRef(n));
  EXPECT_CALL(mock, Pop()).Times(AnyNumber());
  EXPECT_EQ(0, mock.GetSize());
  mock.Push(5);
  EXPECT_EQ(1, mock.GetSize());
  EXPECT_EQ(5, mock.GetTop());
  mock.Pop();
  EXPECT_EQ(0, mock.GetSize());
}
#endif
// Mock declarations for a method overloaded on arity, wrapped in a macro to
// verify MOCK_METHOD expands correctly inside other macros.
#define MY_MOCK_METHODS1_ \
  MOCK_METHOD(void, Overloaded, ()); \
  MOCK_METHOD(int, Overloaded, (int), (const)); \
  MOCK_METHOD(bool, Overloaded, (bool f, int n))
// Legacy-macro equivalent of MY_MOCK_METHODS1_.
#define LEGACY_MY_MOCK_METHODS1_ \
  MOCK_METHOD0(Overloaded, void()); \
  MOCK_CONST_METHOD1(Overloaded, int(int n)); \
  MOCK_METHOD2(Overloaded, bool(bool f, int n))
class MockOverloadedOnArgNumber {
 public:
  MockOverloadedOnArgNumber() = default;
  MY_MOCK_METHODS1_;
 private:
  MockOverloadedOnArgNumber(const MockOverloadedOnArgNumber&) = delete;
  MockOverloadedOnArgNumber& operator=(const MockOverloadedOnArgNumber&) =
      delete;
};
class LegacyMockOverloadedOnArgNumber {
 public:
  LegacyMockOverloadedOnArgNumber() = default;
  LEGACY_MY_MOCK_METHODS1_;
 private:
  LegacyMockOverloadedOnArgNumber(const LegacyMockOverloadedOnArgNumber&) =
      delete;
  LegacyMockOverloadedOnArgNumber& operator=(
      const LegacyMockOverloadedOnArgNumber&) = delete;
};
// Fixture parameterized over both macro spellings of the arity-overloaded
// mocks declared above.
template <typename T>
class OverloadedMockMethodTest : public ::testing::Test {};
using OverloadedMockMethodTestTypes =
    ::testing::Types<MockOverloadedOnArgNumber,
                     LegacyMockOverloadedOnArgNumber>;
TYPED_TEST_SUITE(OverloadedMockMethodTest, OverloadedMockMethodTestTypes);
// Each EXPECT_CALL binds to a distinct overload selected by argument count.
TYPED_TEST(OverloadedMockMethodTest, CanOverloadOnArgNumberInMacroBody) {
  TypeParam mock;
  EXPECT_CALL(mock, Overloaded());
  EXPECT_CALL(mock, Overloaded(1)).WillOnce(Return(2));
  EXPECT_CALL(mock, Overloaded(true, 1)).WillOnce(Return(true));
  mock.Overloaded();
  EXPECT_EQ(2, mock.Overloaded(1));
  EXPECT_TRUE(mock.Overloaded(true, 1));
}
// Mock declarations for a method overloaded only on constness, wrapped in a
// macro to verify macro-in-macro expansion.
#define MY_MOCK_METHODS2_ \
  MOCK_CONST_METHOD1(Overloaded, int(int n)); \
  MOCK_METHOD1(Overloaded, int(int n))
class MockOverloadedOnConstness {
 public:
  MockOverloadedOnConstness() = default;
  MY_MOCK_METHODS2_;
 private:
  MockOverloadedOnConstness(const MockOverloadedOnConstness&) = delete;
  MockOverloadedOnConstness& operator=(const MockOverloadedOnConstness&) =
      delete;
};
// The non-const and const overloads get independent expectations: the call
// through the object picks the non-const one (2), the call through the
// const pointer picks the const one (3).
TEST(MockMethodOverloadedMockMethodTest, CanOverloadOnConstnessInMacroBody) {
  MockOverloadedOnConstness mock;
  const MockOverloadedOnConstness* const_mock = &mock;
  EXPECT_CALL(mock, Overloaded(1)).WillOnce(Return(2));
  EXPECT_CALL(*const_mock, Overloaded(1)).WillOnce(Return(3));
  EXPECT_EQ(2, mock.Overloaded(1));
  EXPECT_EQ(3, const_mock->Overloaded(1));
}
// MockFunction<F> smoke tests across arities and return types.
TEST(MockMethodMockFunctionTest, WorksForVoidNullary) {
  MockFunction<void()> foo;
  EXPECT_CALL(foo, Call());
  foo.Call();
}
TEST(MockMethodMockFunctionTest, WorksForNonVoidNullary) {
  MockFunction<int()> foo;
  EXPECT_CALL(foo, Call()).WillOnce(Return(1)).WillOnce(Return(2));
  EXPECT_EQ(1, foo.Call());
  EXPECT_EQ(2, foo.Call());
}
TEST(MockMethodMockFunctionTest, WorksForVoidUnary) {
  MockFunction<void(int)> foo;
  EXPECT_CALL(foo, Call(1));
  foo.Call(1);
}
TEST(MockMethodMockFunctionTest, WorksForNonVoidBinary) {
  MockFunction<int(bool, int)> foo;
  EXPECT_CALL(foo, Call(false, 42)).WillOnce(Return(1)).WillOnce(Return(2));
  EXPECT_CALL(foo, Call(true, Ge(100))).WillOnce(Return(3));
  EXPECT_EQ(1, foo.Call(false, 42));
  EXPECT_EQ(2, foo.Call(false, 42));
  EXPECT_EQ(3, foo.Call(true, 120));
}
// MockFunction supports up to 10 parameters.
TEST(MockMethodMockFunctionTest, WorksFor10Arguments) {
  MockFunction<int(bool a0, char a1, int a2, int a3, int a4, int a5, int a6,
                   char a7, int a8, bool a9)>
      foo;
  EXPECT_CALL(foo, Call(_, 'a', _, _, _, _, _, _, _, _))
      .WillOnce(Return(1))
      .WillOnce(Return(2));
  EXPECT_EQ(1, foo.Call(false, 'a', 0, 0, 0, 0, 0, 'b', 0, true));
  EXPECT_EQ(2, foo.Call(true, 'a', 0, 0, 0, 0, 0, 'b', 1, false));
}
// AsStdFunction() adapts the mock into a std::function that forwards to
// Call().
TEST(MockMethodMockFunctionTest, AsStdFunction) {
  MockFunction<int(int)> foo;
  auto call = [](const std::function<int(int)>& f, int i) { return f(i); };
  EXPECT_CALL(foo, Call(1)).WillOnce(Return(-1));
  EXPECT_CALL(foo, Call(2)).WillOnce(Return(-2));
  EXPECT_EQ(-1, call(foo.AsStdFunction(), 1));
  EXPECT_EQ(-2, call(foo.AsStdFunction(), 2));
}
// The returned reference stays bound to `value`: mutating `value` is
// observable through `ref`.
TEST(MockMethodMockFunctionTest, AsStdFunctionReturnsReference) {
  MockFunction<int&()> foo;
  int value = 1;
  EXPECT_CALL(foo, Call()).WillOnce(ReturnRef(value));
  int& ref = foo.AsStdFunction()();
  EXPECT_EQ(1, ref);
  value = 2;
  EXPECT_EQ(2, ref);
}
TEST(MockMethodMockFunctionTest, AsStdFunctionWithReferenceParameter) {
  MockFunction<int(int&)> foo;
  auto call = [](const std::function<int(int&)>& f, int& i) { return f(i); };
  int i = 42;
  EXPECT_CALL(foo, Call(i)).WillOnce(Return(-1));
  EXPECT_EQ(-1, call(foo.AsStdFunction(), i));
}
namespace {
// Compile-time probe: true iff MockFunction deduced its signature template
// argument F as Expected.
template <typename Expected, typename F>
static constexpr bool IsMockFunctionTemplateArgumentDeducedTo(
    const internal::MockFunction<F>&) {
  return std::is_same<F, Expected>::value;
}
}
// Typed suite over representative raw function signatures.
template <typename F>
class MockMethodMockFunctionSignatureTest : public Test {};
using MockMethodMockFunctionSignatureTypes =
    Types<void(), int(), void(int), int(int), int(bool, int),
          int(bool, char, int, int, int, int, int, char, int, bool)>;
TYPED_TEST_SUITE(MockMethodMockFunctionSignatureTest,
                 MockMethodMockFunctionSignatureTypes);
// MockFunction<R(Args...)> keeps the raw signature as its template argument.
TYPED_TEST(MockMethodMockFunctionSignatureTest,
           IsMockFunctionTemplateArgumentDeducedForRawSignature) {
  using Argument = TypeParam;
  MockFunction<Argument> foo;
  EXPECT_TRUE(IsMockFunctionTemplateArgumentDeducedTo<TypeParam>(foo));
}
// MockFunction<std::function<F>> unwraps to the underlying signature F.
TYPED_TEST(MockMethodMockFunctionSignatureTest,
           IsMockFunctionTemplateArgumentDeducedForStdFunction) {
  using Argument = std::function<TypeParam>;
  MockFunction<Argument> foo;
  EXPECT_TRUE(IsMockFunctionTemplateArgumentDeducedTo<TypeParam>(foo));
}
// Call() has the same type whether MockFunction was instantiated with the
// raw signature or with std::function of it.
TYPED_TEST(
    MockMethodMockFunctionSignatureTest,
    IsMockFunctionCallMethodSignatureTheSameForRawSignatureAndStdFunction) {
  using ForRawSignature = decltype(&MockFunction<TypeParam>::Call);
  using ForStdFunction =
      decltype(&MockFunction<std::function<TypeParam>>::Call);
  EXPECT_TRUE((std::is_same<ForRawSignature, ForStdFunction>::value));
}
// Any callable wrapper C<F> is unwrapped the same way std::function is.
template <typename F>
struct AlternateCallable {};
TYPED_TEST(MockMethodMockFunctionSignatureTest,
           IsMockFunctionTemplateArgumentDeducedForAlternateCallable) {
  using Argument = AlternateCallable<TypeParam>;
  MockFunction<Argument> foo;
  EXPECT_TRUE(IsMockFunctionTemplateArgumentDeducedTo<TypeParam>(foo));
}
TYPED_TEST(MockMethodMockFunctionSignatureTest,
           IsMockFunctionCallMethodSignatureTheSameForAlternateCallable) {
  using ForRawSignature = decltype(&MockFunction<TypeParam>::Call);
  using ForStdFunction =
      decltype(&MockFunction<std::function<TypeParam>>::Call);
  EXPECT_TRUE((std::is_same<ForRawSignature, ForStdFunction>::value));
}
// Probe structs: a mocked method's storage footprint should not grow with
// its parameter count, and the modern and legacy macros should cost the
// same.
struct MockMethodSizes0 {
  MOCK_METHOD(void, func, ());
};
struct MockMethodSizes1 {
  MOCK_METHOD(void, func, (int));
};
struct MockMethodSizes2 {
  MOCK_METHOD(void, func, (int, int));
};
struct MockMethodSizes3 {
  MOCK_METHOD(void, func, (int, int, int));
};
struct MockMethodSizes4 {
  MOCK_METHOD(void, func, (int, int, int, int));
};
struct LegacyMockMethodSizes0 {
  MOCK_METHOD0(func, void());
};
struct LegacyMockMethodSizes1 {
  MOCK_METHOD1(func, void(int));
};
struct LegacyMockMethodSizes2 {
  MOCK_METHOD2(func, void(int, int));
};
struct LegacyMockMethodSizes3 {
  MOCK_METHOD3(func, void(int, int, int));
};
struct LegacyMockMethodSizes4 {
  MOCK_METHOD4(func, void(int, int, int, int));
};
TEST(MockMethodMockFunctionTest, MockMethodSizeOverhead) {
  EXPECT_EQ(sizeof(MockMethodSizes0), sizeof(MockMethodSizes1));
  EXPECT_EQ(sizeof(MockMethodSizes0), sizeof(MockMethodSizes2));
  EXPECT_EQ(sizeof(MockMethodSizes0), sizeof(MockMethodSizes3));
  EXPECT_EQ(sizeof(MockMethodSizes0), sizeof(MockMethodSizes4));
  EXPECT_EQ(sizeof(LegacyMockMethodSizes0), sizeof(LegacyMockMethodSizes1));
  EXPECT_EQ(sizeof(LegacyMockMethodSizes0), sizeof(LegacyMockMethodSizes2));
  EXPECT_EQ(sizeof(LegacyMockMethodSizes0), sizeof(LegacyMockMethodSizes3));
  EXPECT_EQ(sizeof(LegacyMockMethodSizes0), sizeof(LegacyMockMethodSizes4));
  EXPECT_EQ(sizeof(LegacyMockMethodSizes0), sizeof(MockMethodSizes0));
}
// MOCK_METHOD must not generate any member function that would trip
// -Wunused-member-function; promoted to an error so the test fails to
// compile if one appears.
TEST(MockMethodMockFunctionTest, EnsureNoUnusedMemberFunction) {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic error "-Wunused-member-function"
#endif
  struct Foo {
    MOCK_METHOD(void, foo, ());
  };
  EXPECT_CALL(Foo(), foo()).Times(0);
#ifdef __clang__
#pragma clang diagnostic pop
#endif
}
void hasTwoParams(int, int);
void MaybeThrows();
void DoesntThrow() noexcept;
// Every supported noexcept spelling, including dependent noexcept(noexcept(
// expr)) forms and combinations with const, must survive MOCK_METHOD.
struct MockMethodNoexceptSpecifier {
  MOCK_METHOD(void, func1, (), (noexcept));
  MOCK_METHOD(void, func2, (), (noexcept(true)));
  MOCK_METHOD(void, func3, (), (noexcept(false)));
  MOCK_METHOD(void, func4, (), (noexcept(noexcept(MaybeThrows()))));
  MOCK_METHOD(void, func5, (), (noexcept(noexcept(DoesntThrow()))));
  MOCK_METHOD(void, func6, (), (noexcept(noexcept(DoesntThrow())), const));
  MOCK_METHOD(void, func7, (), (const, noexcept(noexcept(DoesntThrow()))));
  MOCK_METHOD(void, func8, (), (noexcept(noexcept(hasTwoParams(1, 2))), const));
};
TEST(MockMethodMockFunctionTest, NoexceptSpecifierPreserved) {
  EXPECT_TRUE(noexcept(std::declval<MockMethodNoexceptSpecifier>().func1()));
  EXPECT_TRUE(noexcept(std::declval<MockMethodNoexceptSpecifier>().func2()));
  EXPECT_FALSE(noexcept(std::declval<MockMethodNoexceptSpecifier>().func3()));
  EXPECT_FALSE(noexcept(std::declval<MockMethodNoexceptSpecifier>().func4()));
  EXPECT_TRUE(noexcept(std::declval<MockMethodNoexceptSpecifier>().func5()));
  EXPECT_TRUE(noexcept(std::declval<MockMethodNoexceptSpecifier>().func6()));
  EXPECT_TRUE(noexcept(std::declval<MockMethodNoexceptSpecifier>().func7()));
  EXPECT_EQ(noexcept(std::declval<MockMethodNoexceptSpecifier>().func8()),
            noexcept(hasTwoParams(1, 2)));
}
}
}
GTEST_DISABLE_MSC_WARNINGS_POP_() | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googlemock/include/gmock/gmock-function-mocker.h | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googlemock/test/gmock-function-mocker_test.cc | a1e255a582377e1006bb88a408ac3f933ba7c916 |
785c1424-b0f8-4811-be48-bce25417681f | cpp | tensorflow/tensorflow | legalize_tf_to_hlo | tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo.cc | tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo_test.cc | #include "tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/log/log.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Pass/Pass.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/compilation_timer.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_mlir.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/compile_only_client.h"
#include "xla/shape.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
using metrics::IncrementTfMlirBridgeSecondPhaseCounter;
using metrics::MlirBridgeSecondPhaseMetric;
using tpu::MlirToHloArgs;
// Compiles a TF MLIR computation to XLA HLO with the "combined" bridge:
// phase 1 lowers via the MLIR-based path (CompileFromMlirToXlaHlo); phase 2
// feeds the serialized MLIR output through the old graph-based compiler
// (v1::CompileTensorflowGraphToHlo). Each phase's success/failure is
// recorded in the phase-2 streamz counter. On success `*compilation_result`
// is populated and a copy of it is returned; on failure the failing phase's
// status is propagated.
absl::StatusOr<XlaCompilationResult> LegalizeTfToHlo(
    const tpu::MlirToHloArgs& computation,
    const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
    llvm::StringRef device_type,
    XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
    const std::vector<tensorflow::TensorShape>& arg_shapes,
    std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
    std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
    std::vector<std::unique_ptr<mlir::Pass>>& custom_legalization_passes,
    xla::CompileOnlyClient* client, XlaCompilationResult* compilation_result) {
  LOG_FIRST_N(INFO, 1) << "Compiling MLIR computation to XLA HLO using the "
                          "Combined MLIR Tf2Xla Bridge.";
  // NOTE(review): the leading `false` is an opaque positional flag --
  // confirm its meaning against CompileFromMlirToXlaHlo's signature.
  absl::StatusOr<std::string> mlir_compilation =
      internal::CompileFromMlirToXlaHlo(
          false, computation, metadata, device_type,
          shape_determination_fns, use_tuple_args, compilation_result,
          custom_legalization_passes, arg_shapes, arg_core_mapping,
          per_core_arg_shapes);
  if (!mlir_compilation.ok()) {
    IncrementTfMlirBridgeSecondPhaseCounter(
        MlirBridgeSecondPhaseMetric::kMlirCombinedMlirFailure);
    return mlir_compilation.status();
  }
  IncrementTfMlirBridgeSecondPhaseCounter(
      MlirBridgeSecondPhaseMetric::kMlirCombinedMlirSuccess);
  // Phase 2: hand the MLIR produced above to the old bridge for final HLO
  // compilation.
  Status old_bridge_status = v1::CompileTensorflowGraphToHlo(
      MlirToHloArgs{mlir_compilation.value()}, metadata, use_tuple_args,
      shape_determination_fns, arg_shapes, arg_core_mapping,
      per_core_arg_shapes, client, compilation_result);
  if (!old_bridge_status.ok()) {
    IncrementTfMlirBridgeSecondPhaseCounter(
        MlirBridgeSecondPhaseMetric::kMlirCombinedOldFailure);
    return old_bridge_status;
  }
  IncrementTfMlirBridgeSecondPhaseCounter(
      MlirBridgeSecondPhaseMetric::kMlirCombinedOldSuccess);
  return *compilation_result;
}
};
};
}; | #include "tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo.h"
#include <cstdint>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/Pass/Pass.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/test_matchers.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/client_library.h"
#include "xla/shape.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
using ::tensorflow::monitoring::testing::CellReader;
using tpu::MlirToHloArgs;
using tpu::ShardingAndIndex;
using tpu::TPUCompileMetadataProto;
// Streamz metric names and cell labels read by the tests below.
static constexpr char kMlirLegalizeCount[] =
    "/tensorflow/core/tf2xla/v1/mlir_failed_xla_legalize_tf_count";
static constexpr char kMlirLegalizeErrors[] =
    "/tensorflow/core/tf2xla/v1/mlir_failed_xla_legalize_tf_pass_count";
static constexpr char kBridgeStatusCounter[] =
    "/tensorflow/core/tf2xla/api/v2/phase2_compilation_status";
constexpr char kMlirCombinedMlirSuccess[] = "kMlirCombinedMlirSuccess";
constexpr char kMlirCombinedOldSuccess[] = "kMlirCombinedOldSuccess";
constexpr char kMlirCombinedOldFailure[] = "kMlirCombinedOldFailure";
// A legalizable module: tf.Acos on a 1-element float tensor.
static constexpr char kMlirModuleStr[] = R"(
  module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
  func.func @main(%arg0 : tensor<1xf32>) -> tensor<1xf32> {
    %0 = "tf.Acos"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
   func.return %0 : tensor<1xf32>
  }
})";
// A module that cannot be legalized: tf.DoesntExist is not a real op.
static constexpr char kBadMlirModuleStr[] = R"(
  module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
    func.func @main() -> tensor<1xi32> {
      %0 = "tf.DoesntExist"() {value = dense<1000> : tensor<1xi32>} : () -> tensor<1xi32>
      func.return %0 : tensor<1xi32>
    }
  })";
// Compiles `module_str` through LegalizeTfToHlo with a single 1-element
// float parameter, using the Host platform's compile-only client. Returns
// the compilation result or the first failing phase's status.
absl::StatusOr<XlaCompiler::CompilationResult> CompileMlirModule(
    const char* module_str) {
  MlirToHloArgs mlir_to_hlo_args;
  mlir_to_hlo_args.rollout_state =
      ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED;
  mlir_to_hlo_args.mlir_module = module_str;
  se::Platform* platform =
      se::PlatformManager::PlatformWithName("Host").value();
  auto client =
      xla::ClientLibrary::GetOrCreateCompileOnlyClient(platform).value();
  // One float parameter of shape {1}, matching kMlirModuleStr's @main.
  std::vector<TensorShape> arg_shapes = {{1}};
  TPUCompileMetadataProto metadata_proto;
  auto arg = metadata_proto.add_args();
  arg->set_dtype(DataType::DT_FLOAT);
  arg->set_kind(TPUCompileMetadataProto::Arg::PARAMETER);
  metadata_proto.add_retvals();
  bool use_tuple_args = true;
  std::vector<ShardingAndIndex> arg_core_mapping;
  std::vector<std::vector<xla::Shape>> per_core_arg_shapes;
  std::vector<std::unique_ptr<mlir::Pass>> custom_legalization_passes;
  auto compilation_result = std::make_unique<XlaCompilationResult>();
  return LegalizeTfToHlo(mlir_to_hlo_args, metadata_proto, use_tuple_args,
                         "XLA_TPU_JIT",
                         {}, arg_shapes,
                         &arg_core_mapping, &per_core_arg_shapes,
                         custom_legalization_passes, client,
                         compilation_result.get());
}
// A good module compiles; the MLIR-failure counter for tf.Acos does not
// move, and both combined-bridge success counters increment.
TEST(LegalizeWithCombinedBridge, DoesNotUseMlirLowering) {
  CellReader<int64_t> mlir_bridge_legalize_count(kMlirLegalizeCount);
  CellReader<int64_t> counts(kBridgeStatusCounter);
  auto result = CompileMlirModule(kMlirModuleStr);
  ASSERT_THAT(result, IsOkOrFiltered());
  EXPECT_EQ(mlir_bridge_legalize_count.Delta("tf.Acos"), 0);
  EXPECT_THAT(result,
              IncrementedOrFiltered(counts.Delta(kMlirCombinedMlirSuccess), 1));
  EXPECT_THAT(result,
              IncrementedOrFiltered(counts.Delta(kMlirCombinedOldSuccess), 1));
}
// An unknown op passes the MLIR phase (counted as success) but fails the old
// bridge (counted as old-failure).
TEST(LegalizeWithCombinedBridge,
     CorrectlyCountsMlirBridgePassingAndGraphBridgeFailing) {
  CellReader<int64_t> legalize_failure_count(kMlirLegalizeErrors);
  CellReader<int64_t> counts(kBridgeStatusCounter);
  auto result = CompileMlirModule(kBadMlirModuleStr);
  ASSERT_FALSE(result.ok());
  EXPECT_EQ(legalize_failure_count.Read("tf.DoesntExist", "Unknown"), 0);
  EXPECT_THAT(result,
              IncrementedOrFiltered(counts.Delta(kMlirCombinedMlirSuccess), 1));
  EXPECT_THAT(result,
              IncrementedOrFiltered(counts.Delta(kMlirCombinedOldFailure), 1));
}
// Compiling a static-shape module records one "not dynamic" function in the
// dynamism counter.
TEST(LegalizeWithCombinedBridge, RecordsDynamicOps) {
  static constexpr char kDynamismFunctionCounterStreamzName[] =
      "/tensorflow/core/tf2xla/api/v2/dynamism_function_counter";
  constexpr char kNotDynamicFunctionName[] = "kNotDynamicFunction";
  CellReader<int64_t> dynamic_function_op_count(
      kDynamismFunctionCounterStreamzName);
  auto result = CompileMlirModule(kMlirModuleStr);
  ASSERT_TRUE(result.ok());
  EXPECT_EQ(dynamic_function_op_count.Delta(kNotDynamicFunctionName), 1);
}
};
};
}; | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ec564571-bff9-401b-a529-0fcf0afbe055 | cpp | tensorflow/tensorflow | convolution_transposed_thin | tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_thin.cc | tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_thin_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_thin.h"
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"
namespace tflite {
namespace gpu {
// Builds the generated-kernel source from the attributes: input depth in
// 4-channel slices, output channel count, and the kernel spatial size.
ConvolutionTransposedThin::ConvolutionTransposedThin(
    const OperationDef& definition, const ConvolutionTransposedAttributes& attr,
    const GpuInfo& gpu_info)
    : GPUOperation(definition) {
  code_ = GenerateConvolutionTransposedCode(
      definition_, DivideRoundUp(attr.weights.shape.i, 4), attr.weights.shape.o,
      int2(attr.weights.shape.w, attr.weights.shape.h));
  // Adreno 3xx with F16 precision needs the full-SIMD compiler option.
  if (definition_.precision == CalculationsPrecision::F16 &&
      gpu_info.IsAdreno() && gpu_info.adreno_info.IsAdreno3xx()) {
    compiler_options_.push_back(CompilerOptions::kAdrenoFullSimd);
  }
}
// Move constructor: all state lives in the GPUOperation base.
ConvolutionTransposedThin::ConvolutionTransposedThin(
    ConvolutionTransposedThin&& operation)
    : GPUOperation(std::move(operation)) {}
// Move assignment: delegates to the GPUOperation base after a self-move
// guard; this class adds no state of its own.
ConvolutionTransposedThin& ConvolutionTransposedThin::operator=(
    ConvolutionTransposedThin&& operation) {
  if (this == &operation) {
    return *this;
  }
  GPUOperation::operator=(std::move(operation));
  return *this;
}
// Emits the kernel source for a "thin" transposed convolution (<= 4 output
// channels, kernel size == stride, no padding). Each work item reads one
// source pixel column across all src_depth slices, accumulates a
// kernel_size.y x kernel_size.x grid of partial results in registers, then
// writes the kernel_size-sized output patch with bias added.
//
// src_depth: input channels in 4-wide slices; dst_channels: 1..4 output
// channels; kernel_size: spatial kernel (== stride) dimensions.
std::string ConvolutionTransposedThin::GenerateConvolutionTransposedCode(
    const OperationDef& op_def, int src_depth, int dst_channels,
    const int2& kernel_size) {
  AddSrcTensor("src_tensor", op_def.src_tensors[0]);
  AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
  // With a single output channel the accumulator is scalar, so element
  // accesses need no ".x" suffix.
  const std::string channel_x = dst_channels == 1 ? "" : ".x";
  const std::vector<std::string> postfix = {channel_x, ".y", ".z", ".w"};
  const std::vector<std::string> channel = {".x", ".y", ".z", ".w"};
  const std::string type_postfix =
      dst_channels == 1 ? "" : std::to_string(dst_channels);
  std::string accum_type;
  switch (op_def.precision) {
    case CalculationsPrecision::F32:
    case CalculationsPrecision::F32_F16:
      accum_type = "float" + type_postfix;
      break;
    case CalculationsPrecision::F16:
      accum_type = "half" + type_postfix;
      break;
  }
  std::string c;
  c += "MAIN_FUNCTION($0) {\n";
  if (op_def.IsBatchSupported()) {
    c += " int linear_id = GLOBAL_ID_0;\n";
    c += " int X = linear_id / args.dst_tensor.Batch();\n";
    c += " int B = linear_id % args.dst_tensor.Batch();\n";
    c += " args.dst_tensor.SetBatchRef(B);\n";
    c += " args.src_tensor.SetBatchRef(B);\n";
  } else {
    c += " int X = GLOBAL_ID_0;\n";
  }
  c += " int Y = GLOBAL_ID_1;\n";
  c += " if (X >= args.src_tensor.Width() || Y >= args.src_tensor.Height()) "
       "return;\n";
  c += " " + accum_type + " r[" + std::to_string(kernel_size.y) + "][" +
       std::to_string(kernel_size.x) + "];\n";
  // First source slice initializes the accumulators with '='.
  c += " {\n";
  c += " FLT4 src = args.src_tensor.Read(X, Y, 0);\n";
  int index = 0;
  for (int y = 0; y < kernel_size.y; ++y) {
    for (int x = 0; x < kernel_size.x; ++x) {
      std::string r_s =
          " r[" + std::to_string(y) + "][" + std::to_string(x) + "]";
      for (int d = 0; d < dst_channels; ++d) {
        c += r_s + postfix[d] + " = dot(src, args.weights.Read(" +
             std::to_string(index) + "));\n";
        index++;
      }
    }
  }
  c += " }\n";
  // Remaining slices accumulate with '+='. The guard `X > -i` is trivially
  // true for valid X >= 0; presumably it only shapes codegen -- confirm
  // against upstream before removing.
  for (int i = 1; i < src_depth; ++i) {
    c += " if (X > " + std::to_string(-i) +
         // BUG FIX: this literal was previously unterminated ("\") {"
         // without a closing quote/newline), which broke compilation.
         ") {\n";
    c +=
        " FLT4 src = args.src_tensor.Read(X, Y, " + std::to_string(i) + ");\n";
    for (int y = 0; y < kernel_size.y; ++y) {
      for (int x = 0; x < kernel_size.x; ++x) {
        std::string r_s =
            " r[" + std::to_string(y) + "][" + std::to_string(x) + "]";
        for (int d = 0; d < dst_channels; ++d) {
          c += r_s + postfix[d] + " += dot(src, args.weights.Read(" +
               std::to_string(index) + "));\n";
          index++;
        }
      }
    }
    c += " }\n";
  }
  // Map the source pixel to its kernel_size-sized output patch, add bias
  // (stored after the per-slice weights, starting at `index`), and write.
  c += " X *= " + std::to_string(kernel_size.x) + ";\n";
  c += " Y *= " + std::to_string(kernel_size.y) + ";\n";
  for (int y = 0; y < kernel_size.y; ++y) {
    for (int x = 0; x < kernel_size.x; ++x) {
      const std::string x_coord = "X + " + std::to_string(x);
      const std::string y_coord = "Y + " + std::to_string(y);
      c += " if (" + x_coord + " < args.dst_tensor.Width() && " + y_coord +
           " < args.dst_tensor.Height()) {\n";
      c += " FLT4 result = args.weights.Read(" + std::to_string(index) +
           ");\n";
      for (int d = 0; d < dst_channels; ++d) {
        c += " result" + channel[d] + " += r[" + std::to_string(y) + "][" +
             std::to_string(x) + "]" + postfix[d] + ";\n";
      }
      c += " args.dst_tensor.Write(result, " + x_coord + ", " + y_coord +
           ", 0);\n";
      c += " }\n";
    }
  }
  c += "}\n";
  return c;
}
// One work item per source pixel (times batch); Z is unused.
int3 ConvolutionTransposedThin::GetGridSize() const {
  return int3(src_[0]->Width() * dst_[0]->Batch(), src_[0]->Height(), 1);
}
// The "thin" specialization applies only when there are at most 4 output
// channels, the kernel exactly matches the stride, and there is no padding
// in either spatial dimension.
bool IsConvolutionTransposedThinSupported(
    const ConvolutionTransposedAttributes& attr) {
  const bool few_output_channels = attr.weights.shape.o <= 4;
  const bool kernel_matches_stride = attr.weights.shape.w == attr.stride.w &&
                                     attr.weights.shape.h == attr.stride.h;
  const bool no_padding =
      attr.padding.prepended.w == 0 && attr.padding.prepended.h == 0 &&
      attr.padding.appended.w == 0 && attr.padding.appended.h == 0;
  return few_output_channels && kernel_matches_stride && no_padding;
}
// Factory: constructs the operation and uploads weights and bias from the
// attributes.
ConvolutionTransposedThin CreateConvolutionTransposedThin(
    const GpuInfo& gpu_info, const OperationDef& definition,
    const ConvolutionTransposedAttributes& attr) {
  ConvolutionTransposedThin result(definition, attr, gpu_info);
  result.UploadData(attr.weights, attr.bias);
  return result;
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_thin_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
// Runs the shared test-util scenarios against the OpenCL execution
// environment.
TEST_F(OpenCLOperationTest, ConvolutionTransposedThinSimpleWeights) {
  auto status = ConvolutionTransposedThinSimpleWeightsTest(&exec_env_);
  ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConvolutionTransposedThin) {
  auto status = ConvolutionTransposedThinTest(&exec_env_);
  ASSERT_TRUE(status.ok()) << status.message();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_thin.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_thin_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b2311b06-979b-4608-94cb-3cc979515934 | cpp | google/quiche | quic_arena_scoped_ptr | quiche/quic/core/quic_arena_scoped_ptr.h | quiche/quic/core/quic_arena_scoped_ptr_test.cc | #ifndef QUICHE_QUIC_CORE_QUIC_ARENA_SCOPED_PTR_H_
#define QUICHE_QUIC_CORE_QUIC_ARENA_SCOPED_PTR_H_
#include <cstdint>
#include "quiche/quic/platform/api/quic_export.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
template <typename T>
class QUICHE_NO_EXPORT QuicArenaScopedPtr {
static_assert(alignof(T*) > 1,
"QuicArenaScopedPtr can only store objects that are aligned to "
"greater than 1 byte.");
public:
QuicArenaScopedPtr();
explicit QuicArenaScopedPtr(T* value);
template <typename U>
QuicArenaScopedPtr(QuicArenaScopedPtr<U>&& other);
template <typename U>
QuicArenaScopedPtr& operator=(QuicArenaScopedPtr<U>&& other);
~QuicArenaScopedPtr();
T* get() const;
T& operator*() const;
T* operator->() const;
void swap(QuicArenaScopedPtr& other);
void reset(T* value = nullptr);
bool is_from_arena();
private:
template <typename U>
friend class QuicArenaScopedPtr;
template <uint32_t ArenaSize>
friend class QuicOneBlockArena;
enum class ConstructFrom { kHeap, kArena };
QuicArenaScopedPtr(void* value, ConstructFrom from);
QuicArenaScopedPtr(const QuicArenaScopedPtr&) = delete;
QuicArenaScopedPtr& operator=(const QuicArenaScopedPtr&) = delete;
static const uintptr_t kFromArenaMask = 0x1;
void* value_;
};
// Comparisons are on the managed pointers; get() strips the arena tag bit,
// so heap/arena origin never affects equality.
template <typename T>
bool operator==(const QuicArenaScopedPtr<T>& lhs,
                const QuicArenaScopedPtr<T>& rhs) {
  return lhs.get() == rhs.get();
}
template <typename T>
bool operator!=(const QuicArenaScopedPtr<T>& lhs,
                const QuicArenaScopedPtr<T>& rhs) {
  return !(lhs == rhs);
}
template <typename T>
bool operator==(std::nullptr_t, const QuicArenaScopedPtr<T>& rhs) {
  return rhs.get() == nullptr;
}
template <typename T>
bool operator!=(std::nullptr_t, const QuicArenaScopedPtr<T>& rhs) {
  return rhs.get() != nullptr;
}
template <typename T>
bool operator==(const QuicArenaScopedPtr<T>& lhs, std::nullptr_t) {
  return lhs.get() == nullptr;
}
template <typename T>
bool operator!=(const QuicArenaScopedPtr<T>& lhs, std::nullptr_t) {
  return lhs.get() != nullptr;
}
template <typename T>
QuicArenaScopedPtr<T>::QuicArenaScopedPtr() : value_(nullptr) {}
template <typename T>
QuicArenaScopedPtr<T>::QuicArenaScopedPtr(T* value)
: QuicArenaScopedPtr(value, ConstructFrom::kHeap) {}
template <typename T>
template <typename U>
QuicArenaScopedPtr<T>::QuicArenaScopedPtr(QuicArenaScopedPtr<U>&& other)
: value_(other.value_) {
static_assert(
std::is_base_of<T, U>::value || std::is_same<T, U>::value,
"Cannot construct QuicArenaScopedPtr; type is not derived or same.");
other.value_ = nullptr;
}
// Converting move assignment, implemented as a swap: the previously owned
// value ends up in `other` and is released by `other`'s destructor rather
// than inside this function.
template <typename T>
template <typename U>
QuicArenaScopedPtr<T>& QuicArenaScopedPtr<T>::operator=(
    QuicArenaScopedPtr<U>&& other) {
  static_assert(
      std::is_base_of<T, U>::value || std::is_same<T, U>::value,
      "Cannot assign QuicArenaScopedPtr; type is not derived or same.");
  swap(other);
  return *this;
}
// Destructor releases the owned object; reset() picks delete vs. in-place
// destruction depending on the from-arena tag.
template <typename T>
QuicArenaScopedPtr<T>::~QuicArenaScopedPtr() {
  reset();
}
// Returns the raw pointer with the from-arena tag (lowest bit of value_)
// masked off.
template <typename T>
T* QuicArenaScopedPtr<T>::get() const {
  return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(value_) &
                              ~kFromArenaMask);
}
// Dereference; no null check, so calling this on an empty pointer is UB.
template <typename T>
T& QuicArenaScopedPtr<T>::operator*() const {
  return *get();
}
template <typename T>
T* QuicArenaScopedPtr<T>::operator->() const {
  return get();
}
// Swaps the tagged pointers, exchanging both ownership and arena-ness.
template <typename T>
void QuicArenaScopedPtr<T>::swap(QuicArenaScopedPtr& other) {
  using std::swap;
  swap(value_, other.value_);
}
// True iff the owned object lives in an arena (tag bit set in value_).
template <typename T>
bool QuicArenaScopedPtr<T>::is_from_arena() {
  return (reinterpret_cast<uintptr_t>(value_) & kFromArenaMask) != 0;
}
// Releases the current object -- arena-backed objects are destroyed in place
// (the arena owns the storage), heap-backed objects are deleted -- then takes
// ownership of `value`, which must be heap-allocated (or null).
template <typename T>
void QuicArenaScopedPtr<T>::reset(T* value) {
  if (value_ != nullptr) {
    if (is_from_arena()) {
      get()->~T();
    } else {
      delete get();
    }
  }
  // The incoming pointer must have its low bit clear so it can carry the tag.
  QUICHE_DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(value) & kFromArenaMask);
  value_ = value;
}
// Private tagging constructor (used via QuicOneBlockArena friendship):
// records the allocation origin by setting the low bit of the stored pointer
// for arena-backed objects.
template <typename T>
QuicArenaScopedPtr<T>::QuicArenaScopedPtr(void* value, ConstructFrom from_arena)
    : value_(value) {
  QUICHE_DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(value_) & kFromArenaMask);
  switch (from_arena) {
    case ConstructFrom::kHeap:
      break;
    case ConstructFrom::kArena:
      value_ = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(value_) |
                                       QuicArenaScopedPtr<T>::kFromArenaMask);
      break;
  }
}
}
#endif | #include "quiche/quic/core/quic_arena_scoped_ptr.h"
#include <string>
#include <utility>
#include <vector>
#include "quiche/quic/core/quic_one_block_arena.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic::test {
namespace {
// Allocation origin for the parameterized tests below.
enum class TestParam { kFromHeap, kFromArena };
// Simple payload type; the 1KB buffer makes lifetime/ownership bugs more
// visible to memory-checking tools.
struct TestObject {
  explicit TestObject(uintptr_t value) : value(value) { buffer.resize(1024); }
  uintptr_t value;
  std::vector<char> buffer;
};
// Used by gtest (via PrintToStringParamName) to label test instances.
std::string PrintToString(const TestParam& p) {
  switch (p) {
    case TestParam::kFromHeap:
      return "heap";
    case TestParam::kFromArena:
      return "arena";
  }
  QUICHE_DCHECK(false);
  return "?";
}
// Fixture parameterized on allocation origin; CreateObject() builds the
// pointer from the heap or from the member arena according to GetParam(),
// and sanity-checks is_from_arena() on the result.
class QuicArenaScopedPtrParamTest : public QuicTestWithParam<TestParam> {
 protected:
  QuicArenaScopedPtr<TestObject> CreateObject(uintptr_t value) {
    QuicArenaScopedPtr<TestObject> ptr;
    switch (GetParam()) {
      case TestParam::kFromHeap:
        ptr = QuicArenaScopedPtr<TestObject>(new TestObject(value));
        QUICHE_CHECK(!ptr.is_from_arena());
        break;
      case TestParam::kFromArena:
        ptr = arena_.New<TestObject>(value);
        QUICHE_CHECK(ptr.is_from_arena());
        break;
    }
    return ptr;
  }
 private:
  QuicOneBlockArena<1024> arena_;
};
// Run every TEST_P once per allocation origin, labeled "heap"/"arena".
INSTANTIATE_TEST_SUITE_P(QuicArenaScopedPtrParamTest,
                         QuicArenaScopedPtrParamTest,
                         testing::Values(TestParam::kFromHeap,
                                         TestParam::kFromArena),
                         ::testing::PrintToStringParamName());
// Default-constructed and nullptr-constructed pointers are empty and equal.
TEST_P(QuicArenaScopedPtrParamTest, NullObjects) {
  QuicArenaScopedPtr<TestObject> def;
  QuicArenaScopedPtr<TestObject> null(nullptr);
  EXPECT_EQ(def, null);
  EXPECT_EQ(def, nullptr);
  EXPECT_EQ(null, nullptr);
}
// is_from_arena() distinguishes arena-backed from heap-backed pointers.
TEST_P(QuicArenaScopedPtrParamTest, FromArena) {
  QuicOneBlockArena<1024> arena_;
  EXPECT_TRUE(arena_.New<TestObject>(0).is_from_arena());
  EXPECT_FALSE(
      QuicArenaScopedPtr<TestObject>(new TestObject(0)).is_from_arena());
}
// Move-assignment replaces the previously owned object.
TEST_P(QuicArenaScopedPtrParamTest, Assign) {
  QuicArenaScopedPtr<TestObject> ptr = CreateObject(12345);
  ptr = CreateObject(54321);
  EXPECT_EQ(54321u, ptr->value);
}
// Move-construction transfers ownership and empties the source.
TEST_P(QuicArenaScopedPtrParamTest, MoveConstruct) {
  QuicArenaScopedPtr<TestObject> ptr1 = CreateObject(12345);
  QuicArenaScopedPtr<TestObject> ptr2(std::move(ptr1));
  EXPECT_EQ(nullptr, ptr1);
  EXPECT_EQ(12345u, ptr2->value);
}
// operator*, operator-> and get() all reach the same object.
TEST_P(QuicArenaScopedPtrParamTest, Accessors) {
  QuicArenaScopedPtr<TestObject> ptr = CreateObject(12345);
  EXPECT_EQ(12345u, (*ptr).value);
  EXPECT_EQ(12345u, ptr->value);
  EXPECT_EQ(12345u, ptr.get()->value);
}
// reset() releases the old object and adopts the new heap object.
TEST_P(QuicArenaScopedPtrParamTest, Reset) {
  QuicArenaScopedPtr<TestObject> ptr = CreateObject(12345);
  ptr.reset(new TestObject(54321));
  EXPECT_EQ(54321u, ptr->value);
}
// swap() exchanges owned objects, including mixed heap/arena pairs.
TEST_P(QuicArenaScopedPtrParamTest, Swap) {
  QuicArenaScopedPtr<TestObject> ptr1 = CreateObject(12345);
  QuicArenaScopedPtr<TestObject> ptr2 = CreateObject(54321);
  ptr1.swap(ptr2);
  EXPECT_EQ(12345u, ptr2->value);
  EXPECT_EQ(54321u, ptr1->value);
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_arena_scoped_ptr.h | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_arena_scoped_ptr_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
ddc5b0d0-1019-4b7a-97cc-3ae0acc9d1cb | cpp | google/quiche | qpack_decoded_headers_accumulator | quiche/quic/core/qpack/qpack_decoded_headers_accumulator.cc | quiche/quic/core/qpack/qpack_decoded_headers_accumulator_test.cc | #include "quiche/quic/core/qpack/qpack_decoded_headers_accumulator.h"
#include <utility>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/qpack/qpack_decoder.h"
#include "quiche/quic/core/qpack/qpack_header_table.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flags.h"
namespace quic {
// Creates a progressive QPACK decoder for stream `id`; decoded headers and
// errors are reported back through `visitor`. `max_header_list_size` caps the
// accumulated uncompressed size before the size-limit flag is raised.
QpackDecodedHeadersAccumulator::QpackDecodedHeadersAccumulator(
    QuicStreamId id, QpackDecoder* qpack_decoder, Visitor* visitor,
    size_t max_header_list_size)
    : decoder_(qpack_decoder->CreateProgressiveDecoder(id, this)),
      visitor_(visitor),
      max_header_list_size_(max_header_list_size),
      uncompressed_header_bytes_including_overhead_(0),
      uncompressed_header_bytes_without_overhead_(0),
      compressed_header_bytes_(0),
      header_list_size_limit_exceeded_(false),
      headers_decoded_(false),
      error_detected_(false) {}
// Called by the decoder for each decoded header field. Tracks the
// uncompressed size with and without per-entry overhead; the field that
// first exceeds the limit is still added, but subsequent fields are dropped.
void QpackDecodedHeadersAccumulator::OnHeaderDecoded(absl::string_view name,
                                                     absl::string_view value) {
  QUICHE_DCHECK(!error_detected_);
  uncompressed_header_bytes_without_overhead_ += name.size() + value.size();
  // Already over the limit: keep counting the no-overhead total above for
  // reporting, but stop accumulating fields.
  if (header_list_size_limit_exceeded_) {
    return;
  }
  uncompressed_header_bytes_including_overhead_ +=
      name.size() + value.size() + kQpackEntrySizeOverhead;
  // A runtime flag selects whether the limit applies to the size with or
  // without kQpackEntrySizeOverhead per entry.
  const size_t uncompressed_header_bytes =
      GetQuicFlag(quic_header_size_limit_includes_overhead)
          ? uncompressed_header_bytes_including_overhead_
          : uncompressed_header_bytes_without_overhead_;
  if (uncompressed_header_bytes > max_header_list_size_) {
    header_list_size_limit_exceeded_ = true;
  }
  quic_header_list_.OnHeader(name, value);
}
// Called when the whole header block decoded successfully. Seals the header
// list with the byte counts and hands it to the visitor exactly once.
void QpackDecodedHeadersAccumulator::OnDecodingCompleted() {
  QUICHE_DCHECK(!headers_decoded_);
  QUICHE_DCHECK(!error_detected_);
  headers_decoded_ = true;
  quic_header_list_.OnHeaderBlockEnd(
      uncompressed_header_bytes_without_overhead_, compressed_header_bytes_);
  visitor_->OnHeadersDecoded(std::move(quic_header_list_),
                             header_list_size_limit_exceeded_);
}
// Called on a decoding failure; forwards the error to the visitor.
// Completion and error are mutually exclusive and each fires at most once.
void QpackDecodedHeadersAccumulator::OnDecodingErrorDetected(
    QuicErrorCode error_code, absl::string_view error_message) {
  QUICHE_DCHECK(!error_detected_);
  QUICHE_DCHECK(!headers_decoded_);
  error_detected_ = true;
  visitor_->OnHeaderDecodingError(error_code, error_message);
}
// Feeds a chunk of encoded header block data to the progressive decoder and
// keeps a running count of compressed bytes for reporting.
void QpackDecodedHeadersAccumulator::Decode(absl::string_view data) {
  QUICHE_DCHECK(!error_detected_);
  compressed_header_bytes_ += data.size();
  decoder_->Decode(data);
}
// Signals that the entire header block has been received. Decoding may still
// finish asynchronously if the decoder is blocked on dynamic table state.
void QpackDecodedHeadersAccumulator::EndHeaderBlock() {
  QUICHE_DCHECK(!error_detected_);
  QUICHE_DCHECK(!headers_decoded_);
  if (!decoder_) {
    QUIC_BUG(b215142466_EndHeaderBlock);
    return;
  }
  decoder_->EndHeaderBlock();
}
} | #include "quiche/quic/core/qpack/qpack_decoded_headers_accumulator.h"
#include <cstring>
#include <string>
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/qpack/qpack_decoder.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/qpack/qpack_test_utils.h"
using ::testing::_;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::Pair;
using ::testing::SaveArg;
using ::testing::StrictMock;
namespace quic {
namespace test {
namespace {
QuicStreamId kTestStreamId = 1;
const size_t kMaxHeaderListSize = 100;
const size_t kMaxDynamicTableCapacity = 100;
const uint64_t kMaximumBlockedStreams = 1;
const char* const kHeaderAcknowledgement = "\x81";
// Mock visitor capturing the accumulator's terminal callbacks: either a
// fully decoded header list or a decoding error.
class MockVisitor : public QpackDecodedHeadersAccumulator::Visitor {
 public:
  ~MockVisitor() override = default;
  MOCK_METHOD(void, OnHeadersDecoded,
              (QuicHeaderList headers, bool header_list_size_limit_exceeded),
              (override));
  MOCK_METHOD(void, OnHeaderDecodingError,
              (QuicErrorCode error_code, absl::string_view error_message),
              (override));
};
}
// Fixture wiring a real QpackDecoder to the accumulator under test, with a
// strict mock visitor and a strict mock decoder-stream sender delegate.
class QpackDecodedHeadersAccumulatorTest : public QuicTest {
 protected:
  QpackDecodedHeadersAccumulatorTest()
      : qpack_decoder_(kMaxDynamicTableCapacity, kMaximumBlockedStreams,
                       &encoder_stream_error_delegate_),
        accumulator_(kTestStreamId, &qpack_decoder_, &visitor_,
                     kMaxHeaderListSize) {
    qpack_decoder_.set_qpack_stream_sender_delegate(
        &decoder_stream_sender_delegate_);
  }
  NoopEncoderStreamErrorDelegate encoder_stream_error_delegate_;
  StrictMock<MockQpackStreamSenderDelegate> decoder_stream_sender_delegate_;
  QpackDecoder qpack_decoder_;
  StrictMock<MockVisitor> visitor_;
  QpackDecodedHeadersAccumulator accumulator_;
};
TEST_F(QpackDecodedHeadersAccumulatorTest, EmptyPayload) {
EXPECT_CALL(visitor_,
OnHeaderDecodingError(QUIC_QPACK_DECOMPRESSION_FAILED,
Eq("Incomplete header data prefix.")));
accumulator_.EndHeaderBlock();
}
TEST_F(QpackDecodedHeadersAccumulatorTest, TruncatedHeaderBlockPrefix) {
std::string encoded_data;
ASSERT_TRUE(absl::HexStringToBytes("00", &encoded_data));
accumulator_.Decode(encoded_data);
EXPECT_CALL(visitor_,
OnHeaderDecodingError(QUIC_QPACK_DECOMPRESSION_FAILED,
Eq("Incomplete header data prefix.")));
accumulator_.EndHeaderBlock();
}
TEST_F(QpackDecodedHeadersAccumulatorTest, EmptyHeaderList) {
std::string encoded_data;
ASSERT_TRUE(absl::HexStringToBytes("0000", &encoded_data));
accumulator_.Decode(encoded_data);
QuicHeaderList header_list;
EXPECT_CALL(visitor_, OnHeadersDecoded(_, false))
.WillOnce(SaveArg<0>(&header_list));
accumulator_.EndHeaderBlock();
EXPECT_EQ(0u, header_list.uncompressed_header_bytes());
EXPECT_EQ(encoded_data.size(), header_list.compressed_header_bytes());
EXPECT_TRUE(header_list.empty());
}
TEST_F(QpackDecodedHeadersAccumulatorTest, TruncatedPayload) {
std::string encoded_data;
ASSERT_TRUE(absl::HexStringToBytes("00002366", &encoded_data));
accumulator_.Decode(encoded_data);
EXPECT_CALL(visitor_, OnHeaderDecodingError(QUIC_QPACK_DECOMPRESSION_FAILED,
Eq("Incomplete header block.")));
accumulator_.EndHeaderBlock();
}
TEST_F(QpackDecodedHeadersAccumulatorTest, InvalidPayload) {
EXPECT_CALL(visitor_,
OnHeaderDecodingError(QUIC_QPACK_DECOMPRESSION_FAILED,
Eq("Static table entry not found.")));
std::string encoded_data;
ASSERT_TRUE(absl::HexStringToBytes("0000ff23ff24", &encoded_data));
accumulator_.Decode(encoded_data);
}
TEST_F(QpackDecodedHeadersAccumulatorTest, Success) {
std::string encoded_data;
ASSERT_TRUE(absl::HexStringToBytes("000023666f6f03626172", &encoded_data));
accumulator_.Decode(encoded_data);
QuicHeaderList header_list;
EXPECT_CALL(visitor_, OnHeadersDecoded(_, false))
.WillOnce(SaveArg<0>(&header_list));
accumulator_.EndHeaderBlock();
EXPECT_THAT(header_list, ElementsAre(Pair("foo", "bar")));
EXPECT_EQ(strlen("foo") + strlen("bar"),
header_list.uncompressed_header_bytes());
EXPECT_EQ(encoded_data.size(), header_list.compressed_header_bytes());
}
TEST_F(QpackDecodedHeadersAccumulatorTest, ExceedLimitThenSplitInstruction) {
std::string encoded_data;
ASSERT_TRUE(absl::HexStringToBytes(
"0000"
"26666f6f626172"
"7d61616161616161616161616161616161616161"
"616161616161616161616161616161616161616161616161616161616161616161616161"
"616161616161616161616161616161616161616161616161616161616161616161616161"
"61616161616161616161616161616161616161616161616161616161616161616161"
"ff",
&encoded_data));
accumulator_.Decode(encoded_data);
ASSERT_TRUE(absl::HexStringToBytes(
"0f",
&encoded_data));
accumulator_.Decode(encoded_data);
EXPECT_CALL(visitor_, OnHeadersDecoded(_, true));
accumulator_.EndHeaderBlock();
}
TEST_F(QpackDecodedHeadersAccumulatorTest, ExceedLimitBlocked) {
std::string encoded_data;
ASSERT_TRUE(absl::HexStringToBytes(
"0200"
"80"
"26666f6f626172"
"7d61616161616161616161616161616161616161"
"616161616161616161616161616161616161616161616161616161616161616161616161"
"616161616161616161616161616161616161616161616161616161616161616161616161"
"61616161616161616161616161616161616161616161616161616161616161616161",
&encoded_data));
accumulator_.Decode(encoded_data);
accumulator_.EndHeaderBlock();
qpack_decoder_.OnSetDynamicTableCapacity(kMaxDynamicTableCapacity);
EXPECT_CALL(decoder_stream_sender_delegate_,
WriteStreamData(Eq(kHeaderAcknowledgement)));
EXPECT_CALL(visitor_, OnHeadersDecoded(_, true));
qpack_decoder_.OnInsertWithoutNameReference("foo", "bar");
qpack_decoder_.FlushDecoderStream();
}
TEST_F(QpackDecodedHeadersAccumulatorTest, BlockedDecoding) {
std::string encoded_data;
ASSERT_TRUE(absl::HexStringToBytes("020080", &encoded_data));
accumulator_.Decode(encoded_data);
accumulator_.EndHeaderBlock();
qpack_decoder_.OnSetDynamicTableCapacity(kMaxDynamicTableCapacity);
EXPECT_CALL(decoder_stream_sender_delegate_,
WriteStreamData(Eq(kHeaderAcknowledgement)));
QuicHeaderList header_list;
EXPECT_CALL(visitor_, OnHeadersDecoded(_, false))
.WillOnce(SaveArg<0>(&header_list));
qpack_decoder_.OnInsertWithoutNameReference("foo", "bar");
EXPECT_THAT(header_list, ElementsAre(Pair("foo", "bar")));
EXPECT_EQ(strlen("foo") + strlen("bar"),
header_list.uncompressed_header_bytes());
EXPECT_EQ(encoded_data.size(), header_list.compressed_header_bytes());
qpack_decoder_.FlushDecoderStream();
}
TEST_F(QpackDecodedHeadersAccumulatorTest,
BlockedDecodingUnblockedBeforeEndOfHeaderBlock) {
std::string encoded_data;
ASSERT_TRUE(absl::HexStringToBytes("020080", &encoded_data));
accumulator_.Decode(encoded_data);
qpack_decoder_.OnSetDynamicTableCapacity(kMaxDynamicTableCapacity);
qpack_decoder_.OnInsertWithoutNameReference("foo", "bar");
EXPECT_CALL(decoder_stream_sender_delegate_,
WriteStreamData(Eq(kHeaderAcknowledgement)));
ASSERT_TRUE(absl::HexStringToBytes("80", &encoded_data));
accumulator_.Decode(encoded_data);
QuicHeaderList header_list;
EXPECT_CALL(visitor_, OnHeadersDecoded(_, false))
.WillOnce(SaveArg<0>(&header_list));
accumulator_.EndHeaderBlock();
EXPECT_THAT(header_list, ElementsAre(Pair("foo", "bar"), Pair("foo", "bar")));
qpack_decoder_.FlushDecoderStream();
}
TEST_F(QpackDecodedHeadersAccumulatorTest,
BlockedDecodingUnblockedAndErrorBeforeEndOfHeaderBlock) {
std::string encoded_data;
ASSERT_TRUE(absl::HexStringToBytes("0200", &encoded_data));
accumulator_.Decode(encoded_data);
ASSERT_TRUE(absl::HexStringToBytes("80", &encoded_data));
accumulator_.Decode(encoded_data);
ASSERT_TRUE(absl::HexStringToBytes("81", &encoded_data));
accumulator_.Decode(encoded_data);
qpack_decoder_.OnSetDynamicTableCapacity(kMaxDynamicTableCapacity);
EXPECT_CALL(visitor_, OnHeaderDecodingError(QUIC_QPACK_DECOMPRESSION_FAILED,
Eq("Invalid relative index.")));
qpack_decoder_.OnInsertWithoutNameReference("foo", "bar");
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_decoded_headers_accumulator.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_decoded_headers_accumulator_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
135e2997-7d12-4167-bdc6-65a292190b26 | cpp | google/arolla | properties | arolla/qtype/standard_type_properties/properties.cc | arolla/qtype/standard_type_properties/properties_test.cc | #include "arolla/qtype/standard_type_properties/properties.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "arolla/qtype/array_like/array_like_qtype.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/shape_qtype.h"
namespace arolla {
// Returns the corresponding scalar qtype: the value qtype for container /
// optional types, or `qtype` itself if it is already scalar. Returns nullptr
// (instead of an error) when no scalar counterpart exists or qtype is null.
const QType* GetScalarQTypeOrNull(
    const QType* qtype) {
  if (qtype != nullptr) {
    if (auto* value_qtype = qtype->value_qtype()) {
      return value_qtype;
    }
    if (IsScalarQType(qtype)) {
      return qtype;
    }
  }
  return nullptr;
}
// Status-returning wrapper over GetScalarQTypeOrNull(); fails with
// InvalidArgumentError when there is no scalar counterpart.
absl::StatusOr<QTypePtr> GetScalarQType(QTypePtr qtype) {
  DCHECK(qtype);
  if (auto* result = GetScalarQTypeOrNull(qtype)) {
    return result;
  }
  return absl::InvalidArgumentError(absl::StrFormat(
      "there is no corresponding scalar type for %s", qtype->name()));
}
// Returns the ShapeQType describing `qtype`'s container kind: scalar shape
// for plain scalars, optional-scalar shape for optionals, and the array's
// own shape qtype for array-like types. Returns nullptr when none applies.
const ShapeQType* GetShapeQTypeOrNull(
    const QType* qtype) {
  if (qtype != nullptr) {
    if (qtype->value_qtype() == nullptr) {
      // No value qtype => not a container; only scalars have a shape here.
      if (IsScalarQType(qtype)) {
        return static_cast<const ShapeQType*>(GetQType<ScalarShape>());
      }
    } else {
      if (IsOptionalQType(qtype)) {
        return static_cast<const ShapeQType*>(GetQType<OptionalScalarShape>());
      }
      if (auto* array_qtype = dynamic_cast<const ArrayLikeQType*>(qtype)) {
        return array_qtype->shape_qtype();
      }
    }
  }
  return nullptr;
}
// Status-returning wrapper over GetShapeQTypeOrNull().
absl::StatusOr<const ShapeQType*> GetShapeQType(QTypePtr qtype) {
  DCHECK(qtype);
  if (auto* result = GetShapeQTypeOrNull(qtype)) {
    return result;
  }
  return absl::InvalidArgumentError(
      absl::StrFormat("no shape type for %s", qtype->name()));
}
// Strips one level of container: returns the value qtype for container
// types and `qtype` itself for non-containers.
QTypePtr DecayContainerQType(QTypePtr qtype) {
  DCHECK(qtype);
  auto* decayed = qtype->value_qtype();
  return decayed == nullptr ? qtype : decayed;
}
// Returns a qtype like `qtype` but with its scalar payload replaced by
// `new_scalar_qtype` (via the shape qtype's WithValueQType). Fails when
// `new_scalar_qtype` is not scalar, or when `qtype` has no shape qtype.
absl::StatusOr<QTypePtr> WithScalarQType(QTypePtr qtype,
                                         QTypePtr new_scalar_qtype) {
  DCHECK(qtype);
  DCHECK(new_scalar_qtype);
  if (!IsScalarQType(new_scalar_qtype)) {
    return absl::InvalidArgumentError(absl::StrFormat(
        "unable to replace scalar type in %s with a non-scalar type %s",
        qtype->name(), new_scalar_qtype->name()));
  }
  if (auto shape_qtype = GetShapeQType(qtype); shape_qtype.ok()) {
    return (**shape_qtype).WithValueQType(new_scalar_qtype);
  }
  return absl::InvalidArgumentError(
      absl::StrFormat("unable to replace scalar type in %s", qtype->name()));
}
// Returns the qtype representing presence for `qtype`'s shape (per the unit
// tests: Unit for scalars, OptionalUnit for optionals, an array of Unit for
// arrays). Fails when `qtype` has no shape qtype.
absl::StatusOr<QTypePtr> GetPresenceQType(QTypePtr qtype) {
  DCHECK(qtype);
  if (auto shape_qtype = GetShapeQType(qtype); shape_qtype.ok()) {
    return (**shape_qtype).presence_qtype();
  }
  return absl::InvalidArgumentError(
      absl::StrFormat("no type to represent presence in %s", qtype->name()));
}
// True for qtypes whose values may be missing: optionals and array-like
// containers (both carry a value qtype).
bool IsOptionalLikeQType(const QType* qtype) {
  if (qtype == nullptr || qtype->value_qtype() == nullptr) {
    return false;
  }
  return IsOptionalQType(qtype) || IsArrayLikeQType(qtype);
}
// Converts `qtype` to an optional-like qtype: scalars become optionals, and
// already optional-like types (optionals, arrays) are returned unchanged;
// anything else is an InvalidArgumentError.
absl::StatusOr<QTypePtr> ToOptionalLikeQType(QTypePtr qtype) {
  DCHECK(qtype);
  if (qtype->value_qtype() == nullptr) {
    if (IsScalarQType(qtype)) {
      return ToOptionalQType(qtype);
    }
  } else if (IsOptionalLikeQType(qtype)) {
    return qtype;
  }
  return absl::InvalidArgumentError(
      absl::StrFormat("no optional-like qtype for %s", qtype->name()));
}
} | #include "arolla/qtype/standard_type_properties/properties.h"
#include <cstdint>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/array/array.h"
#include "arolla/array/qtype/types.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/shape_qtype.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/util/unit.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::MatchesRegex;
TEST(TypeProperties, GetScalarQType) {
EXPECT_THAT(GetScalarQType(GetQType<int64_t>()),
IsOkAndHolds(GetQType<int64_t>()));
EXPECT_THAT(GetScalarQType(GetOptionalQType<int64_t>()),
IsOkAndHolds(GetQType<int64_t>()));
EXPECT_THAT(GetScalarQType(GetDenseArrayQType<int64_t>()),
IsOkAndHolds(GetQType<int64_t>()));
EXPECT_THAT(GetScalarQType(GetDenseArrayWeakFloatQType()),
IsOkAndHolds(GetWeakFloatQType()));
EXPECT_THAT(GetScalarQType(GetArrayWeakFloatQType()),
IsOkAndHolds(GetWeakFloatQType()));
EXPECT_THAT(
GetScalarQType(MakeTupleQType({GetQType<int64_t>()})),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex("there is no corresponding scalar type for .*")));
}
TEST(TypeProperties, GetShapeQType) {
EXPECT_THAT(GetShapeQType(GetQType<int64_t>()),
IsOkAndHolds(GetQType<ScalarShape>()));
EXPECT_THAT(GetShapeQType(GetOptionalQType<int64_t>()),
IsOkAndHolds(GetQType<OptionalScalarShape>()));
EXPECT_THAT(GetShapeQType(GetDenseArrayQType<int64_t>()),
IsOkAndHolds(GetQType<DenseArrayShape>()));
EXPECT_THAT(GetShapeQType(GetDenseArrayWeakFloatQType()),
IsOkAndHolds(GetQType<DenseArrayShape>()));
EXPECT_THAT(GetShapeQType(GetArrayWeakFloatQType()),
IsOkAndHolds(GetQType<ArrayShape>()));
EXPECT_THAT(GetShapeQType(MakeTupleQType({GetQType<int64_t>()})),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex("no shape type for .*")));
}
TEST(TypeProperties, WithScalarQType) {
EXPECT_THAT(WithScalarQType(GetQType<int64_t>(), GetQType<float>()),
IsOkAndHolds(GetQType<float>()));
EXPECT_THAT(WithScalarQType(GetOptionalQType<int64_t>(), GetQType<float>()),
IsOkAndHolds(GetOptionalQType<float>()));
EXPECT_THAT(WithScalarQType(GetDenseArrayQType<int64_t>(), GetQType<float>()),
IsOkAndHolds(GetDenseArrayQType<float>()));
EXPECT_THAT(
WithScalarQType(GetDenseArrayQType<int64_t>(), GetWeakFloatQType()),
IsOkAndHolds(GetDenseArrayWeakFloatQType()));
EXPECT_THAT(WithScalarQType(GetArrayQType<int64_t>(), GetWeakFloatQType()),
IsOkAndHolds(GetArrayWeakFloatQType()));
EXPECT_THAT(WithScalarQType(MakeTupleQType({GetQType<int64_t>()}),
GetOptionalQType<float>()),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex("unable to replace scalar type in .* with "
"a non-scalar type .*")));
EXPECT_THAT(
WithScalarQType(MakeTupleQType({GetQType<int64_t>()}), GetQType<float>()),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex("unable to replace scalar type in .*")));
}
TEST(TypeProperties, GetPresenceQType) {
EXPECT_THAT(GetPresenceQType(GetQType<int64_t>()),
IsOkAndHolds(GetQType<Unit>()));
EXPECT_THAT(GetPresenceQType(GetOptionalQType<int64_t>()),
IsOkAndHolds(GetQType<OptionalUnit>()));
EXPECT_THAT(GetPresenceQType(GetDenseArrayQType<int64_t>()),
IsOkAndHolds(GetDenseArrayQType<Unit>()));
EXPECT_THAT(GetPresenceQType(GetDenseArrayWeakFloatQType()),
IsOkAndHolds(GetDenseArrayQType<Unit>()));
EXPECT_THAT(GetPresenceQType(GetArrayWeakFloatQType()),
IsOkAndHolds(GetArrayQType<Unit>()));
EXPECT_THAT(GetPresenceQType(MakeTupleQType({GetQType<int64_t>()})),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex("no type to represent presence in .*")));
}
TEST(TypeProperties, IsOptionalLikeQType) {
EXPECT_FALSE(IsOptionalLikeQType(GetQType<int64_t>()));
EXPECT_TRUE(IsOptionalLikeQType(GetOptionalQType<int64_t>()));
EXPECT_TRUE(IsOptionalLikeQType(GetDenseArrayQType<int64_t>()));
EXPECT_TRUE(IsOptionalLikeQType(GetDenseArrayWeakFloatQType()));
EXPECT_TRUE(IsOptionalLikeQType(GetArrayWeakFloatQType()));
}
TEST(TypeProperties, ToOptionalLikeQType) {
EXPECT_THAT(ToOptionalLikeQType(GetQType<int64_t>()),
IsOkAndHolds(GetOptionalQType<int64_t>()));
EXPECT_THAT(ToOptionalLikeQType(GetOptionalQType<int64_t>()),
IsOkAndHolds(GetOptionalQType<int64_t>()));
EXPECT_THAT(ToOptionalLikeQType(GetDenseArrayQType<int64_t>()),
IsOkAndHolds(GetDenseArrayQType<int64_t>()));
EXPECT_THAT(ToOptionalLikeQType(GetDenseArrayWeakFloatQType()),
IsOkAndHolds(GetDenseArrayWeakFloatQType()));
EXPECT_THAT(ToOptionalLikeQType(GetArrayWeakFloatQType()),
IsOkAndHolds(GetArrayWeakFloatQType()));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/standard_type_properties/properties.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/standard_type_properties/properties_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
75dce5ff-5b75-4027-b504-c171a5ad68f2 | cpp | abseil/abseil-cpp | randen_slow | absl/random/internal/randen_slow.cc | absl/random/internal/randen_slow_test.cc | #include "absl/random/internal/randen_slow.h"
#include <cstddef>
#include <cstdint>
#include <cstring>
#include "absl/base/attributes.h"
#include "absl/base/internal/endian.h"
#include "absl/numeric/int128.h"
#include "absl/random/internal/platform.h"
#include "absl/random/internal/randen_traits.h"
#if ABSL_HAVE_ATTRIBUTE(always_inline) || \
(defined(__GNUC__) && !defined(__clang__))
#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE \
__attribute__((always_inline))
#elif defined(_MSC_VER)
#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE __forceinline
#else
#define ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE
#endif
namespace {
constexpr uint32_t te0[256] = {
0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6, 0x0df2f2ff, 0xbd6b6bd6,
0xb16f6fde, 0x54c5c591, 0x50303060, 0x03010102, 0xa96767ce, 0x7d2b2b56,
0x19fefee7, 0x62d7d7b5, 0xe6abab4d, 0x9a7676ec, 0x45caca8f, 0x9d82821f,
0x40c9c989, 0x877d7dfa, 0x15fafaef, 0xeb5959b2, 0xc947478e, 0x0bf0f0fb,
0xecadad41, 0x67d4d4b3, 0xfda2a25f, 0xeaafaf45, 0xbf9c9c23, 0xf7a4a453,
0x967272e4, 0x5bc0c09b, 0xc2b7b775, 0x1cfdfde1, 0xae93933d, 0x6a26264c,
0x5a36366c, 0x413f3f7e, 0x02f7f7f5, 0x4fcccc83, 0x5c343468, 0xf4a5a551,
0x34e5e5d1, 0x08f1f1f9, 0x937171e2, 0x73d8d8ab, 0x53313162, 0x3f15152a,
0x0c040408, 0x52c7c795, 0x65232346, 0x5ec3c39d, 0x28181830, 0xa1969637,
0x0f05050a, 0xb59a9a2f, 0x0907070e, 0x36121224, 0x9b80801b, 0x3de2e2df,
0x26ebebcd, 0x6927274e, 0xcdb2b27f, 0x9f7575ea, 0x1b090912, 0x9e83831d,
0x742c2c58, 0x2e1a1a34, 0x2d1b1b36, 0xb26e6edc, 0xee5a5ab4, 0xfba0a05b,
0xf65252a4, 0x4d3b3b76, 0x61d6d6b7, 0xceb3b37d, 0x7b292952, 0x3ee3e3dd,
0x712f2f5e, 0x97848413, 0xf55353a6, 0x68d1d1b9, 0x00000000, 0x2cededc1,
0x60202040, 0x1ffcfce3, 0xc8b1b179, 0xed5b5bb6, 0xbe6a6ad4, 0x46cbcb8d,
0xd9bebe67, 0x4b393972, 0xde4a4a94, 0xd44c4c98, 0xe85858b0, 0x4acfcf85,
0x6bd0d0bb, 0x2aefefc5, 0xe5aaaa4f, 0x16fbfbed, 0xc5434386, 0xd74d4d9a,
0x55333366, 0x94858511, 0xcf45458a, 0x10f9f9e9, 0x06020204, 0x817f7ffe,
0xf05050a0, 0x443c3c78, 0xba9f9f25, 0xe3a8a84b, 0xf35151a2, 0xfea3a35d,
0xc0404080, 0x8a8f8f05, 0xad92923f, 0xbc9d9d21, 0x48383870, 0x04f5f5f1,
0xdfbcbc63, 0xc1b6b677, 0x75dadaaf, 0x63212142, 0x30101020, 0x1affffe5,
0x0ef3f3fd, 0x6dd2d2bf, 0x4ccdcd81, 0x140c0c18, 0x35131326, 0x2fececc3,
0xe15f5fbe, 0xa2979735, 0xcc444488, 0x3917172e, 0x57c4c493, 0xf2a7a755,
0x827e7efc, 0x473d3d7a, 0xac6464c8, 0xe75d5dba, 0x2b191932, 0x957373e6,
0xa06060c0, 0x98818119, 0xd14f4f9e, 0x7fdcdca3, 0x66222244, 0x7e2a2a54,
0xab90903b, 0x8388880b, 0xca46468c, 0x29eeeec7, 0xd3b8b86b, 0x3c141428,
0x79dedea7, 0xe25e5ebc, 0x1d0b0b16, 0x76dbdbad, 0x3be0e0db, 0x56323264,
0x4e3a3a74, 0x1e0a0a14, 0xdb494992, 0x0a06060c, 0x6c242448, 0xe45c5cb8,
0x5dc2c29f, 0x6ed3d3bd, 0xefacac43, 0xa66262c4, 0xa8919139, 0xa4959531,
0x37e4e4d3, 0x8b7979f2, 0x32e7e7d5, 0x43c8c88b, 0x5937376e, 0xb76d6dda,
0x8c8d8d01, 0x64d5d5b1, 0xd24e4e9c, 0xe0a9a949, 0xb46c6cd8, 0xfa5656ac,
0x07f4f4f3, 0x25eaeacf, 0xaf6565ca, 0x8e7a7af4, 0xe9aeae47, 0x18080810,
0xd5baba6f, 0x887878f0, 0x6f25254a, 0x722e2e5c, 0x241c1c38, 0xf1a6a657,
0xc7b4b473, 0x51c6c697, 0x23e8e8cb, 0x7cdddda1, 0x9c7474e8, 0x211f1f3e,
0xdd4b4b96, 0xdcbdbd61, 0x868b8b0d, 0x858a8a0f, 0x907070e0, 0x423e3e7c,
0xc4b5b571, 0xaa6666cc, 0xd8484890, 0x05030306, 0x01f6f6f7, 0x120e0e1c,
0xa36161c2, 0x5f35356a, 0xf95757ae, 0xd0b9b969, 0x91868617, 0x58c1c199,
0x271d1d3a, 0xb99e9e27, 0x38e1e1d9, 0x13f8f8eb, 0xb398982b, 0x33111122,
0xbb6969d2, 0x70d9d9a9, 0x898e8e07, 0xa7949433, 0xb69b9b2d, 0x221e1e3c,
0x92878715, 0x20e9e9c9, 0x49cece87, 0xff5555aa, 0x78282850, 0x7adfdfa5,
0x8f8c8c03, 0xf8a1a159, 0x80898909, 0x170d0d1a, 0xdabfbf65, 0x31e6e6d7,
0xc6424284, 0xb86868d0, 0xc3414182, 0xb0999929, 0x772d2d5a, 0x110f0f1e,
0xcbb0b07b, 0xfc5454a8, 0xd6bbbb6d, 0x3a16162c,
};
constexpr uint32_t te1[256] = {
0x6363c6a5, 0x7c7cf884, 0x7777ee99, 0x7b7bf68d, 0xf2f2ff0d, 0x6b6bd6bd,
0x6f6fdeb1, 0xc5c59154, 0x30306050, 0x01010203, 0x6767cea9, 0x2b2b567d,
0xfefee719, 0xd7d7b562, 0xabab4de6, 0x7676ec9a, 0xcaca8f45, 0x82821f9d,
0xc9c98940, 0x7d7dfa87, 0xfafaef15, 0x5959b2eb, 0x47478ec9, 0xf0f0fb0b,
0xadad41ec, 0xd4d4b367, 0xa2a25ffd, 0xafaf45ea, 0x9c9c23bf, 0xa4a453f7,
0x7272e496, 0xc0c09b5b, 0xb7b775c2, 0xfdfde11c, 0x93933dae, 0x26264c6a,
0x36366c5a, 0x3f3f7e41, 0xf7f7f502, 0xcccc834f, 0x3434685c, 0xa5a551f4,
0xe5e5d134, 0xf1f1f908, 0x7171e293, 0xd8d8ab73, 0x31316253, 0x15152a3f,
0x0404080c, 0xc7c79552, 0x23234665, 0xc3c39d5e, 0x18183028, 0x969637a1,
0x05050a0f, 0x9a9a2fb5, 0x07070e09, 0x12122436, 0x80801b9b, 0xe2e2df3d,
0xebebcd26, 0x27274e69, 0xb2b27fcd, 0x7575ea9f, 0x0909121b, 0x83831d9e,
0x2c2c5874, 0x1a1a342e, 0x1b1b362d, 0x6e6edcb2, 0x5a5ab4ee, 0xa0a05bfb,
0x5252a4f6, 0x3b3b764d, 0xd6d6b761, 0xb3b37dce, 0x2929527b, 0xe3e3dd3e,
0x2f2f5e71, 0x84841397, 0x5353a6f5, 0xd1d1b968, 0x00000000, 0xededc12c,
0x20204060, 0xfcfce31f, 0xb1b179c8, 0x5b5bb6ed, 0x6a6ad4be, 0xcbcb8d46,
0xbebe67d9, 0x3939724b, 0x4a4a94de, 0x4c4c98d4, 0x5858b0e8, 0xcfcf854a,
0xd0d0bb6b, 0xefefc52a, 0xaaaa4fe5, 0xfbfbed16, 0x434386c5, 0x4d4d9ad7,
0x33336655, 0x85851194, 0x45458acf, 0xf9f9e910, 0x02020406, 0x7f7ffe81,
0x5050a0f0, 0x3c3c7844, 0x9f9f25ba, 0xa8a84be3, 0x5151a2f3, 0xa3a35dfe,
0x404080c0, 0x8f8f058a, 0x92923fad, 0x9d9d21bc, 0x38387048, 0xf5f5f104,
0xbcbc63df, 0xb6b677c1, 0xdadaaf75, 0x21214263, 0x10102030, 0xffffe51a,
0xf3f3fd0e, 0xd2d2bf6d, 0xcdcd814c, 0x0c0c1814, 0x13132635, 0xececc32f,
0x5f5fbee1, 0x979735a2, 0x444488cc, 0x17172e39, 0xc4c49357, 0xa7a755f2,
0x7e7efc82, 0x3d3d7a47, 0x6464c8ac, 0x5d5dbae7, 0x1919322b, 0x7373e695,
0x6060c0a0, 0x81811998, 0x4f4f9ed1, 0xdcdca37f, 0x22224466, 0x2a2a547e,
0x90903bab, 0x88880b83, 0x46468cca, 0xeeeec729, 0xb8b86bd3, 0x1414283c,
0xdedea779, 0x5e5ebce2, 0x0b0b161d, 0xdbdbad76, 0xe0e0db3b, 0x32326456,
0x3a3a744e, 0x0a0a141e, 0x494992db, 0x06060c0a, 0x2424486c, 0x5c5cb8e4,
0xc2c29f5d, 0xd3d3bd6e, 0xacac43ef, 0x6262c4a6, 0x919139a8, 0x959531a4,
0xe4e4d337, 0x7979f28b, 0xe7e7d532, 0xc8c88b43, 0x37376e59, 0x6d6ddab7,
0x8d8d018c, 0xd5d5b164, 0x4e4e9cd2, 0xa9a949e0, 0x6c6cd8b4, 0x5656acfa,
0xf4f4f307, 0xeaeacf25, 0x6565caaf, 0x7a7af48e, 0xaeae47e9, 0x08081018,
0xbaba6fd5, 0x7878f088, 0x25254a6f, 0x2e2e5c72, 0x1c1c3824, 0xa6a657f1,
0xb4b473c7, 0xc6c69751, 0xe8e8cb23, 0xdddda17c, 0x7474e89c, 0x1f1f3e21,
0x4b4b96dd, 0xbdbd61dc, 0x8b8b0d86, 0x8a8a0f85, 0x7070e090, 0x3e3e7c42,
0xb5b571c4, 0x6666ccaa, 0x484890d8, 0x03030605, 0xf6f6f701, 0x0e0e1c12,
0x6161c2a3, 0x35356a5f, 0x5757aef9, 0xb9b969d0, 0x86861791, 0xc1c19958,
0x1d1d3a27, 0x9e9e27b9, 0xe1e1d938, 0xf8f8eb13, 0x98982bb3, 0x11112233,
0x6969d2bb, 0xd9d9a970, 0x8e8e0789, 0x949433a7, 0x9b9b2db6, 0x1e1e3c22,
0x87871592, 0xe9e9c920, 0xcece8749, 0x5555aaff, 0x28285078, 0xdfdfa57a,
0x8c8c038f, 0xa1a159f8, 0x89890980, 0x0d0d1a17, 0xbfbf65da, 0xe6e6d731,
0x424284c6, 0x6868d0b8, 0x414182c3, 0x999929b0, 0x2d2d5a77, 0x0f0f1e11,
0xb0b07bcb, 0x5454a8fc, 0xbbbb6dd6, 0x16162c3a,
};
// AES encryption T-table (combined SubBytes + MixColumns lookup for one
// byte of the state).  te0..te3 are byte-rotations of one another; AesRound
// below indexes them with successive state bytes along ShiftRows diagonals.
constexpr uint32_t te2[256] = {
    0x63c6a563, 0x7cf8847c, 0x77ee9977, 0x7bf68d7b, 0xf2ff0df2, 0x6bd6bd6b,
    0x6fdeb16f, 0xc59154c5, 0x30605030, 0x01020301, 0x67cea967, 0x2b567d2b,
    0xfee719fe, 0xd7b562d7, 0xab4de6ab, 0x76ec9a76, 0xca8f45ca, 0x821f9d82,
    0xc98940c9, 0x7dfa877d, 0xfaef15fa, 0x59b2eb59, 0x478ec947, 0xf0fb0bf0,
    0xad41ecad, 0xd4b367d4, 0xa25ffda2, 0xaf45eaaf, 0x9c23bf9c, 0xa453f7a4,
    0x72e49672, 0xc09b5bc0, 0xb775c2b7, 0xfde11cfd, 0x933dae93, 0x264c6a26,
    0x366c5a36, 0x3f7e413f, 0xf7f502f7, 0xcc834fcc, 0x34685c34, 0xa551f4a5,
    0xe5d134e5, 0xf1f908f1, 0x71e29371, 0xd8ab73d8, 0x31625331, 0x152a3f15,
    0x04080c04, 0xc79552c7, 0x23466523, 0xc39d5ec3, 0x18302818, 0x9637a196,
    0x050a0f05, 0x9a2fb59a, 0x070e0907, 0x12243612, 0x801b9b80, 0xe2df3de2,
    0xebcd26eb, 0x274e6927, 0xb27fcdb2, 0x75ea9f75, 0x09121b09, 0x831d9e83,
    0x2c58742c, 0x1a342e1a, 0x1b362d1b, 0x6edcb26e, 0x5ab4ee5a, 0xa05bfba0,
    0x52a4f652, 0x3b764d3b, 0xd6b761d6, 0xb37dceb3, 0x29527b29, 0xe3dd3ee3,
    0x2f5e712f, 0x84139784, 0x53a6f553, 0xd1b968d1, 0x00000000, 0xedc12ced,
    0x20406020, 0xfce31ffc, 0xb179c8b1, 0x5bb6ed5b, 0x6ad4be6a, 0xcb8d46cb,
    0xbe67d9be, 0x39724b39, 0x4a94de4a, 0x4c98d44c, 0x58b0e858, 0xcf854acf,
    0xd0bb6bd0, 0xefc52aef, 0xaa4fe5aa, 0xfbed16fb, 0x4386c543, 0x4d9ad74d,
    0x33665533, 0x85119485, 0x458acf45, 0xf9e910f9, 0x02040602, 0x7ffe817f,
    0x50a0f050, 0x3c78443c, 0x9f25ba9f, 0xa84be3a8, 0x51a2f351, 0xa35dfea3,
    0x4080c040, 0x8f058a8f, 0x923fad92, 0x9d21bc9d, 0x38704838, 0xf5f104f5,
    0xbc63dfbc, 0xb677c1b6, 0xdaaf75da, 0x21426321, 0x10203010, 0xffe51aff,
    0xf3fd0ef3, 0xd2bf6dd2, 0xcd814ccd, 0x0c18140c, 0x13263513, 0xecc32fec,
    0x5fbee15f, 0x9735a297, 0x4488cc44, 0x172e3917, 0xc49357c4, 0xa755f2a7,
    0x7efc827e, 0x3d7a473d, 0x64c8ac64, 0x5dbae75d, 0x19322b19, 0x73e69573,
    0x60c0a060, 0x81199881, 0x4f9ed14f, 0xdca37fdc, 0x22446622, 0x2a547e2a,
    0x903bab90, 0x880b8388, 0x468cca46, 0xeec729ee, 0xb86bd3b8, 0x14283c14,
    0xdea779de, 0x5ebce25e, 0x0b161d0b, 0xdbad76db, 0xe0db3be0, 0x32645632,
    0x3a744e3a, 0x0a141e0a, 0x4992db49, 0x060c0a06, 0x24486c24, 0x5cb8e45c,
    0xc29f5dc2, 0xd3bd6ed3, 0xac43efac, 0x62c4a662, 0x9139a891, 0x9531a495,
    0xe4d337e4, 0x79f28b79, 0xe7d532e7, 0xc88b43c8, 0x376e5937, 0x6ddab76d,
    0x8d018c8d, 0xd5b164d5, 0x4e9cd24e, 0xa949e0a9, 0x6cd8b46c, 0x56acfa56,
    0xf4f307f4, 0xeacf25ea, 0x65caaf65, 0x7af48e7a, 0xae47e9ae, 0x08101808,
    0xba6fd5ba, 0x78f08878, 0x254a6f25, 0x2e5c722e, 0x1c38241c, 0xa657f1a6,
    0xb473c7b4, 0xc69751c6, 0xe8cb23e8, 0xdda17cdd, 0x74e89c74, 0x1f3e211f,
    0x4b96dd4b, 0xbd61dcbd, 0x8b0d868b, 0x8a0f858a, 0x70e09070, 0x3e7c423e,
    0xb571c4b5, 0x66ccaa66, 0x4890d848, 0x03060503, 0xf6f701f6, 0x0e1c120e,
    0x61c2a361, 0x356a5f35, 0x57aef957, 0xb969d0b9, 0x86179186, 0xc19958c1,
    0x1d3a271d, 0x9e27b99e, 0xe1d938e1, 0xf8eb13f8, 0x982bb398, 0x11223311,
    0x69d2bb69, 0xd9a970d9, 0x8e07898e, 0x9433a794, 0x9b2db69b, 0x1e3c221e,
    0x87159287, 0xe9c920e9, 0xce8749ce, 0x55aaff55, 0x28507828, 0xdfa57adf,
    0x8c038f8c, 0xa159f8a1, 0x89098089, 0x0d1a170d, 0xbf65dabf, 0xe6d731e6,
    0x4284c642, 0x68d0b868, 0x4182c341, 0x9929b099, 0x2d5a772d, 0x0f1e110f,
    0xb07bcbb0, 0x54a8fc54, 0xbb6dd6bb, 0x162c3a16,
};
// AES encryption T-table; byte-rotation of te2 (compare te2[0]=0x63c6a563
// with te3[0]=0xc6a56363).  Indexed by the highest state byte in AesRound.
constexpr uint32_t te3[256] = {
    0xc6a56363, 0xf8847c7c, 0xee997777, 0xf68d7b7b, 0xff0df2f2, 0xd6bd6b6b,
    0xdeb16f6f, 0x9154c5c5, 0x60503030, 0x02030101, 0xcea96767, 0x567d2b2b,
    0xe719fefe, 0xb562d7d7, 0x4de6abab, 0xec9a7676, 0x8f45caca, 0x1f9d8282,
    0x8940c9c9, 0xfa877d7d, 0xef15fafa, 0xb2eb5959, 0x8ec94747, 0xfb0bf0f0,
    0x41ecadad, 0xb367d4d4, 0x5ffda2a2, 0x45eaafaf, 0x23bf9c9c, 0x53f7a4a4,
    0xe4967272, 0x9b5bc0c0, 0x75c2b7b7, 0xe11cfdfd, 0x3dae9393, 0x4c6a2626,
    0x6c5a3636, 0x7e413f3f, 0xf502f7f7, 0x834fcccc, 0x685c3434, 0x51f4a5a5,
    0xd134e5e5, 0xf908f1f1, 0xe2937171, 0xab73d8d8, 0x62533131, 0x2a3f1515,
    0x080c0404, 0x9552c7c7, 0x46652323, 0x9d5ec3c3, 0x30281818, 0x37a19696,
    0x0a0f0505, 0x2fb59a9a, 0x0e090707, 0x24361212, 0x1b9b8080, 0xdf3de2e2,
    0xcd26ebeb, 0x4e692727, 0x7fcdb2b2, 0xea9f7575, 0x121b0909, 0x1d9e8383,
    0x58742c2c, 0x342e1a1a, 0x362d1b1b, 0xdcb26e6e, 0xb4ee5a5a, 0x5bfba0a0,
    0xa4f65252, 0x764d3b3b, 0xb761d6d6, 0x7dceb3b3, 0x527b2929, 0xdd3ee3e3,
    0x5e712f2f, 0x13978484, 0xa6f55353, 0xb968d1d1, 0x00000000, 0xc12ceded,
    0x40602020, 0xe31ffcfc, 0x79c8b1b1, 0xb6ed5b5b, 0xd4be6a6a, 0x8d46cbcb,
    0x67d9bebe, 0x724b3939, 0x94de4a4a, 0x98d44c4c, 0xb0e85858, 0x854acfcf,
    0xbb6bd0d0, 0xc52aefef, 0x4fe5aaaa, 0xed16fbfb, 0x86c54343, 0x9ad74d4d,
    0x66553333, 0x11948585, 0x8acf4545, 0xe910f9f9, 0x04060202, 0xfe817f7f,
    0xa0f05050, 0x78443c3c, 0x25ba9f9f, 0x4be3a8a8, 0xa2f35151, 0x5dfea3a3,
    0x80c04040, 0x058a8f8f, 0x3fad9292, 0x21bc9d9d, 0x70483838, 0xf104f5f5,
    0x63dfbcbc, 0x77c1b6b6, 0xaf75dada, 0x42632121, 0x20301010, 0xe51affff,
    0xfd0ef3f3, 0xbf6dd2d2, 0x814ccdcd, 0x18140c0c, 0x26351313, 0xc32fecec,
    0xbee15f5f, 0x35a29797, 0x88cc4444, 0x2e391717, 0x9357c4c4, 0x55f2a7a7,
    0xfc827e7e, 0x7a473d3d, 0xc8ac6464, 0xbae75d5d, 0x322b1919, 0xe6957373,
    0xc0a06060, 0x19988181, 0x9ed14f4f, 0xa37fdcdc, 0x44662222, 0x547e2a2a,
    0x3bab9090, 0x0b838888, 0x8cca4646, 0xc729eeee, 0x6bd3b8b8, 0x283c1414,
    0xa779dede, 0xbce25e5e, 0x161d0b0b, 0xad76dbdb, 0xdb3be0e0, 0x64563232,
    0x744e3a3a, 0x141e0a0a, 0x92db4949, 0x0c0a0606, 0x486c2424, 0xb8e45c5c,
    0x9f5dc2c2, 0xbd6ed3d3, 0x43efacac, 0xc4a66262, 0x39a89191, 0x31a49595,
    0xd337e4e4, 0xf28b7979, 0xd532e7e7, 0x8b43c8c8, 0x6e593737, 0xdab76d6d,
    0x018c8d8d, 0xb164d5d5, 0x9cd24e4e, 0x49e0a9a9, 0xd8b46c6c, 0xacfa5656,
    0xf307f4f4, 0xcf25eaea, 0xcaaf6565, 0xf48e7a7a, 0x47e9aeae, 0x10180808,
    0x6fd5baba, 0xf0887878, 0x4a6f2525, 0x5c722e2e, 0x38241c1c, 0x57f1a6a6,
    0x73c7b4b4, 0x9751c6c6, 0xcb23e8e8, 0xa17cdddd, 0xe89c7474, 0x3e211f1f,
    0x96dd4b4b, 0x61dcbdbd, 0x0d868b8b, 0x0f858a8a, 0xe0907070, 0x7c423e3e,
    0x71c4b5b5, 0xccaa6666, 0x90d84848, 0x06050303, 0xf701f6f6, 0x1c120e0e,
    0xc2a36161, 0x6a5f3535, 0xaef95757, 0x69d0b9b9, 0x17918686, 0x9958c1c1,
    0x3a271d1d, 0x27b99e9e, 0xd938e1e1, 0xeb13f8f8, 0x2bb39898, 0x22331111,
    0xd2bb6969, 0xa970d9d9, 0x07898e8e, 0x33a79494, 0x2db69b9b, 0x3c221e1e,
    0x15928787, 0xc920e9e9, 0x8749cece, 0xaaff5555, 0x50782828, 0xa57adfdf,
    0x038f8c8c, 0x59f8a1a1, 0x09808989, 0x1a170d0d, 0x65dabfbf, 0xd731e6e6,
    0x84c64242, 0xd0b86868, 0x82c34141, 0x29b09999, 0x5a772d2d, 0x1e110f0f,
    0x7bcbb0b0, 0xa8fc5454, 0x6dd6bbbb, 0x2c3a1616,
};
// One 128-bit AES block held as four 32-bit lanes; 16-byte aligned so it
// mirrors a SIMD register / block of the Randen state in memory.
struct alignas(16) Vector128 {
  uint32_t s[4];
};
// Loads 16 bytes from `from` into a Vector128 via a plain byte copy
// (memcpy avoids alignment/aliasing UB; the compiler lowers it to a load).
inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
Vector128Load(const void* from) {
  Vector128 loaded;
  std::memcpy(loaded.s, from, sizeof(Vector128));
  return loaded;
}
// Writes the 16 bytes of `v` to `to` via a plain byte copy (the mirror of
// Vector128Load).
inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void Vector128Store(
    const Vector128& v, void* to) {
  std::memcpy(to, &v.s[0], sizeof(Vector128));
}
// One AES encryption round in software, using the te0..te3 T-tables: each
// output word is the round key XORed with four table lookups of state
// bytes taken along the ShiftRows diagonals (this fuses SubBytes,
// ShiftRows and MixColumns into four lookups per word).  The two branches
// pick state words in mirrored order so that the byte-level result is the
// same regardless of host endianness.
inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE Vector128
AesRound(const Vector128& state, const Vector128& round_key) {
  Vector128 result;
#ifdef ABSL_IS_LITTLE_ENDIAN
  result.s[0] = round_key.s[0] ^
                te0[uint8_t(state.s[0])] ^
                te1[uint8_t(state.s[1] >> 8)] ^
                te2[uint8_t(state.s[2] >> 16)] ^
                te3[uint8_t(state.s[3] >> 24)];
  result.s[1] = round_key.s[1] ^
                te0[uint8_t(state.s[1])] ^
                te1[uint8_t(state.s[2] >> 8)] ^
                te2[uint8_t(state.s[3] >> 16)] ^
                te3[uint8_t(state.s[0] >> 24)];
  result.s[2] = round_key.s[2] ^
                te0[uint8_t(state.s[2])] ^
                te1[uint8_t(state.s[3] >> 8)] ^
                te2[uint8_t(state.s[0] >> 16)] ^
                te3[uint8_t(state.s[1] >> 24)];
  result.s[3] = round_key.s[3] ^
                te0[uint8_t(state.s[3])] ^
                te1[uint8_t(state.s[0] >> 8)] ^
                te2[uint8_t(state.s[1] >> 16)] ^
                te3[uint8_t(state.s[2] >> 24)];
#else
  // Big-endian: the word indices of the shifted lanes are reversed so the
  // same bytes are selected as in the little-endian layout.
  result.s[0] = round_key.s[0] ^
                te0[uint8_t(state.s[0])] ^
                te1[uint8_t(state.s[3] >> 8)] ^
                te2[uint8_t(state.s[2] >> 16)] ^
                te3[uint8_t(state.s[1] >> 24)];
  result.s[1] = round_key.s[1] ^
                te0[uint8_t(state.s[1])] ^
                te1[uint8_t(state.s[0] >> 8)] ^
                te2[uint8_t(state.s[3] >> 16)] ^
                te3[uint8_t(state.s[2] >> 24)];
  result.s[2] = round_key.s[2] ^
                te0[uint8_t(state.s[2])] ^
                te1[uint8_t(state.s[1] >> 8)] ^
                te2[uint8_t(state.s[0] >> 16)] ^
                te3[uint8_t(state.s[3] >> 24)];
  result.s[3] = round_key.s[3] ^
                te0[uint8_t(state.s[3])] ^
                te1[uint8_t(state.s[2] >> 8)] ^
                te2[uint8_t(state.s[1] >> 16)] ^
                te3[uint8_t(state.s[0] >> 24)];
#endif
  return result;
}
using ::absl::random_internal::RandenTraits;
// Applies the fixed Randen block permutation to the 16-block state:
// state[i] = old_state[shuffle[i]].  All 16 blocks are read into locals
// before any store, so the in-place permutation cannot clobber a source
// block that is still needed.
inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void BlockShuffle(
    absl::uint128* state) {
  static_assert(RandenTraits::kFeistelBlocks == 16,
                "Feistel block shuffle only works for 16 blocks.");
  constexpr size_t shuffle[RandenTraits::kFeistelBlocks] = {
      7, 2, 13, 4, 11, 8, 3, 6, 15, 0, 9, 10, 1, 14, 5, 12};
  // Disabled reference implementation, kept for documentation/debugging:
  // the same permutation via a temporary copy of the state.
#if 0
  absl::uint128 source[RandenTraits::kFeistelBlocks];
  std::memcpy(source, state, sizeof(source));
  for (size_t i = 0; i < RandenTraits::kFeistelBlocks; i++) {
    const absl::uint128 v0 = source[shuffle[i]];
    state[i] = v0;
  }
  return;
#endif
  const absl::uint128 v0 = state[shuffle[0]];
  const absl::uint128 v1 = state[shuffle[1]];
  const absl::uint128 v2 = state[shuffle[2]];
  const absl::uint128 v3 = state[shuffle[3]];
  const absl::uint128 v4 = state[shuffle[4]];
  const absl::uint128 v5 = state[shuffle[5]];
  const absl::uint128 v6 = state[shuffle[6]];
  const absl::uint128 v7 = state[shuffle[7]];
  const absl::uint128 w0 = state[shuffle[8]];
  const absl::uint128 w1 = state[shuffle[9]];
  const absl::uint128 w2 = state[shuffle[10]];
  const absl::uint128 w3 = state[shuffle[11]];
  const absl::uint128 w4 = state[shuffle[12]];
  const absl::uint128 w5 = state[shuffle[13]];
  const absl::uint128 w6 = state[shuffle[14]];
  const absl::uint128 w7 = state[shuffle[15]];
  state[0] = v0;
  state[1] = v1;
  state[2] = v2;
  state[3] = v3;
  state[4] = v4;
  state[5] = v5;
  state[6] = v6;
  state[7] = v7;
  state[8] = w0;
  state[9] = w1;
  state[10] = w2;
  state[11] = w3;
  state[12] = w4;
  state[13] = w5;
  state[14] = w6;
  state[15] = w7;
}
// One Feistel round over the 16-block state, processed in even/odd pairs:
// the even block is encrypted with one AES round under the next round key,
// and the result keys a second AES round applied to the odd block.  Even
// blocks themselves are left unmodified.  Returns the advanced round-key
// cursor (two keys consumed per pair, eight per call).
inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE const absl::uint128*
FeistelRound(absl::uint128* ABSL_RANDOM_INTERNAL_RESTRICT state,
             const absl::uint128* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
  for (size_t branch = 0; branch < RandenTraits::kFeistelBlocks; branch += 4) {
    // First pair of this group of four blocks.
    const Vector128 s0 = Vector128Load(state + branch);
    const Vector128 s1 = Vector128Load(state + branch + 1);
    const Vector128 f0 = AesRound(s0, Vector128Load(keys));
    keys++;
    const Vector128 o1 = AesRound(f0, s1);
    Vector128Store(o1, state + branch + 1);
    // Second pair of this group of four blocks.
    const Vector128 s2 = Vector128Load(state + branch + 2);
    const Vector128 s3 = Vector128Load(state + branch + 3);
    const Vector128 f2 = AesRound(s2, Vector128Load(keys));
    keys++;
    const Vector128 o3 = AesRound(f2, s3);
    Vector128Store(o3, state + branch + 3);
  }
  return keys;
}
// Full Randen permutation: kFeistelRounds alternations of a Feistel round
// and the fixed block shuffle.  Round keys are consumed sequentially via
// the cursor that FeistelRound returns.
inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void Permute(
    absl::uint128* state,
    const absl::uint128* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
  size_t rounds_remaining = RandenTraits::kFeistelRounds;
  while (rounds_remaining-- != 0) {
    keys = FeistelRound(state, keys);
    BlockShuffle(state);
  }
}
// On big-endian hosts, byte-swaps each 64-bit half of every state block and
// exchanges the halves, so the scalar math operates on the same byte layout
// as on little-endian hosts.  No-op on little-endian builds.
inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void SwapEndian(
    absl::uint128* state) {
#ifdef ABSL_IS_BIG_ENDIAN
  for (uint32_t block = 0; block < RandenTraits::kFeistelBlocks; ++block) {
    // new_lo is derived from the high half and new_hi from the low half:
    // the halves are exchanged as well as byte-swapped.
    uint64_t new_lo = absl::little_endian::ToHost64(
        static_cast<uint64_t>(state[block] >> 64));
    uint64_t new_hi = absl::little_endian::ToHost64(
        static_cast<uint64_t>((state[block] << 64) >> 64));
    state[block] = (static_cast<absl::uint128>(new_hi) << 64) | new_lo;
  }
#else
  (void)state;
#endif
}
}
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
// Returns the static Randen round-key table in the byte order the scalar
// implementation expects: the default table on little-endian hosts, the
// BE variant otherwise.
const void* RandenSlow::GetKeys() {
#ifdef ABSL_IS_LITTLE_ENDIAN
  return kRandenRoundKeys;
#else
  return kRandenRoundKeysBE;
#endif
}
// XORs the seed into the state words that follow the capacity portion; the
// capacity words themselves are never touched by the seed.
void RandenSlow::Absorb(const void* seed_void, void* state_void) {
  auto* state =
      reinterpret_cast<uint64_t * ABSL_RANDOM_INTERNAL_RESTRICT>(state_void);
  const auto* seed =
      reinterpret_cast<const uint64_t * ABSL_RANDOM_INTERNAL_RESTRICT>(
          seed_void);
  constexpr size_t kCapacityBlocks =
      RandenTraits::kCapacityBytes / sizeof(uint64_t);
  constexpr size_t kStateBlocks =
      RandenTraits::kStateBytes / sizeof(uint64_t);
  static_assert(
      kCapacityBlocks * sizeof(uint64_t) == RandenTraits::kCapacityBytes,
      "Not i*V");
  // Seed word i lands on state word kCapacityBlocks + i.
  for (size_t i = 0; i < kStateBlocks - kCapacityBlocks; ++i) {
    state[kCapacityBlocks + i] ^= seed[i];
  }
}
void RandenSlow::Generate(const void* keys_void, void* state_void) {
static_assert(RandenTraits::kCapacityBytes == sizeof(absl::uint128),
"Capacity mismatch");
auto* state = reinterpret_cast<absl::uint128*>(state_void);
const auto* keys = reinterpret_cast<const absl::uint128*>(keys_void);
const absl::uint128 prev_inner = state[0];
SwapEndian(state);
Permute(state, keys);
SwapEndian(state);
*state ^= prev_inner;
}
}
ABSL_NAMESPACE_END
} | #include "absl/random/internal/randen_slow.h"
#include <cstring>
#include "gtest/gtest.h"
#include "absl/base/internal/endian.h"
#include "absl/random/internal/randen_traits.h"
namespace {
using absl::random_internal::RandenSlow;
using absl::random_internal::RandenTraits;
// Golden-value regression test: one Generate call over an all-zero state
// must reproduce these exact bytes.  Any change to the scalar
// AES/Feistel/shuffle code that alters output breaks this comparison.
TEST(RandenSlowTest, Default) {
  // Expected state contents after a single Generate from a zeroed state.
  constexpr uint8_t kGolden[] = {
      0xee, 0xd3, 0xe6, 0x0e, 0x09, 0x34, 0x65, 0x6c, 0xc6, 0x33, 0x53, 0x9d,
      0x9b, 0x2b, 0x4e, 0x04, 0x77, 0x39, 0x43, 0x4e, 0x13, 0x4f, 0xc1, 0xc3,
      0xee, 0x10, 0x04, 0xd9, 0x7c, 0xf4, 0xa9, 0xdd, 0x10, 0xca, 0xd8, 0x7f,
      0x08, 0xf3, 0x7b, 0x88, 0x12, 0x29, 0xc7, 0x45, 0xf5, 0x80, 0xb7, 0xf0,
      0x9f, 0x59, 0x96, 0x76, 0xd3, 0xb1, 0xdb, 0x15, 0x59, 0x6d, 0x3c, 0xff,
      0xba, 0x63, 0xec, 0x30, 0xa6, 0x20, 0x7f, 0x6f, 0x60, 0x73, 0x9f, 0xb2,
      0x4c, 0xa5, 0x49, 0x6f, 0x31, 0x8a, 0x80, 0x02, 0x0e, 0xe5, 0xc8, 0xd5,
      0xf9, 0xea, 0x8f, 0x3b, 0x8a, 0xde, 0xd9, 0x3f, 0x5e, 0x60, 0xbf, 0x9c,
      0xbb, 0x3b, 0x18, 0x78, 0x1a, 0xae, 0x70, 0xc9, 0xd5, 0x1e, 0x30, 0x56,
      0xd3, 0xff, 0xb2, 0xd8, 0x37, 0x3c, 0xc7, 0x0f, 0xfe, 0x27, 0xb3, 0xf4,
      0x19, 0x9a, 0x8f, 0xeb, 0x76, 0x8d, 0xfd, 0xcd, 0x9d, 0x0c, 0x42, 0x91,
      0xeb, 0x06, 0xa5, 0xc3, 0x56, 0x95, 0xff, 0x3e, 0xdd, 0x05, 0xaf, 0xd5,
      0xa1, 0xc4, 0x83, 0x8f, 0xb7, 0x1b, 0xdb, 0x48, 0x8c, 0xfe, 0x6b, 0x0d,
      0x0e, 0x92, 0x23, 0x70, 0x42, 0x6d, 0x95, 0x34, 0x58, 0x57, 0xd3, 0x58,
      0x40, 0xb8, 0x87, 0x6b, 0xc2, 0xf4, 0x1e, 0xed, 0xf3, 0x2d, 0x0b, 0x3e,
      0xa2, 0x32, 0xef, 0x8e, 0xfc, 0x54, 0x11, 0x43, 0xf3, 0xab, 0x7c, 0x49,
      0x8b, 0x9a, 0x02, 0x70, 0x05, 0x37, 0x24, 0x4e, 0xea, 0xe5, 0x90, 0xf0,
      0x49, 0x57, 0x8b, 0xd8, 0x2f, 0x69, 0x70, 0xa9, 0x82, 0xa5, 0x51, 0xc6,
      0xf5, 0x42, 0x63, 0xbb, 0x2c, 0xec, 0xfc, 0x78, 0xdb, 0x55, 0x2f, 0x61,
      0x45, 0xb7, 0x3c, 0x46, 0xe3, 0xaf, 0x16, 0x18, 0xad, 0xe4, 0x2e, 0x35,
      0x7e, 0xda, 0x01, 0xc1, 0x74, 0xf3, 0x6f, 0x02, 0x51, 0xe8, 0x3d, 0x1c,
      0x82, 0xf0, 0x1e, 0x81,
  };
  alignas(16) uint8_t state[RandenTraits::kStateBytes];
  std::memset(state, 0, sizeof(state));
  RandenSlow::Generate(RandenSlow::GetKeys(), state);
  EXPECT_EQ(0, std::memcmp(state, kGolden, sizeof(state)));
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/randen_slow.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/randen_slow_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
fe174884-e168-4067-9989-54ac45037e21 | cpp | google/tensorstore | env | tensorstore/internal/env.cc | tensorstore/internal/env_test.cc | #include "tensorstore/internal/env.h"
#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#include <processenv.h>
#endif
#include <stddef.h>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#ifndef _WIN32
extern char** environ;
#endif
namespace tensorstore {
namespace internal {
// Returns a snapshot of the current process environment as a name -> value
// map.  Entries without an '=' separator are skipped.
absl::flat_hash_map<std::string, std::string> GetEnvironmentMap() {
  absl::flat_hash_map<std::string, std::string> vars;
#if _WIN32
  // GetEnvironmentStrings returns a double-NUL-terminated block of
  // "NAME=value" entries that must be released afterwards.
  char* block = GetEnvironmentStrings();
  char* entry = block;
  while (*entry) {
    const size_t entry_len = strlen(entry);
    if (const char* sep = strchr(entry, '=')) {
      vars[std::string(entry, sep - entry)] = sep + 1;
    }
    entry += entry_len + 1;
  }
  FreeEnvironmentStrings(block);
#else
  // POSIX: `environ` is a NULL-terminated array of "NAME=value" strings.
  for (char** entry = environ; *entry; ++entry) {
    if (const char* sep = strchr(*entry, '=')) {
      vars[std::string(*entry, sep - *entry)] = sep + 1;
    }
  }
#endif
  return vars;
}
// Returns the value of environment variable `variable`, or an empty
// optional when the variable is not set.
std::optional<std::string> GetEnv(char const* variable) {
#if _WIN32
  // _dupenv_s allocates a copy of the value; the unique_ptr frees it when
  // this function returns.
  char* buffer;
  size_t size;
  _dupenv_s(&buffer, &size, variable);
  std::unique_ptr<char, decltype(&free)> release(buffer, &free);
#else
  char* buffer = std::getenv(variable);
#endif
  if (buffer == nullptr) {
    return std::nullopt;
  }
  return std::string(buffer);
}
// Sets `variable` to `value` in this process's environment, replacing any
// existing value.
void SetEnv(const char* variable, const char* value) {
#if _WIN32
  ::_putenv_s(variable, value);
#else
  ::setenv(variable, value, 1);
#endif
}
// Removes `variable` from this process's environment.  On Windows,
// assigning an empty value via _putenv_s deletes the variable.
void UnsetEnv(const char* variable) {
#if _WIN32
  ::_putenv_s(variable, "");
#else
  ::unsetenv(variable);
#endif
}
}
} | #include "tensorstore/internal/env.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::GetEnv;
using ::tensorstore::internal::GetEnvironmentMap;
using ::tensorstore::internal::GetEnvValue;
using ::tensorstore::internal::SetEnv;
using ::tensorstore::internal::UnsetEnv;
// Round-trips SetEnv/GetEnv/UnsetEnv: the value is visible while set and
// absent after unsetting.
TEST(GetEnvTest, Basic) {
  SetEnv("TENSORSTORE_TEST_ENV_VAR", "test env var");
  {
    auto var = GetEnv("TENSORSTORE_TEST_ENV_VAR");
    EXPECT_TRUE(var);
    EXPECT_EQ("test env var", *var);
  }
  UnsetEnv("TENSORSTORE_TEST_ENV_VAR");
  {
    auto var = GetEnv("TENSORSTORE_TEST_ENV_VAR");
    EXPECT_FALSE(var);
  }
}
// A variable set via SetEnv must appear exactly once in the snapshot map.
TEST(GetEnvTest, GetEnvironmentMap) {
  SetEnv("TENSORSTORE_TEST_ENV_VAR", "test env var");
  auto allenv = GetEnvironmentMap();
  EXPECT_FALSE(allenv.empty());
  EXPECT_THAT(allenv.count("TENSORSTORE_TEST_ENV_VAR"), 1);
}
// GetEnvValue<bool> parses case-insensitively ("trUe") and is empty when
// the variable is unset.
TEST(GetEnvTest, ParseBool) {
  SetEnv("TENSORSTORE_TEST_ENV_VAR", "trUe");
  {
    EXPECT_THAT(GetEnvValue<bool>("TENSORSTORE_TEST_ENV_VAR"),
                testing::Optional(true));
  }
  UnsetEnv("TENSORSTORE_TEST_ENV_VAR");
  {
    auto var = GetEnvValue<bool>("TENSORSTORE_TEST_ENV_VAR");
    EXPECT_FALSE(var);
  }
}
// GetEnvValue<int> parses a decimal value and is empty when the variable
// is unset.
TEST(GetEnvTest, ParseInt) {
  SetEnv("TENSORSTORE_TEST_ENV_VAR", "123");
  {
    EXPECT_THAT(GetEnvValue<int>("TENSORSTORE_TEST_ENV_VAR"),
                testing::Optional(123));
  }
  UnsetEnv("TENSORSTORE_TEST_ENV_VAR");
  {
    auto var = GetEnvValue<int>("TENSORSTORE_TEST_ENV_VAR");
    EXPECT_FALSE(var);
  }
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/env.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/env_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
fc1119e2-8b8e-45bf-a750-e8292627415a | cpp | google/arolla | repr | arolla/jagged_shape/util/repr.cc | arolla/jagged_shape/util/repr_test.cc | #include "arolla/jagged_shape/util/repr.h"
#include <cstddef>
#include <cstdint>
#include <sstream>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "arolla/util/string.h"
namespace arolla {
// Renders the sizes implied by `split_points` (size i = sp[i+1] - sp[i]):
// "[]" when there are no sizes, a single number when all sizes are equal,
// otherwise "[a, b, ..., y, z]" truncated to `max_part_size` leading and
// trailing entries when there are more than 2 * max_part_size sizes.
std::string CompactSplitPointsAsSizesRepr(
    absl::Span<const int64_t> split_points, size_t max_part_size) {
  if (split_points.size() <= 1) {
    return "[]";
  }
  const size_t num_sizes = split_points.size() - 1;
  auto size_at = [&split_points](size_t i) {
    return split_points[i + 1] - split_points[i];
  };
  // Uniform sizes collapse to a single number.
  bool uniform = true;
  for (size_t i = 1; i < num_sizes; ++i) {
    if (size_at(i) != size_at(0)) {
      uniform = false;
      break;
    }
  }
  if (uniform) {
    return absl::StrCat(size_at(0));
  }
  std::ostringstream out;
  out << "[";
  bool first = true;
  auto append_sizes = [&](size_t begin, size_t end) {
    for (size_t i = begin; i < end; ++i) {
      out << NonFirstComma(first) << size_at(i);
    }
  };
  if (num_sizes <= 2 * max_part_size) {
    append_sizes(0, num_sizes);
  } else {
    // Too many sizes: keep the first and last max_part_size entries with an
    // ellipsis in between.
    append_sizes(0, max_part_size);
    out << NonFirstComma(first) << "...";
    append_sizes(num_sizes - max_part_size, num_sizes);
  }
  out << "]";
  return std::move(out).str();
}
} | #include "arolla/jagged_shape/util/repr.h"
#include "gtest/gtest.h"
namespace arolla {
namespace {
// Covers the three output shapes: "[]" for zero/one split points, a bare
// number for uniform sizes, and a bracketed (possibly truncated) list for
// non-uniform sizes.
TEST(ReprTest, CompactSplitPointsAsSizesRepr) {
  {
    // No split points at all -> no sizes.
    EXPECT_EQ(CompactSplitPointsAsSizesRepr({}, 0), "[]");
    EXPECT_EQ(CompactSplitPointsAsSizesRepr({}, 2), "[]");
  }
  {
    // A single split point still yields no sizes.
    EXPECT_EQ(CompactSplitPointsAsSizesRepr({0}, 0), "[]");
    EXPECT_EQ(CompactSplitPointsAsSizesRepr({0}, 2), "[]");
  }
  {
    // Uniform sizes collapse to a single number (even size 0).
    EXPECT_EQ(
        CompactSplitPointsAsSizesRepr({0, 1, 2, 3, 4}, 0),
        "1");
    EXPECT_EQ(CompactSplitPointsAsSizesRepr({0, 2, 4}, 1),
              "2");
    EXPECT_EQ(CompactSplitPointsAsSizesRepr({0, 0, 0}, 1),
              "0");
  }
  {
    // Non-uniform sizes {2, 1, 1, 1, 3} at increasing truncation limits.
    EXPECT_EQ(
        CompactSplitPointsAsSizesRepr({0, 2, 3, 4, 5, 8}, 0),
        "[...]");
    EXPECT_EQ(
        CompactSplitPointsAsSizesRepr({0, 2, 3, 4, 5, 8}, 1),
        "[2, ..., 3]");
    EXPECT_EQ(
        CompactSplitPointsAsSizesRepr({0, 2, 3, 4, 5, 8}, 2),
        "[2, 1, ..., 1, 3]");
    EXPECT_EQ(
        CompactSplitPointsAsSizesRepr({0, 2, 3, 4, 5, 8}, 3),
        "[2, 1, 1, 1, 3]");
  }
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/jagged_shape/util/repr.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/jagged_shape/util/repr_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
fad4df4d-79a0-4439-a363-0b60791c4666 | cpp | tensorflow/tensorflow | conv_rewriter | third_party/xla/xla/service/gpu/transforms/conv_rewriter.cc | third_party/xla/xla/service/gpu/transforms/conv_rewriter_test.cc | #include "xla/service/gpu/transforms/conv_rewriter.h"
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/permutation_util.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
// Validates the element types of `conv`'s result and both operands.  Only
// floating-point and integral element types are supported; FP8 is further
// restricted to f8e4m3fn/f8e5m2 on CUDA GPUs with compute capability >= 9.0
// (Hopper).  Returns Unimplemented describing the offending convolution
// otherwise.
absl::Status CheckTypes(HloInstruction* conv,
                        const se::GpuComputeCapability cc) {
  auto valid_shape = [conv, &cc](const Shape& shape) -> absl::Status {
    PrimitiveType type = shape.element_type();
    if (!primitive_util::IsFloatingPointType(type) &&
        !primitive_util::IsIntegralType(type)) {
      return Unimplemented(
          "Convolutions must have floating-point or integral operands/outputs, "
          "but got convolution with type %s: %s",
          primitive_util::LowercasePrimitiveTypeName(type), conv->ToString());
    }
    if (primitive_util::IsF8Type(type)) {
      // Only the two cuDNN-supported FP8 formats are allowed.
      if (type != F8E4M3FN && type != F8E5M2) {
        return Unimplemented(
            "The only FP8 types supported in convolutions are f8e5m2 and "
            "f8e4m3, "
            "but got convolution with FP8 type %s: %s",
            primitive_util::LowercasePrimitiveTypeName(type), conv->ToString());
      }
      // FP8 convolutions require a CUDA device of at least Hopper class.
      if (!std::holds_alternative<se::CudaComputeCapability>(cc)) {
        return Unimplemented(
            "FP8 convolutions are only supported on CUDA GPUs, but got "
            "FP8 convolution on ROCm GPU: %s",
            conv->ToString());
      } else if (!std::get<se::CudaComputeCapability>(cc).IsAtLeastHopper()) {
        return Unimplemented(
            "FP8 convolutions are only supported on CUDA GPUs with compute "
            "capability at least 9.0, but got "
            "FP8 convolution on GPU with compute capability %s: %s",
            std::get<se::CudaComputeCapability>(cc).ToString(),
            conv->ToString());
      }
    }
    return absl::OkStatus();
  };
  // Check the result shape and both operand shapes.
  TF_RETURN_IF_ERROR(valid_shape(conv->shape()));
  TF_RETURN_IF_ERROR(valid_shape(conv->operand(0)->shape()));
  TF_RETURN_IF_ERROR(valid_shape(conv->operand(1)->shape()));
  return absl::OkStatus();
}
using ConvolutionMatch = std::optional<
std::tuple<Window, ConvolutionDimensionNumbers, HloInstruction*>>;
// Returns true when a 2D convolution looks like a 1D convolution that was
// expanded to 2D: its filter is a reshape that only inserts a single
// size-1 dimension, and that inserted dimension is one of the kernel's
// spatial dimensions.
bool MaybeConv1dToConv2d(HloInstruction* conv) {
  if (conv->window().dimensions().size() != 2) {
    return false;
  }
  const HloInstruction* filter = conv->operand(1);
  if (filter->opcode() != HloOpcode::kReshape) {
    return false;
  }
  std::optional<ShapeUtil::ShapeEqualityDescriptor> degenerate =
      filter->ReshapeMerelyInsertsOrDeletes1SizedDimensions();
  if (!degenerate.has_value() || !degenerate->deleted_dimensions.empty() ||
      degenerate->inserted_dimensions.size() != 1) {
    return false;
  }
  const auto inserted_dim = degenerate->inserted_dimensions[0];
  const auto& dnums = conv->convolution_dimension_numbers();
  for (auto spatial_dim : dnums.kernel_spatial_dimensions()) {
    if (spatial_dim == inserted_dim) {
      return true;
    }
  }
  return false;
}
// Returns whether `conv` can be lowered as a GPU forward convolution:
// at most 3 spatial dimensions, non-empty operands, and a window-reversal
// pattern the backend supports (2D: all-or-none reversal; otherwise: no
// reversal at all).
bool CanImplementAsGpuForwardConv(HloInstruction* conv) {
  const ConvolutionDimensionNumbers& dnums =
      conv->convolution_dimension_numbers();
  if (dnums.input_spatial_dimensions_size() > 3) {
    return false;
  }
  const bool has_empty_operand =
      ShapeUtil::IsZeroElementArray(conv->operand(0)->shape()) ||
      ShapeUtil::IsZeroElementArray(conv->operand(1)->shape());
  if (has_empty_operand) {
    return false;
  }
  const bool unsupported_reversal =
      dnums.input_spatial_dimensions_size() == 2
          ? !window_util::AllOrNoneReversed(conv->window())
          : window_util::HasWindowReversal(conv->window());
  return !unsupported_reversal;
}
// Tries to pattern-match `conv` as a backward-filter (weight-gradient)
// convolution.  On success returns the window, dimension numbers and LHS
// to use for the backward-filter call; returns nullopt when the
// convolution should remain a forward convolution.
ConvolutionMatch MatchBackwardFilter(HloInstruction* conv) {
  VLOG(2) << "Trying to match convolution backward filter.";
  // Grouped backward filters are lowered to batch-grouped convolutions by
  // the tf2xla bridge, so a convolution with feature groups here cannot be
  // a backward filter.
  if (conv->feature_group_count() > 1) {
    VLOG(1) << conv->ToString()
            << " is a forward convolution. All grouped backward filters are "
               "mapped to batch grouped convolutions in tf2xla bridge. Hence "
               "backward filter "
               "convolutions cannot have feature groups greater than 1 at this "
               "point. No need to fold to backward filter.";
    return std::nullopt;
  }
  CHECK_EQ(HloOpcode::kConvolution, conv->opcode());
  // Aliases for the forward convolution's dimension numbers.
  const ConvolutionDimensionNumbers& conv_dnums =
      conv->convolution_dimension_numbers();
  auto input_batch_dim = conv_dnums.input_batch_dimension();
  auto input_feature_dim = conv_dnums.input_feature_dimension();
  auto input_spatial_dims = conv_dnums.input_spatial_dimensions();
  auto kernel_input_feature_dim = conv_dnums.kernel_input_feature_dimension();
  auto kernel_output_feature_dim = conv_dnums.kernel_output_feature_dimension();
  auto kernel_spatial_dims = conv_dnums.kernel_spatial_dimensions();
  auto output_batch_dim = conv_dnums.output_batch_dimension();
  auto output_feature_dim = conv_dnums.output_feature_dimension();
  auto output_spatial_dims = conv_dnums.output_spatial_dimensions();
  // A backward-filter pattern must be unstrided, without base dilation,
  // with non-negative low padding and no window reversal.
  for (const WindowDimension& window_dim : conv->window().dimensions()) {
    if (window_dim.stride() != 1) {
      VLOG(1) << "Forward convolution's window "
              << conv->window().ShortDebugString()
              << " should have stride of 1.";
      return std::nullopt;
    }
    if (window_dim.base_dilation() != 1) {
      VLOG(1) << "Forward convolution's window "
              << conv->window().ShortDebugString()
              << " should have no base (LHS) dilation.";
      return std::nullopt;
    }
    if (window_dim.padding_low() < 0) {
      VLOG(1) << "Padding low should be non-negative.";
      return std::nullopt;
    }
    if (window_dim.window_reversal()) {
      VLOG(1) << "Window reversal field not supported";
      return std::nullopt;
    }
  }
  // Heuristic: count spatial dims where the kernel is no larger than the
  // output.  A mostly-small kernel with no window dilation is a regular
  // forward convolution (a possible conv1d-as-conv2d gets one exemption).
  int small_kernel_dimension_num = 0;
  for (int i = 0; i < kernel_spatial_dims.size(); ++i) {
    if (conv->operand(1)->shape().dimensions(kernel_spatial_dims[i]) <=
        conv->shape().dimensions(output_spatial_dims[i])) {
      small_kernel_dimension_num += 1;
    }
  }
  if ((kernel_spatial_dims.empty() || small_kernel_dimension_num > 1 ||
       (!MaybeConv1dToConv2d(conv) && small_kernel_dimension_num == 1)) &&
      !window_util::HasWindowDilation(conv->window())) {
    VLOG(1) << conv->ToString()
            << " is a regular forward convolution. No need "
               "to fold it to a backward filter convolution....";
    return std::nullopt;
  }
  // Construct the backward-filter window.  The forward window dilation
  // becomes the backward stride; padding_high is bracketed into the valid
  // [min, max] range, preferring symmetric padding when possible.
  Window backward_conv_window;
  for (int i = 0; i < input_spatial_dims.size(); ++i) {
    WindowDimension* dim = backward_conv_window.add_dimensions();
    int64_t filter_size = conv->shape().dimensions(output_spatial_dims[i]);
    dim->set_size(filter_size);
    dim->set_stride(conv->window().dimensions(i).window_dilation());
    dim->set_padding_low(conv->window().dimensions(i).padding_low());
    dim->set_base_dilation(1);
    dim->set_window_dilation(1);
    int64_t input_size =
        conv->operand(0)->shape().dimensions(input_spatial_dims[i]);
    int64_t output_size = conv->window().dimensions(i).size();
    // Valid padding_high values lie in [min_padding_high, max_padding_high].
    int64_t padded_input_size = filter_size + (output_size - 1) * dim->stride();
    int64_t min_padding_high =
        padded_input_size - input_size - dim->padding_low();
    int64_t max_padding_high = min_padding_high + dim->stride() - 1;
    CHECK_GE(dim->padding_low(), 0);
    if (dim->padding_low() >= min_padding_high &&
        dim->padding_low() <= max_padding_high) {
      dim->set_padding_high(dim->padding_low());
    } else {
      // Clamp to the nearest valid value.
      if (dim->padding_low() < min_padding_high) {
        dim->set_padding_high(min_padding_high);
      } else {
        dim->set_padding_high(max_padding_high);
      }
    }
    // Negative high padding is not supported by GpuConvPaddingLegalization.
    if (dim->padding_high() < 0) {
      LOG(WARNING)
          << "Fusing this pattern to backward filter convolution would cause "
             "negative padding ("
          << dim->padding_high()
          << ") on right/bottom of the weight gradients, which is not "
             "supported by GpuConvPaddingLegalization (b/32744257). "
             "Falling back to "
             "unfused convolution for instruction: "
          << conv->ToString();
      return std::nullopt;
    }
  }
  // Dimension numbers for the backward-filter convolution: the forward
  // input becomes the backward input (batch/feature swapped), the forward
  // kernel layout describes the backward output, and the forward output
  // layout describes the backward kernel.
  ConvolutionDimensionNumbers backward_conv_dnums;
  backward_conv_dnums.set_input_batch_dimension(input_feature_dim);
  backward_conv_dnums.set_input_feature_dimension(input_batch_dim);
  for (int i = 0; i < input_spatial_dims.size(); ++i) {
    backward_conv_dnums.add_input_spatial_dimensions(input_spatial_dims[i]);
  }
  backward_conv_dnums.set_output_batch_dimension(kernel_input_feature_dim);
  backward_conv_dnums.set_output_feature_dimension(kernel_output_feature_dim);
  for (int i = 0; i < kernel_spatial_dims.size(); ++i) {
    backward_conv_dnums.add_output_spatial_dimensions(kernel_spatial_dims[i]);
  }
  backward_conv_dnums.set_kernel_input_feature_dimension(output_batch_dim);
  backward_conv_dnums.set_kernel_output_feature_dimension(output_feature_dim);
  for (int i = 0; i < output_spatial_dims.size(); ++i) {
    backward_conv_dnums.add_kernel_spatial_dimensions(output_spatial_dims[i]);
  }
  HloInstruction* lhs = conv->mutable_operand(0);
  return std::make_tuple(backward_conv_window, backward_conv_dnums, lhs);
}
ConvolutionMatch MatchBackwardInput(HloInstruction* conv) {
VLOG(2) << "Trying to match convolution backward input.";
if (conv->feature_group_count() > 1) {
return std::nullopt;
}
CHECK_EQ(HloOpcode::kConvolution, conv->opcode());
HloInstruction* reverse_filter = conv->mutable_operand(1);
ConvolutionDimensionNumbers dnums = conv->convolution_dimension_numbers();
auto kernel_out_feature_dim = dnums.kernel_output_feature_dimension();
auto kernel_out_features =
reverse_filter->shape().dimensions(kernel_out_feature_dim);
if (conv->feature_group_count() > 1 &&
kernel_out_features == conv->feature_group_count()) {
return std::nullopt;
}
bool is_reversed_filter =
reverse_filter->opcode() == HloOpcode::kReverse &&
absl::c_is_permutation(dnums.kernel_spatial_dimensions(),
reverse_filter->dimensions());
bool is_reversed_conv1d_filter =
MaybeConv1dToConv2d(conv) &&
reverse_filter->operand(0)->opcode() == HloOpcode::kReverse;
bool is_1x1_filter =
absl::c_all_of(conv->window().dimensions(),
[](const WindowDimension& d) { return d.size() == 1; });
if (!is_reversed_filter && !is_reversed_conv1d_filter &&
!(window_util::HasBaseDilation(conv->window()) &&
(reverse_filter->IsConstant() || is_1x1_filter))) {
VLOG(1) << "Can't match to backwards convolution. Either filter is not "
"kReverse, or it's not a base-dilated conv with a 1x1 or "
"constant filter.";
return std::nullopt;
}
for (const WindowDimension& window_dim : conv->window().dimensions()) {
if (window_dim.stride() != 1) {
VLOG(1) << "Forward convolution's window "
<< conv->window().ShortDebugString()
<< " should have stride of 1.";
return std::nullopt;
}
if (window_dim.window_dilation() != 1) {
VLOG(1) << "Forward convolution's window "
<< conv->window().ShortDebugString()
<< " should have no window dilation.";
return std::nullopt;
}
if (window_dim.window_reversal()) {
VLOG(1) << "Window reversal field not supported";
return std::nullopt;
}
}
const auto& input_spatial_dims = dnums.input_spatial_dimensions();
const auto& output_spatial_dims = dnums.output_spatial_dimensions();
CHECK_EQ(conv->window().dimensions().size(), input_spatial_dims.size());
CHECK_EQ(output_spatial_dims.size(), input_spatial_dims.size());
const Window& old_window = conv->window();
Window new_window = old_window;
for (size_t i = 0; i < input_spatial_dims.size(); ++i) {
auto dim = new_window.mutable_dimensions(i);
dim->set_stride(old_window.dimensions(i).base_dilation());
dim->set_base_dilation(1);
auto kernel_size = old_window.dimensions(i).size();
auto backward_padding_low =
kernel_size - 1 - old_window.dimensions(i).padding_low();
if (backward_padding_low < 0) {
LOG(WARNING)
<< "The low padding of the backward convolution would be negative ("
<< backward_padding_low
<< "), which isn't supported by GpuConvPaddingLegalization "
"for now (b/32744257).";
return std::nullopt;
}
dim->set_padding_low(backward_padding_low);
auto unpadded_input_size = conv->shape().dimensions(output_spatial_dims[i]);
auto output_size =
conv->operand(0)->shape().dimensions(input_spatial_dims[i]);
auto padded_input_size = kernel_size + dim->stride() * (output_size - 1);
auto total_pad_size = padded_input_size - unpadded_input_size;
auto min_padding_high = total_pad_size - backward_padding_low;
auto max_padding_high = min_padding_high + dim->stride() - 1;
if (backward_padding_low >= min_padding_high &&
backward_padding_low <= max_padding_high) {
dim->set_padding_high(backward_padding_low);
} else {
if (backward_padding_low < min_padding_high) {
dim->set_padding_high(min_padding_high);
} else {
dim->set_padding_high(max_padding_high);
}
}
if (dim->padding_high() < 0) {
LOG(WARNING) << "Fusing this pattern to backward convolution would cause "
"negative padding ("
<< dim->padding_high()
<< ") on right/bottom of the activations, which is not "
"supported by GpuConvPaddingLegalization (b/32744257). "
"Falling back to unfused convolution for instruction: "
<< conv->ToString();
return std::nullopt;
}
}
auto conv_dnums = conv->convolution_dimension_numbers();
dnums.set_kernel_input_feature_dimension(
conv_dnums.kernel_output_feature_dimension());
dnums.set_kernel_output_feature_dimension(
conv_dnums.kernel_input_feature_dimension());
for (int i = 0; i < input_spatial_dims.size(); ++i) {
dnums.set_input_spatial_dimensions(i,
conv_dnums.output_spatial_dimensions(i));
dnums.set_output_spatial_dimensions(i,
conv_dnums.input_spatial_dimensions(i));
}
dnums.set_input_feature_dimension(conv_dnums.output_feature_dimension());
dnums.set_input_batch_dimension(conv_dnums.output_batch_dimension());
dnums.set_output_feature_dimension(conv_dnums.input_feature_dimension());
dnums.set_output_batch_dimension(conv_dnums.input_batch_dimension());
if (reverse_filter->opcode() != HloOpcode::kReverse &&
reverse_filter->IsConstant()) {
HloComputation* c = conv->parent();
reverse_filter = c->AddInstruction(
HloInstruction::CreateReverse(reverse_filter->shape(), reverse_filter,
dnums.kernel_spatial_dimensions()));
reverse_filter = c->AddInstruction(
HloInstruction::CreateReverse(reverse_filter->shape(), reverse_filter,
dnums.kernel_spatial_dimensions()));
TF_CHECK_OK(conv->ReplaceOperandWith(1, reverse_filter));
}
HloInstruction* rhs = reverse_filter;
if (rhs->opcode() == HloOpcode::kReverse) {
rhs = rhs->mutable_operand(0);
} else if (is_reversed_conv1d_filter) {
auto src = rhs->mutable_operand(0)->mutable_operand(0);
rhs = conv->parent()->AddInstruction(
HloInstruction::CreateReshape(rhs->shape(), src));
}
if (conv->feature_group_count() == 1) {
return std::make_tuple(new_window, dnums, rhs);
}
int64_t input_feature_dimension = dnums.kernel_input_feature_dimension();
int64_t output_feature_dimension = dnums.kernel_output_feature_dimension();
if (std::abs(input_feature_dimension - output_feature_dimension) != 1) {
return std::nullopt;
}
int64_t input_features = rhs->shape().dimensions(input_feature_dimension);
int64_t output_features = rhs->shape().dimensions(output_feature_dimension);
std::vector<int64_t> reshape_dims = SpanToVector(rhs->shape().dimensions());
auto num_groups = conv->feature_group_count();
CHECK_EQ(input_features % num_groups, 0)
<< "Input feature count should be an exact multiple of feature group "
"count";
reshape_dims[input_feature_dimension] =
reshape_dims[input_feature_dimension] / num_groups;
reshape_dims.insert(reshape_dims.begin() + input_feature_dimension,
num_groups);
HloComputation* c = conv->parent();
rhs = c->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(rhs->shape().element_type(), reshape_dims), rhs));
std::vector<int64_t> transpose_dims(rhs->shape().dimensions_size());
std::iota(transpose_dims.begin(), transpose_dims.end(), 0);
transpose_dims.erase(transpose_dims.begin() + input_feature_dimension);
transpose_dims.insert(transpose_dims.begin() + output_feature_dimension,
input_feature_dimension);
std::vector<int64_t> transpose_reshape_dims =
SpanToVector(rhs->shape().dimensions());
transpose_reshape_dims.erase(transpose_reshape_dims.begin() +
input_feature_dimension);
transpose_reshape_dims.insert(
transpose_reshape_dims.begin() + output_feature_dimension, num_groups);
rhs = c->AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(rhs->shape().element_type(), transpose_reshape_dims),
rhs, transpose_dims));
Shape new_shape = rhs->shape();
new_shape.DeleteDimension(output_feature_dimension);
new_shape.set_dimensions(output_feature_dimension,
output_features * num_groups);
rhs = c->AddInstruction(HloInstruction::CreateReshape(new_shape, rhs));
return std::make_tuple(new_window, dnums, rhs);
}
// Builds and returns a cuDNN convolution custom call with the given call
// target and convolution configuration.  The custom call's shape is a tuple
// of (conv result `shape`, u8[0] scratch buffer).
HloInstruction* CreateGpuConv(absl::string_view call_target, const Shape& shape,
                              HloInstruction* lhs, HloInstruction* rhs,
                              const Window& window,
                              const ConvolutionDimensionNumbers& dnums,
                              int64_t feature_group_count,
                              const PrecisionConfig& precision_config,
                              const OpMetadata& metadata) {
  HloComputation* computation = lhs->parent();

  // cuDNN conv custom calls return (result, scratch-memory) as a tuple.
  Shape call_shape =
      ShapeUtil::MakeTupleShape({shape, ShapeUtil::MakeShape(U8, {0})});

  HloInstruction* custom_call = computation->AddInstruction(
      HloInstruction::CreateCustomCall(call_shape, {lhs, rhs}, call_target));
  custom_call->set_window(window);
  custom_call->set_convolution_dimension_numbers(dnums);
  custom_call->set_feature_group_count(feature_group_count);
  *custom_call->mutable_precision_config() = precision_config;
  custom_call->set_metadata(metadata);

  // Give the custom call a more readable name, if we recognize the target.
  const auto pick_name = [&call_target]() -> std::optional<std::string> {
    if (call_target == kCudnnConvForwardCallTarget) {
      return "cudnn-conv";
    }
    if (call_target == kCudnnConvBackwardInputCallTarget) {
      return "cudnn-conv-bw-input";
    }
    if (call_target == kCudnnConvBackwardFilterCallTarget) {
      return "cudnn-conv-bw-filter";
    }
    if (call_target == kCudnnConvBiasActivationForwardCallTarget) {
      return "cudnn-conv-bias-activation";
    }
    return std::nullopt;
  };
  if (std::optional<std::string> name = pick_name(); name.has_value()) {
    computation->parent()->SetAndUniquifyInstrName(custom_call, *name);
  }
  return custom_call;
}
// Rewrites a batch-grouped convolution (batch_group_count > 1, feature group
// count 1) into an equivalent feature-grouped convolution by folding the
// group dimension of the LHS into its input-feature dimension via
// reshape + transpose + reshape.
HloInstruction* ConvertBatchGroupedToFeatureGroupedConvolution(
    HloInstruction* conv) {
  CHECK_EQ(conv->feature_group_count(), 1);
  int64_t num_groups = conv->batch_group_count();
  auto dim_numbers = conv->convolution_dimension_numbers();
  auto lhs = conv->mutable_operand(0);
  auto rhs = conv->mutable_operand(1);
  int64_t input_batch_dimension = dim_numbers.input_batch_dimension();
  Shape output_shape = conv->shape();
  int64_t input_feature_dimension = dim_numbers.input_feature_dimension();
  int64_t input_feature = lhs->shape().dimensions(input_feature_dimension);
  HloComputation* computation = lhs->parent();
  // Shorthand for adding a new instruction to the enclosing computation.
  auto add = [&](std::unique_ptr<HloInstruction> inst) {
    return computation->AddInstruction(std::move(inst));
  };
  // Split the batch dimension B into [num_groups, B / num_groups].
  std::vector<int64_t> reshape_dims = SpanToVector(lhs->shape().dimensions());
  reshape_dims[input_batch_dimension] =
      reshape_dims[input_batch_dimension] / num_groups;
  reshape_dims.insert(reshape_dims.begin() + input_batch_dimension, num_groups);
  lhs = add(HloInstruction::CreateReshape(
      ShapeUtil::MakeShape(lhs->shape().element_type(), reshape_dims), lhs));
  // Move the newly created group dimension next to the feature dimension.
  std::vector<int64_t> transpose_dims(lhs->shape().dimensions_size());
  std::iota(transpose_dims.begin(), transpose_dims.end(), 0);
  transpose_dims.erase(transpose_dims.begin() + input_batch_dimension);
  transpose_dims.insert(transpose_dims.begin() + input_feature_dimension,
                        input_batch_dimension);
  std::vector<int64_t> transpose_reshape_dims =
      ComposePermutations(lhs->shape().dimensions(), transpose_dims);
  lhs = add(HloInstruction::CreateTranspose(
      ShapeUtil::MakeShape(lhs->shape().element_type(), transpose_reshape_dims),
      lhs, transpose_dims));
  // Collapse [num_groups, F] into a single feature dimension of size
  // num_groups * F.
  Shape new_shape = lhs->shape();
  new_shape.DeleteDimension(input_feature_dimension);
  new_shape.set_dimensions(input_feature_dimension, input_feature * num_groups);
  lhs = add(HloInstruction::CreateReshape(new_shape, lhs));
  // Re-create the convolution as a feature-grouped conv over the folded LHS;
  // output shape and dimension numbers are unchanged.
  std::vector<HloInstruction*> new_operands = {lhs, rhs};
  auto new_conv = conv->CloneWithNewOperands(output_shape, new_operands);
  new_conv->set_feature_group_count(num_groups);
  new_conv->set_batch_group_count(1);
  new_conv->set_convolution_dimension_numbers(dim_numbers);
  return computation->AddInstruction(std::move(new_conv));
}
// Returns the backend config attached to freshly created cuDNN conv custom
// calls: all defaults, with a conv result scale of 1 (i.e. no scaling).
CudnnConvBackendConfig GetDefaultBackendConfig() {
  CudnnConvBackendConfig result;
  result.set_conv_result_scale(1);
  return result;
}
// Attempts to lower `conv` to a cuDNN custom call.  Patterns are tried in
// order: backward-input, backward-filter, then plain forward convolution.
// Returns the new custom call, or nullptr if the convolution cannot be
// implemented by cuDNN.
static absl::StatusOr<HloInstruction*> CreateCustomCallHelper(
    HloInstruction* conv, const se::GpuComputeCapability& cc) {
  TF_RETURN_IF_ERROR(CheckTypes(conv, cc));
  if (ConvolutionMatch m = MatchBackwardInput(conv)) {
    // Backward-input conv: the matcher produced a rewritten filter operand.
    auto& [window, dnums, rhs] = *m;
    return CreateGpuConv(kCudnnConvBackwardInputCallTarget, conv->shape(),
                         conv->mutable_operand(0), rhs, window, dnums,
                         conv->feature_group_count(), conv->precision_config(),
                         conv->metadata());
  }
  if (ConvolutionMatch m = MatchBackwardFilter(conv)) {
    // Backward-filter conv: note batch_group_count is passed as the group
    // count for this call target.
    auto& [window, dnums, lhs] = *m;
    return CreateGpuConv(kCudnnConvBackwardFilterCallTarget, conv->shape(), lhs,
                         conv->mutable_operand(1), window, dnums,
                         conv->batch_group_count(), conv->precision_config(),
                         conv->metadata());
  }
  if (CanImplementAsGpuForwardConv(conv)) {
    // Forward convs with batch groups are first rewritten so the groups live
    // in the feature dimension.
    if (conv->batch_group_count() > 1) {
      conv = ConvertBatchGroupedToFeatureGroupedConvolution(conv);
    }
    return CreateGpuConv(kCudnnConvForwardCallTarget, conv->shape(),
                         conv->mutable_operand(0), conv->mutable_operand(1),
                         conv->window(), conv->convolution_dimension_numbers(),
                         conv->feature_group_count(), conv->precision_config(),
                         conv->metadata());
  }
  return nullptr;
}
// Replaces one kConvolution instruction with a cuDNN custom call plus a
// get-tuple-element extracting the conv result.  Returns true if the
// instruction was rewritten, false if it is not lowerable.
absl::StatusOr<bool> RunOnInstruction(HloInstruction* conv,
                                      const se::GpuComputeCapability& cc) {
  CHECK_EQ(conv->opcode(), HloOpcode::kConvolution);
  TF_ASSIGN_OR_RETURN(HloInstruction * custom_call,
                      CreateCustomCallHelper(conv, cc));
  if (custom_call == nullptr) {
    return false;
  }
  // Attach the default cuDNN conv backend config (result scale = 1).
  GpuBackendConfig gpu_backend_config;
  *gpu_backend_config.mutable_cudnn_conv_backend_config() =
      GetDefaultBackendConfig();
  TF_RETURN_IF_ERROR(custom_call->set_backend_config(gpu_backend_config));
  VLOG(1) << "Replacing convolution " << conv->ToString() << " with "
          << custom_call->ToString();
  // The custom call returns (result, scratch); consumers of the original
  // conv only want tuple element 0.
  TF_RETURN_IF_ERROR(conv->parent()->ReplaceWithNewInstruction(
      conv,
      HloInstruction::CreateGetTupleElement(conv->shape(), custom_call, 0)));
  return true;
}
// Rewrites every convolution in `computation`.  Returns true if any
// instruction changed.
absl::StatusOr<bool> RunOnComputation(HloComputation* computation,
                                      const se::GpuComputeCapability& cc) {
  // Snapshot the convolutions up front: RunOnInstruction replaces
  // instructions, which would interfere with iterating over
  // computation->instructions() directly.
  std::vector<HloInstruction*> convs;
  for (HloInstruction* instr : computation->instructions()) {
    if (instr->opcode() == HloOpcode::kConvolution) {
      convs.push_back(instr);
    }
  }

  bool changed = false;
  for (HloInstruction* conv : convs) {
    TF_ASSIGN_OR_RETURN(bool replaced, RunOnInstruction(conv, cc));
    changed |= replaced;
  }
  return changed;
}
}
// Pass entry point: rewrites convolutions in all non-fusion computations of
// `module`.  Returns true if anything changed.
absl::StatusOr<bool> ConvRewriter::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  XLA_VLOG_LINES(2, "ConvRewriter::Run(), before:\n" + module->ToString());
  bool any_changed = false;
  for (HloComputation* computation :
       module->MakeNonfusionComputations(execution_threads)) {
    TF_ASSIGN_OR_RETURN(bool computation_changed,
                        RunOnComputation(computation, compute_capability_));
    any_changed |= computation_changed;
  }
  XLA_VLOG_LINES(2, "ConvRewriter::Run(), after:\n" + module->ToString());
  return any_changed;
}
// Returns true if `conv` can be lowered by this pass, i.e. it matches the
// forward, backward-filter, or backward-input pattern.
bool ConvRewriter::ConvIsLowerable(HloInstruction* conv) {
  if (CanImplementAsGpuForwardConv(conv)) {
    return true;
  }
  if (MatchBackwardFilter(conv)) {
    return true;
  }
  return static_cast<bool>(MatchBackwardInput(conv));
}
}
}
#include "xla/service/gpu/transforms/conv_rewriter.h"
#include <optional>
#include <string>
#include "absl/log/check.h"
#include "absl/strings/str_format.h"
#include "xla/array4d.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/literal_util.h"
#include "xla/protobuf_util.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/shape_inference.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
// Test fixture.  Builds a default 2-spatial-dimension window with identity
// parameters, plus TensorFlow-style convolution dimension numbers used to
// express backward-filter and backward-input convolutions.
class ConvRewriterTest : public HloTestBase {
 public:
  ConvRewriterTest()
      : HloTestBase(true,
                    false) {
    // Default window: two spatial dims, size/stride/dilation 1, no padding.
    for (int i = 0; i < 2; ++i) {
      WindowDimension* window_dim = default_conv_window_.add_dimensions();
      window_dim->set_size(1);
      window_dim->set_stride(1);
      window_dim->set_padding_low(0);
      window_dim->set_padding_high(0);
      window_dim->set_window_dilation(1);
      window_dim->set_base_dilation(1);
    }
    // Dimension numbers for a backward-filter convolution expressed as a
    // forward conv (TF layout conventions).
    tf_default_dnums_for_backward_filter_.set_input_batch_dimension(3);
    tf_default_dnums_for_backward_filter_.set_input_feature_dimension(0);
    tf_default_dnums_for_backward_filter_.add_input_spatial_dimensions(1);
    tf_default_dnums_for_backward_filter_.add_input_spatial_dimensions(2);
    tf_default_dnums_for_backward_filter_.set_kernel_input_feature_dimension(0);
    tf_default_dnums_for_backward_filter_.set_kernel_output_feature_dimension(
        3);
    tf_default_dnums_for_backward_filter_.add_kernel_spatial_dimensions(1);
    tf_default_dnums_for_backward_filter_.add_kernel_spatial_dimensions(2);
    tf_default_dnums_for_backward_filter_.add_output_spatial_dimensions(0);
    tf_default_dnums_for_backward_filter_.add_output_spatial_dimensions(1);
    tf_default_dnums_for_backward_filter_.set_output_batch_dimension(2);
    tf_default_dnums_for_backward_filter_.set_output_feature_dimension(3);

    // Dimension numbers for a backward-input convolution expressed as a
    // forward conv.
    tf_default_dnums_for_backward_input_.set_input_batch_dimension(0);
    tf_default_dnums_for_backward_input_.set_output_batch_dimension(0);
    tf_default_dnums_for_backward_input_.set_input_feature_dimension(3);
    tf_default_dnums_for_backward_input_.set_output_feature_dimension(3);
    tf_default_dnums_for_backward_input_.add_input_spatial_dimensions(1);
    tf_default_dnums_for_backward_input_.add_output_spatial_dimensions(1);
    tf_default_dnums_for_backward_input_.add_input_spatial_dimensions(2);
    tf_default_dnums_for_backward_input_.add_output_spatial_dimensions(2);
    tf_default_dnums_for_backward_input_.set_kernel_input_feature_dimension(3);
    tf_default_dnums_for_backward_input_.set_kernel_output_feature_dimension(2);
    tf_default_dnums_for_backward_input_.add_kernel_spatial_dimensions(0);
    tf_default_dnums_for_backward_input_.add_kernel_spatial_dimensions(1);
  }

 protected:
  // Compute capability of the default test device.
  const se::GpuComputeCapability& GetComputeCapability() {
    return backend()
        .default_stream_executor()
        ->GetDeviceDescription()
        .gpu_compute_capability();
  }

  // Runs the ConvRewriter pass over `module`; returns whether it changed.
  bool RunPass(HloModule* module) {
    return ConvRewriter(GetComputeCapability()).Run(module).value();
  }

  Window default_conv_window_;
  ConvolutionDimensionNumbers tf_default_dnums_for_backward_filter_;
  ConvolutionDimensionNumbers tf_default_dnums_for_backward_input_;
};
// A window-dilated conv of activations and gradients is rewritten to a cuDNN
// backward-filter custom call, and the original op metadata is preserved.
TEST_F(ConvRewriterTest, BackwardFilterConvolve) {
  HloComputation::Builder builder(TestName());
  HloInstruction* activations =
      builder.AddInstruction(HloInstruction::CreateParameter(
          0, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "activations"));
  HloInstruction* gradients =
      builder.AddInstruction(HloInstruction::CreateParameter(
          1, ShapeUtil::MakeShape(F32, {1, 1, 2, 1}), "gradients"));
  Window conv_window = default_conv_window_;
  conv_window.mutable_dimensions(1)->set_size(2);
  // Window dilation on the gradients marks this as a backward-filter conv.
  conv_window.mutable_dimensions(1)->set_window_dilation(2);
  auto* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      ShapeInference::InferConvolveShape(
          activations->shape(), gradients->shape(), 1,
          1, conv_window,
          tf_default_dnums_for_backward_filter_,
          std::nullopt)
          .value(),
      activations, gradients, 1,
      1, conv_window,
      tf_default_dnums_for_backward_filter_, DefaultPrecisionConfig(2)));
  OpMetadata metadata;
  metadata.set_op_name("foo");
  conv->set_metadata(metadata);
  auto module = CreateNewVerifiedModule();
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build());
  EXPECT_TRUE(RunPass(module.get()));
  ASSERT_THAT(entry_computation->root_instruction(),
              GmockMatch(m::GetTupleElement(
                  m::CustomCall({kCudnnConvBackwardFilterCallTarget}), 0)));
  // Metadata should survive the rewrite onto the custom call.
  const auto& md_after_opt =
      entry_computation->root_instruction()->operand(0)->metadata();
  EXPECT_TRUE(protobuf_util::ProtobufEquals(md_after_opt, metadata))
      << md_after_opt.DebugString() << " vs " << metadata.DebugString();
}
// A conv that could be a backward filter but is equally valid as a forward
// conv (no window dilation) is rewritten to the forward call target.
TEST_F(ConvRewriterTest, BackwardFilterConvolveEquivalentToForwardConvolution) {
  HloComputation::Builder builder(TestName());
  HloInstruction* activations =
      builder.AddInstruction(HloInstruction::CreateParameter(
          0, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "activations"));
  HloInstruction* gradients =
      builder.AddInstruction(HloInstruction::CreateParameter(
          1, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "gradients"));
  Window conv_window = default_conv_window_;
  conv_window.mutable_dimensions(1)->set_size(3);
  builder.AddInstruction(HloInstruction::CreateConvolve(
      ShapeInference::InferConvolveShape(
          activations->shape(), gradients->shape(), 1,
          1, conv_window,
          tf_default_dnums_for_backward_filter_,
          std::nullopt)
          .value(),
      activations, gradients, 1,
      1, conv_window,
      tf_default_dnums_for_backward_filter_, DefaultPrecisionConfig(2)));
  auto module = CreateNewVerifiedModule();
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build());
  EXPECT_TRUE(RunPass(module.get()));
  EXPECT_THAT(entry_computation->root_instruction(),
              GmockMatch(m::GetTupleElement(
                  m::CustomCall({kCudnnConvForwardCallTarget}), 0)));
}
// Symmetrically padded activations still match the backward-filter pattern.
TEST_F(ConvRewriterTest, BackwardFilterConvolveWithPaddedActivations) {
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* activations =
      builder.AddInstruction(HloInstruction::CreateParameter(
          0, ShapeUtil::MakeShape(F32, {20, 35, 35, 32}), "activations"));
  HloInstruction* gradients =
      builder.AddInstruction(HloInstruction::CreateParameter(
          1, ShapeUtil::MakeShape(F32, {20, 35, 35, 32}), "gradients"));
  Window conv_window = default_conv_window_;
  for (int i = 0; i < 2; ++i) {
    conv_window.mutable_dimensions(i)->set_size(35);
    conv_window.mutable_dimensions(i)->set_padding_low(1);
    conv_window.mutable_dimensions(i)->set_padding_high(1);
  }
  builder.AddInstruction(HloInstruction::CreateConvolve(
      ShapeUtil::MakeShape(F32, {32, 3, 3, 32}), activations, gradients,
      1, 1, conv_window,
      tf_default_dnums_for_backward_filter_, DefaultPrecisionConfig(2)));
  auto module = CreateNewVerifiedModule();
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build());
  EXPECT_TRUE(RunPass(module.get()));
  EXPECT_THAT(entry_computation->root_instruction(),
              GmockMatch(m::GetTupleElement(
                  m::CustomCall({kCudnnConvBackwardFilterCallTarget}), 0)));
}
// Window-dilated gradients with negative high padding still match the
// backward-filter pattern.
TEST_F(ConvRewriterTest, BackwardFilterConvolveWithPaddedGradients) {
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* activations =
      builder.AddInstruction(HloInstruction::CreateParameter(
          0, ShapeUtil::MakeShape(F32, {20, 10, 10, 192}), "activations"));
  HloInstruction* gradients =
      builder.AddInstruction(HloInstruction::CreateParameter(
          1, ShapeUtil::MakeShape(F32, {20, 4, 4, 320}), "gradients"));
  Window conv_window = default_conv_window_;
  for (int i = 0; i < 2; ++i) {
    conv_window.mutable_dimensions(i)->set_size(4);
    conv_window.mutable_dimensions(i)->set_padding_high(-1);
    conv_window.mutable_dimensions(i)->set_window_dilation(2);
  }
  builder.AddInstruction(HloInstruction::CreateConvolve(
      ShapeUtil::MakeShape(F32, {320, 3, 3, 192}), activations, gradients,
      1, 1, conv_window,
      tf_default_dnums_for_backward_filter_, DefaultPrecisionConfig(2)));
  auto module = CreateNewVerifiedModule();
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build());
  EXPECT_TRUE(RunPass(module.get()));
  EXPECT_THAT(entry_computation->root_instruction(),
              GmockMatch(m::GetTupleElement(
                  m::CustomCall({kCudnnConvBackwardFilterCallTarget}), 0)));
}
// Uneven padding (high only) still matches the backward-filter pattern.
TEST_F(ConvRewriterTest, BackwardFilterConvolveWithUnevenPadding) {
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* activations =
      builder.AddInstruction(HloInstruction::CreateParameter(
          0, ShapeUtil::MakeShape(F32, {20, 35, 35, 32}), "activations"));
  HloInstruction* gradients =
      builder.AddInstruction(HloInstruction::CreateParameter(
          1, ShapeUtil::MakeShape(F32, {20, 35, 35, 32}), "gradients"));
  Window conv_window = default_conv_window_;
  for (int i = 0; i < 2; ++i) {
    conv_window.mutable_dimensions(i)->set_size(35);
    conv_window.mutable_dimensions(i)->set_padding_high(1);
  }
  builder.AddInstruction(HloInstruction::CreateConvolve(
      ShapeUtil::MakeShape(F32, {32, 2, 2, 32}), activations, gradients,
      1, 1, conv_window,
      tf_default_dnums_for_backward_filter_, DefaultPrecisionConfig(2)));
  auto module = CreateNewVerifiedModule();
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build());
  EXPECT_TRUE(RunPass(module.get()));
  EXPECT_THAT(entry_computation->root_instruction(),
              GmockMatch(m::GetTupleElement(
                  m::CustomCall({kCudnnConvBackwardFilterCallTarget}), 0)));
}
// A conv over a reversed kernel with symmetric padding is rewritten to a
// cuDNN backward-input call; the resulting window keeps the same padding and
// identity stride/dilation.
TEST_F(ConvRewriterTest, BackwardInputConvolveEvenPadding) {
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* output =
      builder.AddInstruction(HloInstruction::CreateParameter(
          0, ShapeUtil::MakeShape(F32, {4, 5, 16, 16}), "output"));
  HloInstruction* kernel =
      builder.AddInstruction(HloInstruction::CreateParameter(
          1, ShapeUtil::MakeShape(F32, {5, 3, 7, 7}), "kernel"));
  // Reversal of the spatial dims is the backward-input signature.
  HloInstruction* reverse_kernel = builder.AddInstruction(
      HloInstruction::CreateReverse(kernel->shape(), kernel, {2, 3}));
  Window conv_window = default_conv_window_;
  for (int i = 0; i < 2; ++i) {
    conv_window.mutable_dimensions(i)->set_size(7);
    conv_window.mutable_dimensions(i)->set_padding_low(3);
    conv_window.mutable_dimensions(i)->set_padding_high(3);
  }
  // NCHW / IOHW dimension numbers built by hand for this test.
  ConvolutionDimensionNumbers conv_dnums;
  conv_dnums.set_input_batch_dimension(0);
  conv_dnums.set_output_batch_dimension(0);
  conv_dnums.set_input_feature_dimension(1);
  conv_dnums.set_output_feature_dimension(1);
  conv_dnums.add_input_spatial_dimensions(2);
  conv_dnums.add_output_spatial_dimensions(2);
  conv_dnums.add_input_spatial_dimensions(3);
  conv_dnums.add_output_spatial_dimensions(3);
  conv_dnums.set_kernel_input_feature_dimension(0);
  conv_dnums.set_kernel_output_feature_dimension(1);
  conv_dnums.add_kernel_spatial_dimensions(2);
  conv_dnums.add_kernel_spatial_dimensions(3);
  HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      ShapeUtil::MakeShape(F32, {4, 3, 16, 16}), output,
      reverse_kernel, 1,
      1, conv_window, conv_dnums,
      DefaultPrecisionConfig(2)));
  // Sanity-check the hand-written shape against shape inference.
  CHECK(ShapeUtil::Compatible(
      conv->shape(),
      ShapeInference::InferConvolveShape(
          output->shape(), reverse_kernel->shape(),
          1, 1, conv_window,
          conv_dnums, std::nullopt)
          .value()));
  auto module = CreateNewVerifiedModule();
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build());
  EXPECT_TRUE(RunPass(module.get()));
  ASSERT_THAT(entry_computation->root_instruction(),
              GmockMatch(m::GetTupleElement(
                  m::CustomCall({kCudnnConvBackwardInputCallTarget}), 0)));
  const HloInstruction* custom_call =
      entry_computation->root_instruction()->operand(0);
  for (int i = 0; i < 2; ++i) {
    const WindowDimension& window_dim = custom_call->window().dimensions(i);
    // Low padding of the backward convolution =
    //   kernel size - 1 - low padding on gradients.
    EXPECT_EQ(3, window_dim.padding_low());
    EXPECT_EQ(3, window_dim.padding_high());
    EXPECT_EQ(1, window_dim.stride());
    EXPECT_EQ(1, window_dim.base_dilation());
  }
}
// A base-dilated conv with a 1x1 filter (no reverse needed, since reversing
// a 1x1 filter is a no-op) matches the backward-input pattern.
TEST_F(ConvRewriterTest, BackwardInputConvolve1x1Filter) {
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* output =
      builder.AddInstruction(HloInstruction::CreateParameter(
          0, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "output"));
  HloInstruction* kernel =
      builder.AddInstruction(HloInstruction::CreateParameter(
          1, ShapeUtil::MakeShape(F32, {1, 1, 1, 1}), "kernel"));
  Window conv_window = default_conv_window_;
  // Base dilation corresponds to a forward conv with stride 2.
  conv_window.mutable_dimensions(1)->set_base_dilation(2);
  builder.AddInstruction(HloInstruction::CreateConvolve(
      ShapeInference::InferConvolveShape(
          output->shape(), kernel->shape(),
          1,
          1, conv_window,
          tf_default_dnums_for_backward_input_,
          std::nullopt)
          .value(),
      output, kernel, 1,
      1, conv_window,
      tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2)));
  auto module = CreateNewVerifiedModule();
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build());
  EXPECT_TRUE(RunPass(module.get()));
  EXPECT_THAT(entry_computation->root_instruction(),
              GmockMatch(m::GetTupleElement(
                  m::CustomCall({kCudnnConvBackwardInputCallTarget}), 0)));
}
// A 1x1-filter conv with the default (undilated) window is indistinguishable
// from a forward conv and is rewritten to the forward call target.
TEST_F(ConvRewriterTest,
       BackwardInputConvolve1x1FilterEquivalentToForwardConvolve) {
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* output =
      builder.AddInstruction(HloInstruction::CreateParameter(
          0, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "output"));
  HloInstruction* kernel =
      builder.AddInstruction(HloInstruction::CreateParameter(
          1, ShapeUtil::MakeShape(F32, {1, 1, 1, 1}), "kernel"));
  builder.AddInstruction(HloInstruction::CreateConvolve(
      ShapeInference::InferConvolveShape(
          output->shape(), kernel->shape(), 1,
          1, default_conv_window_,
          tf_default_dnums_for_backward_input_,
          std::nullopt)
          .value(),
      output, kernel, 1,
      1, default_conv_window_,
      tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2)));
  auto module = CreateNewVerifiedModule();
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build());
  EXPECT_TRUE(RunPass(module.get()));
  EXPECT_THAT(entry_computation->root_instruction(),
              GmockMatch(m::GetTupleElement(
                  m::CustomCall({kCudnnConvForwardCallTarget}), 0)));
}
// Base-dilated gradients with uneven padding match backward-input; the
// rewritten window has zero padding and the dilation turned into stride.
TEST_F(ConvRewriterTest, BackwardInputConvolveUnevenPaddingOnGradients) {
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* output =
      builder.AddInstruction(HloInstruction::CreateParameter(
          0, ShapeUtil::MakeShape(F32, {20, 4, 4, 320}), "output"));
  HloInstruction* kernel =
      builder.AddInstruction(HloInstruction::CreateParameter(
          1, ShapeUtil::MakeShape(F32, {3, 3, 192, 320}), "kernel"));
  HloInstruction* reverse_kernel = builder.AddInstruction(
      HloInstruction::CreateReverse(kernel->shape(), kernel, {0, 1}));
  Window conv_window = default_conv_window_;
  for (int i = 0; i < 2; ++i) {
    conv_window.mutable_dimensions(i)->set_size(3);
    conv_window.mutable_dimensions(i)->set_padding_low(2);
    conv_window.mutable_dimensions(i)->set_padding_high(3);
    conv_window.mutable_dimensions(i)->set_base_dilation(2);
  }
  HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      ShapeUtil::MakeShape(F32, {20, 10, 10, 192}), output, reverse_kernel,
      1, 1, conv_window,
      tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2)));
  // Sanity-check the hand-written shape against shape inference.
  CHECK(ShapeUtil::Compatible(
      conv->shape(), ShapeInference::InferConvolveShape(
                         output->shape(), reverse_kernel->shape(),
                         1, 1,
                         conv_window, tf_default_dnums_for_backward_input_,
                         std::nullopt)
                         .value()));
  auto module = CreateNewVerifiedModule();
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build());
  EXPECT_TRUE(RunPass(module.get()));
  ASSERT_THAT(entry_computation->root_instruction(),
              GmockMatch(m::GetTupleElement(
                  m::CustomCall({kCudnnConvBackwardInputCallTarget}), 0)));
  const HloInstruction* custom_call =
      entry_computation->root_instruction()->operand(0);
  for (int i = 0; i < 2; ++i) {
    const WindowDimension& window_dim = custom_call->window().dimensions(i);
    EXPECT_EQ(0, window_dim.padding_low());
    EXPECT_EQ(0, window_dim.padding_high());
    EXPECT_EQ(2, window_dim.stride());
    EXPECT_EQ(1, window_dim.base_dilation());
  }
}
// Low padding larger than the backward-input pattern allows: falls back to a
// forward conv custom call.
TEST_F(ConvRewriterTest, BackwardInputConvolveLowPaddingTooLarge) {
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* output =
      builder.AddInstruction(HloInstruction::CreateParameter(
          0, ShapeUtil::MakeShape(F32, {20, 4, 4, 320}), "output"));
  HloInstruction* kernel =
      builder.AddInstruction(HloInstruction::CreateParameter(
          1, ShapeUtil::MakeShape(F32, {3, 3, 192, 320}), "kernel"));
  HloInstruction* reverse_kernel = builder.AddInstruction(
      HloInstruction::CreateReverse(kernel->shape(), kernel, {0, 1}));
  Window conv_window = default_conv_window_;
  for (int i = 0; i < 2; ++i) {
    conv_window.mutable_dimensions(i)->set_size(3);
    // padding_low = 3 > kernel_size - 1 disqualifies the backward match.
    conv_window.mutable_dimensions(i)->set_padding_low(3);
    conv_window.mutable_dimensions(i)->set_padding_high(2);
    conv_window.mutable_dimensions(i)->set_base_dilation(2);
  }
  HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      ShapeUtil::MakeShape(F32, {20, 10, 10, 192}), output, reverse_kernel,
      1, 1, conv_window,
      tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2)));
  CHECK(ShapeUtil::Compatible(
      conv->shape(), ShapeInference::InferConvolveShape(
                         output->shape(), reverse_kernel->shape(),
                         1, 1,
                         conv_window, tf_default_dnums_for_backward_input_,
                         std::nullopt)
                         .value()));
  auto module = CreateNewVerifiedModule();
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build());
  EXPECT_TRUE(RunPass(module.get()));
  EXPECT_THAT(entry_computation->root_instruction(),
              GmockMatch(m::GetTupleElement(
                  m::CustomCall({kCudnnConvForwardCallTarget}), 0)));
}
// Uneven padding on the activations: rewritten backward-input conv picks low
// padding 0 and keeps high padding 1 on the single spatial dim of interest.
TEST_F(ConvRewriterTest, BackwardInputConvolveUnevenPaddingOnActivations) {
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* output =
      builder.AddInstruction(HloInstruction::CreateParameter(
          0, ShapeUtil::MakeShape(F32, {1, 1, 7, 1}), "output"));
  HloInstruction* kernel =
      builder.AddInstruction(HloInstruction::CreateParameter(
          1, ShapeUtil::MakeShape(F32, {1, 3, 1, 1}), "kernel"));
  HloInstruction* reverse_kernel = builder.AddInstruction(
      HloInstruction::CreateReverse(kernel->shape(), kernel, {0, 1}));
  Window conv_window = default_conv_window_;
  WindowDimension* forward_conv_col_dim = conv_window.mutable_dimensions(1);
  forward_conv_col_dim->set_size(3);
  forward_conv_col_dim->set_padding_low(2);
  forward_conv_col_dim->set_padding_high(1);
  forward_conv_col_dim->set_base_dilation(2);
  HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      ShapeUtil::MakeShape(F32, {1, 1, 14, 1}), output, reverse_kernel,
      1, 1, conv_window,
      tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2)));
  CHECK(ShapeUtil::Compatible(
      conv->shape(), ShapeInference::InferConvolveShape(
                         output->shape(), reverse_kernel->shape(),
                         1, 1,
                         conv_window, tf_default_dnums_for_backward_input_,
                         std::nullopt)
                         .value()));
  auto module = CreateNewVerifiedModule();
  const HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build());
  EXPECT_TRUE(RunPass(module.get()));
  ASSERT_THAT(entry_computation->root_instruction(),
              GmockMatch(m::GetTupleElement(
                  m::CustomCall({kCudnnConvBackwardInputCallTarget}), 0)));
  const WindowDimension& backward_conv_col_dim =
      entry_computation->root_instruction()->operand(0)->window().dimensions(1);
  EXPECT_EQ(0, backward_conv_col_dim.padding_low());
  EXPECT_EQ(1, backward_conv_col_dim.padding_high());
}
// A conv whose backward-input rewrite would need negative high padding
// cannot be fused; it falls back to a forward conv custom call.
TEST_F(ConvRewriterTest,
       BackwardInputConvolveNegativePaddingHighOnActivations) {
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* output =
      builder.AddInstruction(HloInstruction::CreateParameter(
          0, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "output"));
  HloInstruction* kernel =
      builder.AddInstruction(HloInstruction::CreateParameter(
          1, ShapeUtil::MakeShape(F32, {1, 2, 1, 1}), "kernel"));
  HloInstruction* reverse_kernel = builder.AddInstruction(
      HloInstruction::CreateReverse(kernel->shape(), kernel, {0, 1}));
  Window conv_window = default_conv_window_;
  WindowDimension* forward_conv_col_dim = conv_window.mutable_dimensions(1);
  forward_conv_col_dim->set_size(2);
  forward_conv_col_dim->set_padding_high(2);
  HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      ShapeUtil::MakeShape(F32, {1, 1, 4, 1}), output, reverse_kernel,
      1, 1, conv_window,
      tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2)));
  CHECK(ShapeUtil::Compatible(
      conv->shape(), ShapeInference::InferConvolveShape(
                         output->shape(), reverse_kernel->shape(),
                         1, 1,
                         conv_window, tf_default_dnums_for_backward_input_,
                         std::nullopt)
                         .value()));
  auto module = CreateNewVerifiedModule();
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build());
  EXPECT_TRUE(RunPass(module.get()));
  EXPECT_THAT(entry_computation->root_instruction(),
              GmockMatch(m::GetTupleElement(
                  m::CustomCall({kCudnnConvForwardCallTarget}), 0)));
}
// A base-dilated conv over a constant (unreversed) filter matches the
// backward-input pattern; the rewriter inserts the reverse itself.
TEST_F(ConvRewriterTest, BackwardInputConvolveConstantFilter) {
  Array4D<float> constant_arr(4, 4, 2, 2);
  constant_arr.FillIota(0);
  std::string constant_str =
      LiteralUtil::CreateR4FromArray4D(constant_arr).ToStringWithoutShape();
  // The constant literal is spliced into the HLO text via %s.
  const std::string module_str = absl::StrFormat(R"(
    HloModule test
    ENTRY entry_computation {
      param0 = f32[128,2,16,16]{3,2,1,0} parameter(0)
      constant = f32[4,4,2,2]{3,2,1,0} constant(%s)
      ROOT convolution = f32[128,2,32,32]{3,2,1,0} convolution(param0, constant),
          window={size=4x4 pad=2_2x2_2 lhs_dilate=2x2},
          dim_labels=bf01_01oi->bf01, feature_group_count=1
    })",
                                                 constant_str);
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
  EXPECT_TRUE(RunPass(m.get()));
  // The custom call's filter operand should be reverse(constant).
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::GetTupleElement(
                  m::CustomCall({kCudnnConvBackwardInputCallTarget},
                                m::Parameter(), m::Reverse(m::Constant())),
                  0)));
}
// A conv with fb01_io01->fb01 labels and a window spanning the full input
// should be recognized as a backward-filter convolution.
TEST_F(ConvRewriterTest, TestBackwardFilterPatternMatch) {
  const std::string module_str = absl::StrFormat(R"(
    HloModule Test
    ENTRY Test {
      input = f32[8,120,256,256] parameter(0)
      filter = f32[8,120,256,256] parameter(1)
      ROOT conv = f32[120,120,3,3] convolution(input, filter), window={size=256x256 pad=1_1x1_1}, dim_labels=fb01_io01->fb01
    })");
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
  EXPECT_TRUE(RunPass(m.get()));
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::GetTupleElement(
                  m::CustomCall({kCudnnConvBackwardFilterCallTarget},
                                m::Parameter(0), m::Parameter(1)),
                  0)));
}
// A similar conv with ordinary bf01_01io->bf01 labels must NOT be treated as
// backward-filter; it stays a forward conv custom call.
TEST_F(ConvRewriterTest, TestBackwardFilterPatternNoMatch) {
  const std::string module_str = absl::StrFormat(R"(
    HloModule Test
    ENTRY Test {
      input = f32[8,128,2,32] parameter(0)
      filter = f32[3,3,128,128] parameter(1)
      ROOT conv = f32[8,128,2,32] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01
    })");
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
  EXPECT_TRUE(RunPass(m.get()));
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::GetTupleElement(
                  m::CustomCall({kCudnnConvForwardCallTarget}, m::Parameter(0),
                                m::Parameter(1)),
                  0)));
}
// 1D conv expressed as a reshaped 2D conv: the backward-filter pattern should
// match through the reshapes.
TEST_F(ConvRewriterTest, TestConv1dBackwardFilterPatternMatch) {
  const std::string module_str = absl::StrFormat(R"(
    HloModule Test
    ENTRY Test {
      input = f32[8,256,128] parameter(0)
      filter = f32[8,254,128] parameter(1)
      reshape.1 = f32[8,1,256,128] reshape(input)
      reshape.2 = f32[8,1,254,128] reshape(filter)
      ROOT conv = f32[1,3,128,128] convolution(reshape.1, reshape.2), window={size=1x254}, dim_labels=f01b_i01o->01bf
    })");
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
  EXPECT_TRUE(RunPass(m.get()));
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::GetTupleElement(
                  m::CustomCall({kCudnnConvBackwardFilterCallTarget},
                                m::Reshape(), m::Reshape()),
                  0)));
}
// 1D backward-input conv (reversed filter + reshapes) should likewise be
// matched as a backward-input custom call.
TEST_F(ConvRewriterTest, TestConv1dBackwardInputPatternMatch) {
  const std::string module_str = absl::StrFormat(R"(
    HloModule Test
    ENTRY Test {
      input = f32[8,254,128] parameter(0)
      filter = f32[3,128,128] parameter(1)
      reverse = f32[3,128,128] reverse(filter), dimensions={0}
      reshape.1 = f32[8,1,254,128] reshape(input)
      reshape.2 = f32[1,3,128,128] reshape(reverse)
      ROOT conv = f32[8,1,256,128] convolution(reshape.1, reshape.2), window={size=1x3 pad=0_0x2_2}, dim_labels=b01f_01oi->b01f
    })");
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
  EXPECT_TRUE(RunPass(m.get()));
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::GetTupleElement(
                  m::CustomCall({kCudnnConvBackwardInputCallTarget},
                                m::Reshape(), m::Reshape()),
                  0)));
}
// Unsupported element types must make the pass fail with kUnimplemented and a
// descriptive message: complex types everywhere, and FP8 outside of
// sufficiently new CUDA GPUs (and never on ROCm).
TEST_F(ConvRewriterTest, TestInvalidTypes) {
  const std::string module_str = absl::StrFormat(R"(
    HloModule Test
    ENTRY Test {
      input = TYPE[1,17,9,9] parameter(0)
      filter = TYPE[3,3,17,32] parameter(1)
      ROOT conv = TYPE[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
    })");
  // Complex operand types are rejected outright.
  for (std::string_view type : {"c64", "c128"}) {
    const std::string module_with_type =
        absl::StrReplaceAll(module_str, {{"TYPE", type}});
    TF_ASSERT_OK_AND_ASSIGN(auto m,
                            ParseAndReturnVerifiedModule(module_with_type));
    absl::Status s = ConvRewriter(GetComputeCapability()).Run(m.get()).status();
    EXPECT_THAT(
        s, tsl::testing::StatusIs(
               absl::StatusCode::kUnimplemented,
               ::testing::HasSubstr("Convolutions must have floating-point or "
                                    "integral operands/outputs")));
  }
  // FP8 on a pre-Hopper (< 9.0) CUDA GPU is rejected.
  std::string module_with_type =
      absl::StrReplaceAll(module_str, {{"TYPE", "f8e4m3fn"}});
  TF_ASSERT_OK_AND_ASSIGN(auto m,
                          ParseAndReturnVerifiedModule(module_with_type));
  absl::Status s =
      ConvRewriter(se::CudaComputeCapability::Ampere()).Run(m.get()).status();
  EXPECT_THAT(s, tsl::testing::StatusIs(
                     absl::StatusCode::kUnimplemented,
                     ::testing::HasSubstr(
                         "FP8 convolutions are only supported on CUDA "
                         "GPUs with compute capability at least 9.0")));
  // FP8 on ROCm is rejected regardless of capability.
  s = ConvRewriter(se::RocmComputeCapability{"gfx942"}).Run(m.get()).status();
  EXPECT_THAT(s, tsl::testing::StatusIs(
                     absl::StatusCode::kUnimplemented,
                     ::testing::HasSubstr(
                         "FP8 convolutions are only supported on CUDA GPUs")));
  // Only the f8e5m2 / f8e4m3 FP8 variants are supported.
  module_with_type = absl::StrReplaceAll(module_str, {{"TYPE", "f8e4m3fnuz"}});
  TF_ASSERT_OK_AND_ASSIGN(m, ParseAndReturnVerifiedModule(module_with_type));
  s = ConvRewriter(GetComputeCapability()).Run(m.get()).status();
  EXPECT_THAT(s,
              tsl::testing::StatusIs(
                  absl::StatusCode::kUnimplemented,
                  ::testing::HasSubstr("The only FP8 types supported in "
                                       "convolutions are f8e5m2 and f8e4m3")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/conv_rewriter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/conv_rewriter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c4fda505-d7d6-4f22-b4e5-da8367c55fed | cpp | google/tensorstore | std_array | tensorstore/internal/json_binding/std_array.h | tensorstore/internal/json_binding/std_array_test.cc | #ifndef TENSORSTORE_INTERNAL_JSON_BINDING_STD_ARRAY_H_
#define TENSORSTORE_INTERNAL_JSON_BINDING_STD_ARRAY_H_
#include <stddef.h>
#include <array>
#include <iterator>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json/array.h"
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_json_binding {
// Generic JSON binder that maps an array-like C++ object to/from a JSON
// array, parameterized by functors for querying size, setting/validating
// size, and accessing individual elements.
//
// If `kDiscardEmpty` is true, a "discarded" JSON value is a no-op on load,
// and an empty object is saved as a discarded JSON value (so the member can
// be omitted from the output).
template <bool kDiscardEmpty, typename GetSize, typename SetSize,
          typename GetElement, typename ElementBinder>
struct ArrayBinderImpl {
  GetSize get_size;        // get_size(obj): number of elements in the object.
  SetSize set_size;        // set_size(obj, n): resize/validate for n elements.
  GetElement get_element;  // get_element(obj, i): reference to element i.
  ElementBinder element_binder;  // Binder applied to each element.
  // Loads `*obj` from `*j` (is_loading true) or saves `*obj` into `*j`.
  // Returns non-OK if `*j` is not a JSON array on load, if `set_size`
  // rejects the length, or if an element binder fails; element failures are
  // annotated with their position.
  template <typename Loading, typename Options, typename Obj>
  absl::Status operator()(Loading is_loading, const Options& options, Obj* obj,
                          ::nlohmann::json* j) const {
    ::nlohmann::json::array_t* j_arr;
    if constexpr (is_loading) {
      if constexpr (kDiscardEmpty) {
        // Discarded input means "absent"; leave the object unchanged.
        if (j->is_discarded()) return absl::OkStatus();
      }
      j_arr = j->get_ptr<::nlohmann::json::array_t*>();
      if (!j_arr) {
        return internal_json::ExpectedError(*j, "array");
      }
      const size_t size = j_arr->size();
      TENSORSTORE_RETURN_IF_ERROR(
          internal::InvokeForStatus(set_size, *obj, size));
    } else {
      const auto size = get_size(*obj);
      if constexpr (kDiscardEmpty) {
        if (size == 0) {
          // Represent an empty container as a discarded (omitted) value.
          *j = ::nlohmann::json(::nlohmann::json::value_t::discarded);
          return absl::OkStatus();
        }
      }
      *j = ::nlohmann::json::array_t(size);
      j_arr = j->get_ptr<::nlohmann::json::array_t*>();
    }
    // Bind each element in order; errors identify the failing index.
    for (size_t i = 0, size = j_arr->size(); i < size; ++i) {
      auto&& element = get_element(*obj, i);
      TENSORSTORE_RETURN_IF_ERROR(
          element_binder(is_loading, options, &element, &(*j_arr)[i]),
          MaybeAnnotateStatus(
              _, tensorstore::StrCat("Error ",
                                     is_loading ? "parsing" : "converting",
                                     " value at position ", i)));
    }
    return absl::OkStatus();
  }
};
// Returns a binder mapping an object (described by the size/element functors)
// to a required JSON array (kDiscardEmpty == false).
template <typename GetSize, typename SetSize, typename GetElement,
          typename ElementBinder = decltype(DefaultBinder<>)>
constexpr auto Array(GetSize get_size, SetSize set_size, GetElement get_element,
                     ElementBinder element_binder = DefaultBinder<>) {
  return ArrayBinderImpl<false, GetSize, SetSize, GetElement, ElementBinder>{
      std::move(get_size), std::move(set_size), std::move(get_element),
      std::move(element_binder)};
}
// Like `Array`, but with kDiscardEmpty == true: a discarded JSON value loads
// as a no-op and an empty object saves as a discarded value.
template <typename GetSize, typename SetSize, typename GetElement,
          typename ElementBinder = decltype(DefaultBinder<>)>
constexpr auto OptionalArray(GetSize get_size, SetSize set_size,
                             GetElement get_element,
                             ElementBinder element_binder = DefaultBinder<>) {
  return ArrayBinderImpl<true, GetSize, SetSize, GetElement, ElementBinder>{
      std::move(get_size), std::move(set_size), std::move(get_element),
      std::move(element_binder)};
}
// Binder for resizable containers exposing `size()`, `resize()` and
// `operator[]` (e.g. std::vector), mapped to a required JSON array.
template <typename ElementBinder = decltype(DefaultBinder<>)>
constexpr auto Array(ElementBinder element_binder = DefaultBinder<>) {
  return internal_json_binding::Array(
      [](auto& c) { return c.size(); },
      [](auto& c, size_t size) { c.resize(size); },
      [](auto& c, size_t i) -> decltype(auto) { return c[i]; }, element_binder);
}
// Same container requirements as `Array()` above, but with optional
// (discard-empty) semantics.
template <typename ElementBinder = decltype(DefaultBinder<>)>
constexpr auto OptionalArray(ElementBinder element_binder = DefaultBinder<>) {
  return internal_json_binding::OptionalArray(
      [](auto& c) { return c.size(); },
      [](auto& c, size_t size) { c.resize(size); },
      [](auto& c, size_t i) -> decltype(auto) { return c[i]; }, element_binder);
}
// Binder for fixed-length containers (e.g. std::array, span): instead of
// resizing, loading fails if the JSON array length does not match the
// container's length.
template <typename ElementBinder = decltype(DefaultBinder<>)>
constexpr auto FixedSizeArray(ElementBinder element_binder = DefaultBinder<>) {
  return internal_json_binding::Array(
      [](auto& c) { return std::size(c); },
      [](auto& c, size_t new_size) {
        return internal_json::JsonValidateArrayLength(new_size, std::size(c));
      },
      [](auto& c, size_t i) -> decltype(auto) { return c[i]; }, element_binder);
}
// Stateless lambda wrappers that simply forward to Array()/FixedSizeArray()
// with default element binders; these are the objects the DefaultBinder
// specializations below refer to.
namespace array_binder {
inline constexpr auto ArrayBinder = [](auto is_loading, const auto& options,
                                       auto* obj, auto* j) -> absl::Status {
  return internal_json_binding::Array()(is_loading, options, obj, j);
};
}
namespace fixed_size_array_binder {
inline constexpr auto FixedSizeArrayBinder = [](auto is_loading,
                                                const auto& options, auto* obj,
                                                auto* j) -> absl::Status {
  return internal_json_binding::FixedSizeArray()(is_loading, options, obj, j);
};
}
using array_binder::ArrayBinder;
using fixed_size_array_binder::FixedSizeArrayBinder;
// Default JSON binders: std::vector binds as a resizable JSON array;
// std::array and tensorstore::span bind as fixed-length JSON arrays.
template <typename T, typename Allocator>
constexpr inline auto DefaultBinder<std::vector<T, Allocator>> = ArrayBinder;
template <typename T, size_t N>
constexpr inline auto DefaultBinder<std::array<T, N>> = FixedSizeArrayBinder;
template <typename T, std::ptrdiff_t Extent>
constexpr inline auto DefaultBinder<tensorstore::span<T, Extent>> =
    FixedSizeArrayBinder;
}
}
#endif | #include "tensorstore/internal/json_binding/std_array.h"
#include <array>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::MatchesStatus;
namespace jb = tensorstore::internal_json_binding;
namespace {
// jb::Array round-trips a std::vector and reports the failing index when an
// element cannot be parsed.
TEST(JsonBindingTest, Array) {
  const auto binder = jb::Array();
  tensorstore::TestJsonBinderRoundTrip<std::vector<int>>(
      {
          {{1, 2, 3}, {1, 2, 3}},
      },
      binder);
  tensorstore::TestJsonBinderFromJson<std::vector<int>>(
      {
          {{1, 2, "a"},
           MatchesStatus(
               absl::StatusCode::kInvalidArgument,
               "Error parsing value at position 2: Expected integer .*")},
      },
      binder);
}
// jb::FixedSizeArray round-trips a std::array and rejects a JSON array whose
// length does not match the container's fixed length.
TEST(JsonBindingTest, FixedSizeArray) {
  const auto binder = jb::FixedSizeArray();
  tensorstore::TestJsonBinderRoundTrip<std::array<int, 3>>(
      {
          {{{1, 2, 3}}, {1, 2, 3}},
      },
      binder);
  tensorstore::TestJsonBinderFromJson<std::array<int, 3>>(
      {
          {{1, 2, 3, 4},
           MatchesStatus(absl::StatusCode::kInvalidArgument,
                         "Array has length 4 but should have length 3")},
      },
      binder);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/std_array.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/std_array_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
81a22c35-8b49-46f0-88b8-0be2f7b1e6bb | cpp | google/arolla | bitmap | arolla/dense_array/bitmap.cc | arolla/dense_array/bitmap_test.cc | #include "arolla/dense_array/bitmap.h"
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <utility>
#include "absl/log/check.h"
#include "arolla/util/bits.h"
namespace arolla::bitmap {
bool AreAllBitsSet(const Word* bitmap, int64_t bitCount) {
while (bitCount >= kWordBitCount) {
if (*bitmap != kFullWord) return false;
bitmap++;
bitCount -= kWordBitCount;
}
if (bitCount > 0) {
auto mask = kFullWord >> (kWordBitCount - bitCount);
return (*bitmap & mask) == mask;
}
return true;
}
// Counts set bits in the logical range [offset, offset + size). Positions
// outside the backing buffer are treated as set (present); positions inside
// it are counted from the stored words.
int64_t CountBits(const Bitmap& bitmap, int64_t offset, int64_t size) {
  DCHECK_GE(size, 0);
  // Number of bits actually addressable within the buffer.
  const int64_t total = bitmap.size() * kWordBitCount;
  // Clamp both endpoints into [0, total]; `last` is additionally kept at or
  // above `first` so the in-buffer range is never negative.
  const int64_t first = std::min<int64_t>(total, std::max<int64_t>(0, offset));
  const int64_t last =
      std::min<int64_t>(total, std::max<int64_t>(first, offset + size));
  return size - (last - first) +
         GetOnesCountInRange(bitmap.span().data(), first, last);
}
// Lazily allocates the bitmap with all `bit_count_` bits set to 1. Whole
// words are filled via memset; any bits of the final word beyond
// `bit_count_` are then cleared so unused high bits stay zero.
void AlmostFullBuilder::CreateFullBitmap() {
  Bitmap::Builder bldr(BitmapSize(bit_count_), factory_);
  auto span = bldr.GetMutableSpan();
  // Keep a raw pointer so later AddMissed calls can clear individual bits.
  bitmap_ = span.begin();
  std::memset(bitmap_, 0xff, span.size() * sizeof(Word));
  // Number of valid bits in the (possibly partial) last word.
  int64_t last_bits = bit_count_ & (kWordBitCount - 1);
  if (last_bits != 0) {
    // Mask off the unused high bits of the last word.
    span.back() &= ((Word{1} << last_bits) - 1);
  }
  bitmap_buffer_ = std::move(bldr).Build();
}
} | #include "arolla/dense_array/bitmap.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/random/distributions.h"
#include "absl/random/random.h"
#include "absl/types/span.h"
#include "arolla/memory/buffer.h"
namespace arolla::bitmap {
namespace {
// BitmapSize rounds the bit count up to whole 32-bit words.
TEST(BitmapTest, BitmapSize) {
  EXPECT_EQ(BitmapSize(0), 0);
  EXPECT_EQ(BitmapSize(1), 1);
  EXPECT_EQ(BitmapSize(32), 1);
  EXPECT_EQ(BitmapSize(33), 2);
  EXPECT_EQ(BitmapSize(320), 10);
  EXPECT_EQ(BitmapSize(351), 11);
}
// SetBit/UnsetBit address bits across word boundaries in a raw Word array.
TEST(BitmapTest, SetBit) {
  Word bitmap[3] = {0, kFullWord, 0};
  SetBit(bitmap, 3);
  UnsetBit(bitmap, 32);
  SetBit(bitmap, 64);
  UnsetBit(bitmap, 65);
  EXPECT_EQ(bitmap[0], 8);
  EXPECT_EQ(bitmap[1], kFullWord - 1);
  EXPECT_EQ(bitmap[2], 1);
}
// GetBit reads the same cross-word positions written above.
TEST(BitmapTest, GetBit) {
  Word bitmap[3] = {8, kFullWord - 1, 1};
  EXPECT_TRUE(GetBit(bitmap, 3));
  EXPECT_FALSE(GetBit(bitmap, 32));
  EXPECT_TRUE(GetBit(bitmap, 64));
  EXPECT_FALSE(GetBit(bitmap, 65));
}
// AreAllBitsSet only examines the first bitCount bits, including a partial
// trailing word (word 2 has only its low two bits set).
TEST(BitmapTest, AreAllBitsSet) {
  Word bitmap[4] = {kFullWord, kFullWord, 3, kFullWord};
  EXPECT_TRUE(AreAllBitsSet(bitmap, 64));
  EXPECT_TRUE(AreAllBitsSet(bitmap, 65));
  EXPECT_TRUE(AreAllBitsSet(bitmap, 66));
  EXPECT_FALSE(AreAllBitsSet(bitmap, 67));
  EXPECT_FALSE(AreAllBitsSet(bitmap, 128));
}
// AreAllBitsUnset likewise ignores bits beyond bitCount (word 2 == 12 sets
// bits 66 and 67).
TEST(BitmapTest, AreAllBitsUnset) {
  Word bitmap[4] = {0, 0, 12};
  EXPECT_TRUE(AreAllBitsUnset(bitmap, 0));
  EXPECT_TRUE(AreAllBitsUnset(bitmap, 64));
  EXPECT_TRUE(AreAllBitsUnset(bitmap, 65));
  EXPECT_TRUE(AreAllBitsUnset(bitmap, 66));
  EXPECT_FALSE(AreAllBitsUnset(bitmap, 67));
  EXPECT_FALSE(AreAllBitsUnset(bitmap, 95));
  EXPECT_FALSE(AreAllBitsUnset(bitmap, 96));
}
// An empty (default) Bitmap behaves as "all bits present": every word reads
// as kFullWord and every bit as true, for any offset.
TEST(BitmapTest, Empty) {
  Bitmap bitmap;
  EXPECT_EQ(GetWord(bitmap, 0), kFullWord);
  EXPECT_EQ(GetWord(bitmap, 13), kFullWord);
  EXPECT_EQ(GetWordWithOffset(bitmap, 0, 7), kFullWord);
  EXPECT_EQ(GetWordWithOffset(bitmap, 13, 7), kFullWord);
  EXPECT_TRUE(GetBit(bitmap, 0));
  EXPECT_TRUE(GetBit(bitmap, 1));
  EXPECT_TRUE(GetBit(bitmap, 999));
  int64_t count = 0;
  auto check_fn = [&](bool v) {
    count++;
    EXPECT_TRUE(v);
  };
  // Iterate over an empty bitmap visits the requested count of all-true bits.
  Iterate(bitmap, 0, 0, check_fn);
  EXPECT_EQ(count, 0);
  Iterate(bitmap, 2, 17, check_fn);
  EXPECT_EQ(count, 17);
  count = 0;
  Iterate(bitmap, 99, 138, check_fn);
  EXPECT_EQ(count, 138);
}
// CreateEmptyBitmap produces an all-zero bitmap for a range of sizes.
TEST(BitmapTest, CreateEmpty) {
  for (int64_t size = 0; size < (1 << 20); size = (size + 1) * 2) {
    Bitmap bitmap = CreateEmptyBitmap(size);
    for (int64_t i = 0; i < BitmapSize(size); ++i) {
      EXPECT_EQ(GetWord(bitmap, i), 0);
    }
    for (int64_t i = 0; i < size; ++i) {
      EXPECT_FALSE(GetBit(bitmap, i));
    }
    EXPECT_TRUE(AreAllBitsUnset(bitmap.span().data(), size));
  }
}
// Word/bit accessors and Iterate agree with each other on a non-trivial
// 4-word bitmap; Iterate is exercised across several split ranges.
TEST(BitmapTest, Iterate) {
  Bitmap bitmap = CreateBuffer<Word>({0xffff4321, 0x0, 0xf0f0f0f0, 0xffffffff});
  EXPECT_EQ(GetWord(bitmap, 0), 0xffff4321);
  EXPECT_EQ(GetWord(bitmap, 2), 0xf0f0f0f0);
  EXPECT_EQ(GetWordWithOffset(bitmap, 0, 0), 0xffff4321);
  EXPECT_EQ(GetWordWithOffset(bitmap, 0, 31), 0x1);
  EXPECT_EQ(GetWordWithOffset(bitmap, 2, 8), 0xfff0f0f0);
  EXPECT_TRUE(GetBit(bitmap, 0));
  EXPECT_FALSE(GetBit(bitmap, 1));
  EXPECT_TRUE(GetBit(bitmap, 31));
  EXPECT_FALSE(GetBit(bitmap, 32));
  EXPECT_FALSE(GetBit(bitmap, 67));
  EXPECT_TRUE(GetBit(bitmap, 68));
  EXPECT_TRUE(GetBit(bitmap, 127));
  int64_t bit = 0;
  std::unique_ptr<int> x;
  // Move-only capture also checks Iterate accepts non-copyable callbacks.
  auto check_fn = [&, x(std::move(x))](bool v) {
    EXPECT_EQ(v, GetBit(bitmap, bit));
    bit++;
  };
  Iterate(bitmap, 0, 0, check_fn);
  EXPECT_EQ(bit, 0);
  Iterate(bitmap, 0, 17, check_fn);
  EXPECT_EQ(bit, 17);
  Iterate(bitmap, 17, 32, check_fn);
  EXPECT_EQ(bit, 17 + 32);
  Iterate(bitmap, 17 + 32, 69, check_fn);
  EXPECT_EQ(bit, 17 + 32 + 69);
}
// Intersect computes the bitwise AND of two bitmaps, with and without
// per-bitmap bit offsets (the offset variants shift before ANDing).
TEST(BitmapTest, Intersect) {
  Bitmap b1 = CreateBuffer<Word>({0xffff4321, 0x0, 0xf0f0f0f0, 0xffffffff});
  Bitmap b2 = CreateBuffer<Word>({0x43214321, 0x1, 0x0f0ff0f0, 0xffffffff});
  Bitmap b3 =
      CreateBuffer<Word>({0x43214321, 0x1, 0x0f0ff0f0, 0xffffffff, 0x8});
  {
    std::vector<Word> res(4);
    Intersect(b1, b2, {res.data(), res.size()});
    EXPECT_THAT(res, testing::ElementsAre(0x43214321, 0x0, 0xf0f0, 0xffffffff));
  }
  {
    // Equal offsets reduce to the plain intersection.
    std::vector<Word> res(4);
    Intersect(b1, b2, 5, 5, {res.data(), res.size()});
    EXPECT_THAT(res, testing::ElementsAre(0x43214321, 0x0, 0xf0f0, 0xffffffff));
  }
  {
    std::vector<Word> res(4);
    Intersect(b1, b3, 4, 8, {res.data(), res.size()});
    EXPECT_THAT(res,
                testing::ElementsAre(0x14320020, 0x0, 0xf0f0f000, 0x8fffffff));
  }
  {
    // Swapping the arguments and offsets yields the same result.
    std::vector<Word> res(4);
    Intersect(b3, b1, 8, 4, {res.data(), res.size()});
    EXPECT_THAT(res,
                testing::ElementsAre(0x14320020, 0x0, 0xf0f0f000, 0x8fffffff));
  }
}
// Exhaustively compares CountBits against a naive per-bit count for every
// [i, j) subrange of a 128-bit bitmap.
TEST(CountBits, Trivial) {
  const std::vector<uint32_t> bitmap = {1664460009U, 1830791933U, 2649253042U,
                                        1615775603U};
  const auto bit = [&](int64_t i) { return (bitmap[i / 32] >> (i % 32)) & 1; };
  const auto bitmap_buffer = CreateBuffer(bitmap);
  const int64_t n = 32 * bitmap.size();
  for (int64_t i = 0; i <= n; ++i) {
    int64_t count = 0;
    for (int64_t j = i; j < n; ++j) {
      ASSERT_EQ(count, CountBits(bitmap_buffer, i, j - i)) << i << ' ' << j;
      count += bit(j);
    }
    ASSERT_EQ(count, CountBits(bitmap_buffer, i, n - i));
  }
}
// Positions before bit 0 or past the end of the buffer count as set bits.
TEST(CountBits, OutOfRange) {
  const auto bitmap_buffer = CreateBuffer({0xffff0000});
  ASSERT_EQ(CountBits(bitmap_buffer, -30, 24), 24);
  ASSERT_EQ(CountBits(bitmap_buffer, -20, 24), 20);
  ASSERT_EQ(CountBits(bitmap_buffer, -10, 24), 10);
  ASSERT_EQ(CountBits(bitmap_buffer, -5, 24), 8);
  ASSERT_EQ(CountBits(bitmap_buffer, 0, 24), 8);
  ASSERT_EQ(CountBits(bitmap_buffer, 5, 24), 13);
  ASSERT_EQ(CountBits(bitmap_buffer, 10, 24), 18);
  ASSERT_EQ(CountBits(bitmap_buffer, 20, 24), 24);
  ASSERT_EQ(CountBits(bitmap_buffer, 30, 24), 24);
  ASSERT_EQ(CountBits(bitmap_buffer, 40, 24), 24);
}
// Builder::AddByGroups with random group sizes must produce the same bits as
// the sequence of generated values.
TEST(BuilderTest, AddByGroups) {
  int64_t size = 16384;
  absl::BitGen gen;
  std::vector<bool> bits;
  Builder bldr(size);
  auto add_fn = [&](int) {
    bool v = absl::Bernoulli(gen, 0.5);
    bits.push_back(v);
    return v;
  };
  for (int64_t remaining_count = size; remaining_count > 0;) {
    int64_t count =
        std::min(remaining_count, absl::Uniform<int64_t>(gen, 0, 256));
    remaining_count -= count;
    bldr.AddByGroups(count, [&](int64_t) { return add_fn; });
  }
  Bitmap bitmap = std::move(bldr).Build();
  EXPECT_EQ(size, bits.size());
  for (int64_t i = 0; i < bits.size(); ++i) {
    EXPECT_EQ(GetBit(bitmap, i), bits[i]);
  }
}
// AddForEach must accept rvalue, const-lvalue and mutable-lvalue callbacks
// without copying them (enforced via move-only captures); the mutable
// callback is invoked exactly once per element.
TEST(BuilderTest, AddForEachNeverCopyAFunction) {
  int cont[1]{0};
  {
    std::unique_ptr<int> x;
    Builder b(1);
    b.AddForEach(cont, [x(std::move(x))](int) { return true; });
  }
  {
    std::unique_ptr<int> x;
    Builder b(1);
    const auto fn = [x(std::move(x))](int) { return true; };
    b.AddForEach(cont, fn);
  }
  {
    std::unique_ptr<int> x;
    int cnt = 0;
    Builder b(1);
    auto fn = [&cnt, x(std::move(x))](int) mutable {
      ++cnt;
      return true;
    };
    b.AddForEach(cont, fn);
    EXPECT_EQ(cnt, 1);
  }
}
// Asserts that `bitmap_expr` has the expected word count for N bits and that
// each bit i equals fn(i).
#define TEST_BITS(bitmap_expr, fn, N) \
  Bitmap bitmap = bitmap_expr; \
  ASSERT_EQ(bitmap.size(), BitmapSize(N)); \
  for (int i = 0; i < (N); ++i) { \
    ASSERT_EQ(GetBit(bitmap, i), fn(i)) << i << " of " << N; \
  }
// A single AddForEach call reproduces the predicate for every size from 2 up
// to kMaxN, for both vector and span inputs.
TEST(BuilderTest, AddForEachSingle) {
  constexpr int kMaxN = 1000;
  std::vector<int> v(kMaxN);
  for (int n = 0; n < kMaxN; ++n) {
    v[n] = n;
  }
  auto is_5_divisible = [](int x) { return x % 5 == 0; };
  for (int n = 2; n < kMaxN; ++n) {
    {
      Builder b(n);
      b.AddForEach(std::vector(v.begin(), v.begin() + n), is_5_divisible);
      TEST_BITS(std::move(b).Build(), is_5_divisible, n);
    }
    {
      Builder b(n);
      b.AddForEach(absl::MakeConstSpan(v.data(), n), is_5_divisible);
      TEST_BITS(std::move(b).Build(), is_5_divisible, n);
    }
  }
}
// Multiple AddForEach calls over word-unaligned chunks still produce the
// correct combined bitmap.
TEST(BuilderTest, AddForEachMany) {
  constexpr int kMaxN = 4027;
  std::vector<int> v(kMaxN);
  for (int n = 0; n < kMaxN; ++n) {
    v[n] = n;
  }
  auto is_5_divisible = [](int x) { return x % 5 == 0; };
  Builder b(kMaxN);
  int beg = 0;
  for (int cnt : {2, 3, 4, 6, 9, 13, 18, 27, 47, 94, 188, 376, 752, kMaxN}) {
    b.AddForEach(
        absl::MakeConstSpan(v.data() + beg, std::min(cnt, kMaxN - beg)),
        is_5_divisible);
    beg += cnt;
  }
  TEST_BITS(std::move(b).Build(), is_5_divisible, kMaxN);
}
// An all-true bitmap is represented as an empty buffer.
TEST(BuilderTest, Full) {
  Builder builder(10);
  builder.AddForEach(std::vector<int>(10), [](int) { return true; });
  EXPECT_TRUE(std::move(builder).Build().empty());
}
// With no AddMissed calls the result is the empty ("all present") bitmap.
TEST(AlmostFullBuilderTest, Full) {
  AlmostFullBuilder builder(555);
  EXPECT_TRUE(std::move(builder).Build().empty());
}
// Marking every bit missed yields an all-zero bitmap of the right size.
TEST(AlmostFullBuilderTest, Empty) {
  int64_t size = 555;
  AlmostFullBuilder builder(size);
  for (int64_t i = 0; i < size; ++i) {
    builder.AddMissed(i);
  }
  auto bitmap = std::move(builder).Build();
  ASSERT_EQ(bitmap.size(), BitmapSize(size));
  EXPECT_TRUE(AreAllBitsUnset(bitmap.span().data(), size));
  for (int64_t i = 0; i < size; ++i) {
    EXPECT_EQ(GetBit(bitmap, i), 0);
  }
}
// Scattered missed bits clear exactly those positions.
TEST(AlmostFullBuilderTest, NotFull) {
  int64_t size = 555;
  AlmostFullBuilder builder(size);
  for (int64_t i = 0; i < size; ++i) {
    if (i % 5 == 1) builder.AddMissed(i);
  }
  auto bitmap = std::move(builder).Build();
  EXPECT_EQ(bitmap.size(), BitmapSize(size));
  for (int64_t i = 0; i < size; ++i) {
    EXPECT_EQ(GetBit(bitmap, i), i % 5 != 1);
  }
}
// A contiguous missing prefix of any length (which exercises the builder's
// lazy bitmap allocation) is handled correctly at every split point.
TEST(AlmostFullBuilderTest, EmptyThanFull) {
  int64_t size = 155;
  for (int64_t split_point = 1; split_point < size; ++split_point) {
    AlmostFullBuilder builder(size);
    for (int64_t i = 0; i < split_point; ++i) {
      builder.AddMissed(i);
    }
    auto bitmap = std::move(builder).Build();
    EXPECT_EQ(bitmap.size(), BitmapSize(size));
    for (int64_t i = 0; i < size; ++i) {
      ASSERT_EQ(GetBit(bitmap, i), i >= split_point) << i << " " << split_point;
    }
  }
}
// A missing prefix followed by isolated missed bits combines both paths.
TEST(AlmostFullBuilderTest, EmptyConsequentlyAtStartAndAFewMissed) {
  int64_t size = 155;
  int64_t split_point = 71;
  AlmostFullBuilder builder(size);
  for (int64_t i = 0; i < split_point; ++i) {
    builder.AddMissed(i);
  }
  builder.AddMissed(93);
  builder.AddMissed(107);
  auto bitmap = std::move(builder).Build();
  EXPECT_EQ(bitmap.size(), BitmapSize(size));
  for (int64_t i = 0; i < size; ++i) {
    bool present = (i >= split_point) && (i != 93) && (i != 107);
    ASSERT_EQ(GetBit(bitmap, i), present) << i;
  }
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/dense_array/bitmap.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/dense_array/bitmap_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
c004a04a-45b9-41d5-9ca1-80bf5198bb3d | cpp | tensorflow/tensorflow | intrusive_ptr | tensorflow/core/platform/intrusive_ptr.h | third_party/xla/third_party/tsl/tsl/platform/intrusive_ptr_test.cc | #ifndef TENSORFLOW_CORE_PLATFORM_INTRUSIVE_PTR_H_
#define TENSORFLOW_CORE_PLATFORM_INTRUSIVE_PTR_H_
#include <algorithm>
#include "tsl/platform/intrusive_ptr.h"
namespace tensorflow {
namespace core {
// Alias template forwarding tensorflow::core::IntrusivePtr to the tsl
// implementation; this header adds no behavior of its own.
template <class T>
using IntrusivePtr = tsl::core::IntrusivePtr<T>;
}
}
#endif | #include "tsl/platform/intrusive_ptr.h"
#include "tsl/platform/refcount.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace core {
namespace {
// add_ref=false adopts the existing reference: the count stays at one.
TEST(IntrusivePtr, ConstructorAddRefFalse) {
  auto ptr = IntrusivePtr<RefCounted>(new RefCounted(), false);
  ASSERT_TRUE(ptr->RefCountIsOne());
}
// add_ref=true takes an additional reference on top of the caller's.
TEST(IntrusivePtr, ConstructorAddRefTrue) {
  auto raw = new RefCounted();
  auto ptr = IntrusivePtr<RefCounted>(raw, true);
  ASSERT_FALSE(raw->RefCountIsOne());
  raw->Unref();
  ASSERT_TRUE(raw->RefCountIsOne());
}
// Copy construction adds a reference.
TEST(IntrusivePtr, CopyConstructor) {
  auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), false);
  auto ptr2 = IntrusivePtr<RefCounted>(ptr1);
  ASSERT_FALSE(ptr2->RefCountIsOne());
}
// Copy assignment releases the old target and shares the new one.
TEST(IntrusivePtr, CopyAssignment) {
  auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), false);
  auto raw = new RefCounted();
  auto ptr2 = IntrusivePtr<RefCounted>(raw, true);
  ptr2 = ptr1;
  ASSERT_EQ(ptr1.get(), ptr2.get());
  ASSERT_FALSE(ptr2->RefCountIsOne());
  ASSERT_TRUE(raw->RefCountIsOne());
  raw->Unref();
}
// Copy-assigning into a default (null) pointer also shares ownership.
TEST(IntrusivePtr, CopyAssignmentIntoEmpty) {
  auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), false);
  auto ptr2 = IntrusivePtr<RefCounted>();
  ptr2 = ptr1;
  ASSERT_FALSE(ptr2->RefCountIsOne());
}
// Move construction transfers ownership and nulls the source.
TEST(IntrusivePtr, MoveConstructor) {
  auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), false);
  auto ptr2 = IntrusivePtr<RefCounted>(std::move(ptr1));
  ASSERT_TRUE(ptr2->RefCountIsOne());
  ASSERT_EQ(ptr1.get(), nullptr);
}
// Move assignment transfers ownership without changing the refcount.
TEST(IntrusivePtr, MoveAssignment) {
  auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), false);
  auto ptr2 = IntrusivePtr<RefCounted>(new RefCounted(), false);
  ptr2 = std::move(ptr1);
  ASSERT_TRUE(ptr2->RefCountIsOne());
  ASSERT_EQ(ptr1.get(), nullptr);
}
// Move-assigning into a default (null) pointer works the same way.
TEST(IntrusivePtr, MoveAssignmentIntoEmpty) {
  auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), false);
  auto ptr2 = IntrusivePtr<RefCounted>();
  ptr2 = std::move(ptr1);
  ASSERT_TRUE(ptr2->RefCountIsOne());
  ASSERT_EQ(ptr1.get(), nullptr);
}
// Self-move-assignment must leave the pointer intact.
TEST(IntrusivePtr, MoveAssignmentAlias) {
  auto ptr = IntrusivePtr<RefCounted>(new RefCounted(), false);
  auto& ptr_alias = ptr;
  ptr = std::move(ptr_alias);
  ASSERT_TRUE(ptr->RefCountIsOne());
}
// reset() releases the old target and adopts the new one.
TEST(IntrusivePtr, Reset) {
  auto ptr = IntrusivePtr<RefCounted>(new RefCounted(), false);
  ptr.reset(new RefCounted(), false);
  ASSERT_TRUE(ptr->RefCountIsOne());
}
// reset() on a null pointer simply adopts the target.
TEST(IntrusivePtr, ResetIntoEmpty) {
  auto ptr = IntrusivePtr<RefCounted>();
  ptr.reset(new RefCounted(), false);
  ASSERT_TRUE(ptr->RefCountIsOne());
}
// Resetting to the already-held pointer must not drop the last reference.
TEST(IntrusivePtr, ResetAlias) {
  auto ptr = IntrusivePtr<RefCounted>(new RefCounted(), false);
  ASSERT_TRUE(ptr->RefCountIsOne());
  ptr.reset(ptr.get(), false);
  ASSERT_TRUE(ptr->RefCountIsOne());
}
// Assigning x->ptr_ = x->ptr_->ptr_ must take the new reference before
// releasing the old one; otherwise the source ('b') would be destroyed while
// still being read.
TEST(IntrusivePtr, ResetRefBeforeUnref) {
  class Foo : public RefCounted {
   public:
    explicit Foo(char label, Foo* ptr = nullptr)
        : label_(label), ptr_(ptr, false) {}
    char label_;
    IntrusivePtr<Foo> ptr_;
  };
  IntrusivePtr<Foo> x(new Foo{'a', new Foo{'b', new Foo{'c'}}}, false);
  x->ptr_ = x->ptr_->ptr_;
}
// Same hazard as above, but via move assignment: the pointee must be stolen
// before the old reference is released.
TEST(IntrusivePtr, ResetStealPtrBeforeUnref) {
  class Foo : public RefCounted {
   public:
    explicit Foo(char label, Foo* ptr = nullptr)
        : label_(label), ptr_(ptr, false) {}
    char label_;
    IntrusivePtr<Foo> ptr_;
  };
  IntrusivePtr<Foo> x(new Foo{'a', new Foo{'b', new Foo{'c'}}}, false);
  x->ptr_ = std::move(x->ptr_->ptr_);
}
// detach() hands the reference back to the caller without unref'ing.
TEST(IntrusivePtr, Detach) {
  auto ptr = IntrusivePtr<RefCounted>(new RefCounted(), false);
  ASSERT_TRUE(ptr->RefCountIsOne());
  auto raw = ptr.detach();
  ASSERT_TRUE(raw->RefCountIsOne());
  raw->Unref();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/intrusive_ptr.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/intrusive_ptr_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2503dbb1-5ed5-4d36-981b-77edba1e0db3 | cpp | tensorflow/tensorflow | fuse_binary_into_following_affine | tensorflow/lite/toco/graph_transformations/fuse_binary_into_following_affine.cc | tensorflow/lite/toco/graph_transformations/tests/fuse_binary_into_following_affine_test.cc | #include <algorithm>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/runtime/types.h"
#include "tensorflow/lite/toco/tooling_util.h"
namespace toco {
namespace {
// Folds a scalar constant Add/Sub that feeds an affine op (Conv,
// DepthwiseConv or FullyConnected) into that op's bias. Since
// affine(x + c) == affine(x) + c * (sum of weights per output channel),
// each bias[d] is incremented by c * sum(weights feeding output channel d).
// For kSub only (x - constant) is supported, folded as adding -c.
void FuseAddOrSubParamsIntoFollowingAffine(Model* model, Operator* following_op,
                                           const Operator* add_or_sub_op,
                                           int index_of_constant_input) {
  CHECK(add_or_sub_op->type == OperatorType::kAdd ||
        add_or_sub_op->type == OperatorType::kSub);
  CHECK(index_of_constant_input == 0 || index_of_constant_input == 1);
  // (constant - x) cannot be folded into a bias addition.
  CHECK(add_or_sub_op->type != OperatorType::kSub ||
        index_of_constant_input == 1);
  if (following_op->inputs.size() < 3) {
    LOG(FATAL) << "Missing bias parameter";
  }
  const auto& weights = model->GetArray(following_op->inputs[1]);
  auto& bias = model->GetArray(following_op->inputs[2]);
  // Bias values are about to change, so drop any recorded min/max.
  bias.minmax = nullptr;
  const auto& operand =
      model->GetArray(add_or_sub_op->inputs[index_of_constant_input]);
  // Only single-element (scalar) constant operands are supported here.
  CHECK_EQ(RequiredBufferSizeForShape(operand.shape()), 1);
  const float scalar_operand =
      operand.GetBuffer<ArrayDataType::kFloat>().data[0];
  // Normalize to an addition: (x - c) == x + (-c).
  float add_scalar_operand = 0.f;
  if (add_or_sub_op->type == OperatorType::kAdd) {
    add_scalar_operand = scalar_operand;
  } else if (add_or_sub_op->type == OperatorType::kSub &&
             index_of_constant_input == 1) {
    add_scalar_operand = -scalar_operand;
  } else {
    LOG(FATAL) << "Should not get here";
  }
  const Shape& weights_shape = weights.shape();
  const Shape& bias_shape = bias.shape();
  const auto& weights_buffer = weights.GetBuffer<ArrayDataType::kFloat>();
  const float* const weights_data = weights_buffer.data.data();
  auto& bias_buffer = bias.GetMutableBuffer<ArrayDataType::kFloat>();
  float* const bias_data = bias_buffer.data.data();
  if (following_op->type == OperatorType::kConv ||
      following_op->type == OperatorType::kFullyConnected) {
    // Conv/FullyConnected weights: output depth is the outermost dimension,
    // so each channel's weights are contiguous.
    const int output_depth = weights_shape.dims(0);
    CHECK_EQ(output_depth, bias_shape.dims(bias_shape.dimensions_count() - 1));
    const int weights_size = RequiredBufferSizeForShape(weights_shape);
    const int weights_per_depth = weights_size / output_depth;
    CHECK_EQ(weights_size, weights_per_depth * output_depth);
    for (int d = 0; d < output_depth; d++) {
      float accumulation = 0;
      for (int i = 0; i < weights_per_depth; i++) {
        accumulation +=
            add_scalar_operand * weights_data[d * weights_per_depth + i];
      }
      bias_data[d] += accumulation;
    }
  } else if (following_op->type == OperatorType::kDepthwiseConv) {
    // DepthwiseConv weights: output depth is the innermost (last) dimension,
    // so each channel's weights are strided by output_depth.
    const int output_depth =
        weights_shape.dims(weights_shape.dimensions_count() - 1);
    const int weights_size = RequiredBufferSizeForShape(weights_shape);
    const int weights_per_depth = weights_size / output_depth;
    CHECK_EQ(weights_size, weights_per_depth * output_depth);
    for (int c = 0; c < output_depth; c++) {
      float accumulation = 0;
      for (int k = 0; k < weights_per_depth; k++) {
        accumulation += add_scalar_operand * weights_data[k * output_depth + c];
      }
      bias_data[c] += accumulation;
    }
  } else {
    LOG(FATAL) << "Should not get here.";
  }
}
// Folds a scalar constant Mul/Div that feeds an affine op into that op's
// weights: affine(x * c) with weights W equals affine(x) with elementwise
// weights W * c (and similarly W / c for division). For kDiv only
// (x / constant) is supported.
void FuseMulOrDivParamsIntoFollowingAffine(Model* model, Operator* following_op,
                                           const Operator* mul_or_div_op,
                                           int index_of_constant_input) {
  CHECK(mul_or_div_op->type == OperatorType::kMul ||
        mul_or_div_op->type == OperatorType::kDiv);
  CHECK(index_of_constant_input == 0 || index_of_constant_input == 1);
  // (constant / x) cannot be folded into a weight rescale.
  CHECK(mul_or_div_op->type != OperatorType::kDiv ||
        index_of_constant_input == 1);
  const auto& weights_name = following_op->inputs[1];
  const auto& bias_name = following_op->inputs[2];
  auto& weights = model->GetArray(weights_name);
  // Rescaling the weights invalidates any recorded min/max.
  DropMinMax(model, weights_name);
  DropMinMax(model, bias_name);
  const auto& operand =
      model->GetArray(mul_or_div_op->inputs[index_of_constant_input]);
  // Only single-element (scalar) constant operands are supported here.
  CHECK_EQ(RequiredBufferSizeForShape(operand.shape()), 1);
  const float scalar_operand =
      operand.GetBuffer<ArrayDataType::kFloat>().data[0];
  // Rescale every weight elementwise by the scalar (divide for kDiv).
  float* weights_data =
      weights.GetMutableBuffer<ArrayDataType::kFloat>().data.data();
  const int weights_size = RequiredBufferSizeForShape(weights.shape());
  for (int i = 0; i < weights_size; i++) {
    if (mul_or_div_op->type == OperatorType::kMul) {
      weights_data[i] *= scalar_operand;
    } else if (mul_or_div_op->type == OperatorType::kDiv) {
      weights_data[i] /= scalar_operand;
    } else {
      LOG(FATAL) << "Should not get here";
    }
  }
}
}
// Attempts to fuse a binary op with one constant scalar operand
// (Add/Sub/Mul/Div) at `op_index` into the weights/bias of the affine op
// (Conv / DepthwiseConv / FullyConnected) that consumes its output.
// On success the binary op is deleted, the affine op is rewired to read the
// binary op's variable input directly, and *modified is set to true.
// Every bail-out path leaves the graph untouched and returns OkStatus.
//
// Fix vs. previous revision: the "does not have a bias vector" message passed
// LogName(*following_op) and LogName(*binary_op) in swapped order relative to
// its format string (and to every other message in this function).
::tensorflow::Status FuseBinaryIntoFollowingAffine::Run(Model* model,
                                                        std::size_t op_index,
                                                        bool* modified) {
  *modified = false;
  const auto binary_it = model->operators.begin() + op_index;
  auto* binary_op = binary_it->get();
  // Only elementwise binary arithmetic ops are candidates.
  if (binary_op->type != OperatorType::kAdd &&
      binary_op->type != OperatorType::kMul &&
      binary_op->type != OperatorType::kSub &&
      binary_op->type != OperatorType::kDiv) {
    return absl::OkStatus();
  }
  CHECK_EQ(binary_op->inputs.size(), 2);
  // Exactly one input must be constant: that one is folded into the affine
  // params, the other (variable) input replaces the affine op's input.
  const bool is_input_constant[2] = {
      IsConstantParameterArray(*model, binary_op->inputs[0]),
      IsConstantParameterArray(*model, binary_op->inputs[1]),
  };
  if (!is_input_constant[0] && !is_input_constant[1]) {
    return absl::OkStatus();
  }
  if (is_input_constant[0] && is_input_constant[1]) {
    // Both constant: leave this to constant folding instead.
    return absl::OkStatus();
  }
  const int index_of_constant_input = is_input_constant[0] ? 0 : 1;
  const int index_of_variable_input = is_input_constant[0] ? 1 : 0;
  CHECK(is_input_constant[index_of_constant_input]);
  CHECK(!is_input_constant[index_of_variable_input]);
  if (binary_op->type == OperatorType::kDiv) {
    // x / c can be folded into weights; c / x cannot.
    if (index_of_constant_input != 1) {
      AddMessageF("Not fusing %s because the denominator is not constant",
                  LogName(*binary_op));
      return absl::OkStatus();
    }
  }
  // The constant operand must be a scalar (all dims <= 1).
  const auto& operand_shape =
      model->GetArray(binary_op->inputs[index_of_constant_input]).shape();
  for (const auto& dim : operand_shape.dims()) {
    if (dim > 1) {
      AddMessageF(
          "Not fusing %s into the following affine op, because we only know "
          "how to do so when the constant operand is a scalar",
          LogName(*binary_op));
      return absl::OkStatus();
    }
  }
  // A fused activation would be applied between the binary op and the affine
  // op, so the two cannot be reordered/merged.
  if (binary_op->fused_activation_function !=
      FusedActivationFunctionType::kNone) {
    AddMessageF("Not fusing %s because it has a fused activation function",
                LogName(*binary_op));
    return absl::OkStatus();
  }
  // The binary op's output must feed exactly one consumer, which must be a
  // supported affine op with constant weights and a bias input.
  if (CountOpsWithInput(*model, binary_op->outputs[0]) != 1) {
    AddMessageF("Not fusing %s because it's consumed by multiple ops",
                LogName(*binary_op));
    return absl::OkStatus();
  }
  Operator* following_op = GetOpWithInput(*model, binary_op->outputs[0]);
  if (!following_op) {
    AddMessageF("Not fusing %s because it is not consumed by any op",
                LogName(*binary_op));
    return absl::OkStatus();
  }
  if (following_op->type != OperatorType::kConv &&
      following_op->type != OperatorType::kFullyConnected &&
      following_op->type != OperatorType::kDepthwiseConv) {
    AddMessageF(
        "Not fusing %s because the following %s is not of one of the supported "
        "types",
        LogName(*binary_op), LogName(*following_op));
    return absl::OkStatus();
  }
  if (following_op->inputs.size() < 3) {
    // Fixed: arguments were previously swapped (following_op, binary_op).
    AddMessageF(
        "Not fusing %s because the following %s does not have a bias vector",
        LogName(*binary_op), LogName(*following_op));
    return absl::OkStatus();
  }
  const auto& weights = model->GetArray(following_op->inputs[1]);
  const auto& bias = model->GetArray(following_op->inputs[2]);
  if (!weights.buffer || !bias.buffer) {
    AddMessageF(
        "Not fusing %s because the following %s has non-constant weights or "
        "bias arrays",
        LogName(*binary_op), LogName(*following_op));
    return absl::OkStatus();
  }
  if (binary_op->type == OperatorType::kAdd ||
      binary_op->type == OperatorType::kSub) {
    // Add/Sub folding into the bias is only valid with VALID padding, where
    // every output element sees the full (shifted) receptive field.
    if (following_op->type == OperatorType::kConv) {
      if (static_cast<ConvOperator*>(following_op)->padding.type !=
          PaddingType::kValid) {
        AddMessageF(
            "Not fusing %s because the following %s does not use VALID padding",
            LogName(*binary_op), LogName(*following_op));
        return absl::OkStatus();
      }
    }
    if (following_op->type == OperatorType::kDepthwiseConv) {
      if (static_cast<DepthwiseConvOperator*>(following_op)->padding.type !=
          PaddingType::kValid) {
        AddMessageF(
            "Not fusing %s because the following %s does not use VALID padding",
            LogName(*binary_op), LogName(*following_op));
        return absl::OkStatus();
      }
    }
    FuseAddOrSubParamsIntoFollowingAffine(model, following_op, binary_op,
                                          index_of_constant_input);
  } else if (binary_op->type == OperatorType::kMul ||
             binary_op->type == OperatorType::kDiv) {
    FuseMulOrDivParamsIntoFollowingAffine(model, following_op, binary_op,
                                          index_of_constant_input);
  } else {
    LOG(FATAL) << "should not get here";
  }
  AddMessageF("Fusing %s into the following %s", LogName(*binary_op),
              LogName(*following_op));
  // Drop the binary op and splice its variable input into the affine op.
  model->EraseArray(binary_op->outputs[0]);
  following_op->inputs[0] = binary_op->inputs[index_of_variable_input];
  DeleteOpAndArrays(model, binary_op);
  *modified = true;
  return absl::OkStatus();
}
} | #include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
namespace toco {
namespace {
std::vector<testing::Matcher<float>> ArrayFloatNear(
const std::vector<float>& values, float max_abs_error = 1e-5) {
std::vector<testing::Matcher<float>> matchers;
matchers.reserve(values.size());
for (const float& v : values) {
matchers.emplace_back(testing::FloatNear(v, max_abs_error));
}
return matchers;
}
}
// Fixture providing helpers to build float arrays (with and without constant
// buffers) in a fresh Model for each test case.
class FuseBinaryIntoFollowingAffineTest : public ::testing::Test {
 protected:
  FuseBinaryIntoFollowingAffineTest() {}
  // Each test starts from an empty model.
  void SetUp() override { model_ = std::make_unique<Model>(); }
  // Creates (or fetches) a float array with the given shape and no buffer.
  void CreateArray(const std::string& name, const std::vector<int>& shape) {
    Array& array = model_->GetOrCreateArray(name);
    array.data_type = ArrayDataType::kFloat;
    Shape* array_shape = array.mutable_shape();
    *(array_shape->mutable_dims()) = shape;
  }
  // Creates a float array with the given shape and copies `data` into its
  // constant buffer. `data` must hold at least product(shape) values.
  void CreateConstantArray(const std::string& name,
                           const std::vector<int>& shape,
                           const std::vector<float>& data) {
    CreateArray(name, shape);
    Array& array = model_->GetOrCreateArray(name);
    auto& array_buffer = array.GetMutableBuffer<ArrayDataType::kFloat>();
    // Buffer size is the product of the dims.
    int bufsize = 1;
    for (int dim : shape) {
      bufsize *= dim;
    }
    array_buffer.data.resize(bufsize);
    float* buf_ptr = array_buffer.data.data();
    for (int i = 0; i < bufsize; ++i) {
      buf_ptr[i] = data[i];
    }
  }
  // Model under test, recreated in SetUp().
  std::unique_ptr<Model> model_;
};
// A scalar Mul feeding a FullyConnected op should be fused away: the FC
// weights get scaled by the scalar (2.0) and the bias is left unchanged.
TEST_F(FuseBinaryIntoFollowingAffineTest, FuseMulIntoFullyConnected) {
  {
    // Graph: Input -> Mul(x2) -> FullyConnected -> Output.
    CreateArray("Input", {2, 2});
    CreateConstantArray("MulInput2", {1}, {2.0});
    CreateArray("MulOutput", {2, 2});
    CreateConstantArray("FCWeight", {2, 2}, {1.0, 2.0, 3.0, 4.0});
    CreateConstantArray("FCBias", {1}, {1.0});
    CreateArray("Output", {2, 2});
    auto* mul_op = new MulOperator;
    mul_op->inputs = {"Input", "MulInput2"};
    mul_op->outputs = {"MulOutput"};
    model_->operators.push_back(std::unique_ptr<Operator>(mul_op));
    auto* fc_op = new FullyConnectedOperator;
    fc_op->inputs = {"MulOutput", "FCWeight", "FCBias"};
    fc_op->outputs = {"Output"};
    model_->operators.push_back(std::unique_ptr<Operator>(fc_op));
  }
  toco::FuseBinaryIntoFollowingAffine transformation;
  bool modified;
  ASSERT_TRUE(transformation.Run(model_.get(), 0, &modified).ok());
  EXPECT_TRUE(modified);
  // The Mul op is gone; only the FC op remains, with scaled weights.
  ASSERT_EQ(model_->operators.size(), 1);
  const auto& op = model_->operators[0];
  ASSERT_EQ(op->type, OperatorType::kFullyConnected);
  ASSERT_EQ(op->inputs.size(), 3);
  auto& weights_array = model_->GetArray(op->inputs[1]);
  EXPECT_THAT(weights_array.GetBuffer<toco::ArrayDataType::kFloat>().data,
              ElementsAreArray(ArrayFloatNear({2.0, 4.0, 6.0, 8.0})));
  auto& bias_array = model_->GetArray(op->inputs[2]);
  EXPECT_THAT(bias_array.GetBuffer<toco::ArrayDataType::kFloat>().data,
              ElementsAreArray(ArrayFloatNear({1.0})));
}
// If the Mul output has a second consumer (an Identity op), fusion must be
// rejected and the graph left intact.
TEST_F(FuseBinaryIntoFollowingAffineTest, DoNotFuseWithMultipleConsumers) {
  {
    // Same graph as above, plus Identity also consuming MulOutput.
    CreateArray("Input", {2, 2});
    CreateConstantArray("MulInput2", {1}, {2.0});
    CreateArray("MulOutput", {2, 2});
    CreateConstantArray("FCWeight", {2, 2}, {1.0, 2.0, 3.0, 4.0});
    CreateConstantArray("FCBias", {1}, {1.0});
    CreateArray("Output", {2, 2});
    CreateArray("AnotherOutput", {2, 2});
    auto* mul_op = new MulOperator;
    mul_op->inputs = {"Input", "MulInput2"};
    mul_op->outputs = {"MulOutput"};
    model_->operators.push_back(std::unique_ptr<Operator>(mul_op));
    auto* fc_op = new FullyConnectedOperator;
    fc_op->inputs = {"MulOutput", "FCWeight", "FCBias"};
    fc_op->outputs = {"Output"};
    model_->operators.push_back(std::unique_ptr<Operator>(fc_op));
    auto identity_op = new TensorFlowIdentityOperator;
    identity_op->inputs = {"MulOutput"};
    identity_op->outputs = {"AnotherOutput"};
    model_->operators.push_back(std::unique_ptr<Operator>(identity_op));
  }
  toco::FuseBinaryIntoFollowingAffine transformation;
  bool modified;
  ASSERT_TRUE(transformation.Run(model_.get(), 0, &modified).ok());
  EXPECT_FALSE(modified);
  // All three ops must still be present.
  EXPECT_EQ(model_->operators.size(), 3);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/fuse_binary_into_following_affine.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/tests/fuse_binary_into_following_affine_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
270ece82-627b-47fa-94a3-20bcccd94c35 | cpp | google/tensorstore | nditerable_elementwise_input_transform | tensorstore/internal/nditerable_elementwise_input_transform.cc | tensorstore/internal/nditerable_elementwise_input_transform_test.cc | #include "tensorstore/internal/nditerable_elementwise_input_transform.h"
#include <stddef.h>
#include <array>
#include "absl/status/status.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_buffer_management.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/internal/unique_with_intrusive_allocator.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
namespace {
// NDIterator that pulls one block from each of the `Arity` input iterators
// and then applies the elementwise closure to write the output block through
// `pointer`.
template <size_t Arity>
class ElementwiseInputTransformNDIterator
    : public NDIterator::Base<ElementwiseInputTransformNDIterator<Arity>> {
 public:
  explicit ElementwiseInputTransformNDIterator(
      tensorstore::span<const NDIterable::Ptr, Arity> inputs,
      ElementwiseClosure<Arity + 1, void*> closure,
      NDIterable::IterationBufferKindLayoutView layout,
      ArenaAllocator<> allocator)
      : inputs_(inputs, layout, allocator),
        context_(closure.context),
        // Select the function variant specialized for this buffer kind.
        elementwise_function_((*closure.function)[layout.buffer_kind]) {}
  ArenaAllocator<> get_allocator() const override {
    return inputs_.get_allocator();
  }
  // Fetches the next block from every input, then invokes the elementwise
  // function. `status` doubles as the function's void* argument, so the
  // function can report errors through it; returns false on failure.
  bool GetBlock(tensorstore::span<const Index> indices,
                IterationBufferShape block_shape,
                IterationBufferPointer* pointer,
                absl::Status* status) override {
    return inputs_.GetBlock(indices, block_shape, status) &&
           InvokeElementwiseFunction<Arity>(
               elementwise_function_, context_, block_shape,
               inputs_.block_pointers(), *pointer, static_cast<void*>(status));
  }
 private:
  // Input iterators together with their managed block buffers.
  NDIteratorsWithManagedBuffers<Arity> inputs_;
  // Opaque user data passed through to the elementwise function.
  void* context_;
  SpecializedElementwiseFunctionPointer<Arity + 1, void*> elementwise_function_;
};
// NDIterable adapter that exposes the result of applying an elementwise
// closure to `Arity` input iterables as a single iterable of `output_dtype`.
// The input iterables themselves are stored in (and buffer-managed by) the
// NDIterablesWithManagedBuffers base as `this->iterables`.
template <size_t Arity>
class ElementwiseInputTransformNDIterable
    : public NDIterablesWithManagedBuffers<
          std::array<NDIterable::Ptr, Arity>,
          NDIterable::Base<ElementwiseInputTransformNDIterable<Arity>>> {
  using Base = NDIterablesWithManagedBuffers<
      std::array<NDIterable::Ptr, Arity>,
      NDIterable::Base<ElementwiseInputTransformNDIterable<Arity>>>;
 public:
  ElementwiseInputTransformNDIterable(
      std::array<NDIterable::Ptr, Arity> input_iterables, DataType output_dtype,
      ElementwiseClosure<Arity + 1, void*> closure, ArenaAllocator<> allocator)
      : Base{std::move(input_iterables)},
        output_dtype_(output_dtype),
        closure_(closure),
        allocator_(allocator) {}
  ArenaAllocator<> get_allocator() const override { return allocator_; }
  DataType dtype() const override { return output_dtype_; }
  NDIterator::Ptr GetIterator(
      NDIterable::IterationBufferKindLayoutView layout) const override {
    return MakeUniqueWithVirtualIntrusiveAllocator<
        ElementwiseInputTransformNDIterator<Arity>>(allocator_, this->iterables,
                                                    closure_, layout);
  }
 private:
  // NOTE: a duplicate `std::array<NDIterable::Ptr, Arity> inputs_` member was
  // removed here; it was never initialized or read (the inputs live in
  // `Base::iterables`) and only wasted space per instance.
  DataType output_dtype_;
  ElementwiseClosure<Arity + 1, void*> closure_;
  ArenaAllocator<> allocator_;
};
}
// Returns an NDIterable of `output_dtype` whose blocks are produced by
// applying `closure` to blocks of the `Arity - 1` input iterables. All
// allocation is done through `arena`.
template <size_t Arity>
NDIterable::Ptr GetElementwiseInputTransformNDIterable(
    std::array<NDIterable::Ptr, Arity - 1> inputs, DataType output_dtype,
    ElementwiseClosure<Arity, void*> closure, Arena* arena) {
  return MakeUniqueWithVirtualIntrusiveAllocator<
      ElementwiseInputTransformNDIterable<Arity - 1>>(
      ArenaAllocator<>(arena), std::move(inputs), output_dtype, closure);
}
// Explicit instantiations of GetElementwiseInputTransformNDIterable for the
// supported arities (1..4 including the output operand).
// The instantiation statement must be the final line of the macro, with no
// trailing backslash: the previous revision carried a dangling `\` that
// glued the first invocation into the macro body, so Arity=1 was never
// instantiated and every expansion emitted unexpandable leftover tokens.
#define TENSORSTORE_INTERNAL_DO_INSTANTIATE(Arity)                          \
  template NDIterable::Ptr GetElementwiseInputTransformNDIterable<Arity>(   \
      std::array<NDIterable::Ptr, Arity - 1> inputs, DataType output_dtype, \
      ElementwiseClosure<Arity, void*> closure, Arena * arena);
TENSORSTORE_INTERNAL_DO_INSTANTIATE(1)
TENSORSTORE_INTERNAL_DO_INSTANTIATE(2)
TENSORSTORE_INTERNAL_DO_INSTANTIATE(3)
TENSORSTORE_INTERNAL_DO_INSTANTIATE(4)
#undef TENSORSTORE_INTERNAL_DO_INSTANTIATE
}
} | #include "tensorstore/internal/nditerable_elementwise_input_transform.h"
#include <new>
#include <tuple>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable_copy.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::NDIterableCopier;
using ::testing::_;
using ::testing::Pair;
// Wraps GetElementwiseInputTransformNDIterable for testing: builds an
// elementwise closure from `func`, feeds it the source arrays as input
// iterables, and copies the resulting iterable into `dest_array` under the
// given iteration `constraints`. Returns the copier's status.
template <typename Func, typename DestArray, typename... SourceArray>
absl::Status TestCopy(Func func, tensorstore::IterationConstraints constraints,
                      DestArray dest_array, SourceArray... source_array) {
  tensorstore::internal::Arena arena;
  // Closure arity = number of sources + 1 output operand.
  tensorstore::internal::ElementwiseClosure<sizeof...(SourceArray) + 1, void*>
      closure = tensorstore::internal::SimpleElementwiseFunction<
          Func(typename SourceArray::Element..., typename DestArray::Element),
          void*>::Closure(&func);
  auto iterable = tensorstore::internal::GetElementwiseInputTransformNDIterable(
      {{tensorstore::internal::GetTransformedArrayNDIterable(source_array,
                                                             &arena)
            .value()...}},
      tensorstore::dtype_v<typename DestArray::Element>, closure, &arena);
  return NDIterableCopier(*iterable,
                          *tensorstore::internal::GetTransformedArrayNDIterable(
                               dest_array, &arena)
                               .value(),
                          dest_array.shape(), constraints, &arena)
      .Copy();
}
// Zero inputs: the closure only writes the output.
TEST(NDIterableElementwiseInputTransformTest, Nullary) {
  auto dest = tensorstore::AllocateArray<double>({2, 3});
  TENSORSTORE_EXPECT_OK(TestCopy([](double* dest, void* arg) { *dest = 42.0; },
                                 {}, dest));
  EXPECT_EQ(
      tensorstore::MakeArray<double>({{42.0, 42.0, 42.0}, {42.0, 42.0, 42.0}}),
      dest);
}
// One input: elementwise negation with an int -> double conversion.
TEST(NDIterableElementwiseInputTransformTest, Unary) {
  auto source = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
  auto dest = tensorstore::AllocateArray<double>(source.shape());
  TENSORSTORE_EXPECT_OK(TestCopy(
      [](const int* source, double* dest, void* arg) { *dest = -*source; },
      {}, dest, source));
  EXPECT_EQ(
      tensorstore::MakeArray<double>({{-1.0, -2.0, -3.0}, {-4.0, -5.0, -6.0}}),
      dest);
}
// Two inputs combined per element: dest = 2*a + b.
TEST(NDIterableElementwiseInputTransformTest, Binary) {
  auto a = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
  auto b = tensorstore::MakeArray<int>({{10, 12, 14}, {16, 18, 20}});
  auto dest = tensorstore::AllocateArray<double>(a.shape());
  TENSORSTORE_EXPECT_OK(TestCopy([](const int* a, const int* b, double* dest,
                                    void* arg) { *dest = 2.0 * *a + *b; },
                                 {}, dest, a, b));
  EXPECT_EQ(
      tensorstore::MakeArray<double>({{12.0, 16.0, 20.0}, {24.0, 28.0, 32.0}}),
      dest);
}
// Three inputs of mixed element types: dest = a + b*c.
TEST(NDIterableElementwiseInputTransformTest, Ternary) {
  auto a = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
  auto b = tensorstore::MakeArray<int>({{10, 12, 14}, {16, 18, 20}});
  auto c = tensorstore::MakeArray<double>({{1, -1, 1}, {-1, -1, 1}});
  auto dest = tensorstore::AllocateArray<double>(a.shape());
  TENSORSTORE_EXPECT_OK(
      TestCopy([](const int* a, const int* b, const double* c, double* dest,
                  void* arg) { *dest = *a + *b * *c; },
               {}, dest, a, b, c));
  EXPECT_EQ(
      tensorstore::MakeArray<double>({{1 + 10 * 1, 2 + 12 * -1, 3 + 14 * 1},
                                      {4 + 16 * -1, 5 + 18 * -1, 6 + 20 * 1}}),
      dest);
}
// When the closure reports an error mid-way (via the void* status argument
// and a false return), iteration stops and only the already-processed prefix
// is written; the rest of dest keeps its value-initialized zeros.
TEST(NDIterableElementwiseInputTransformTest, PartialCopy) {
  auto source = tensorstore::MakeArray<int>({1, 2, 3, 0, 5, 6});
  auto dest = tensorstore::AllocateArray<double>(
      source.shape(), tensorstore::c_order, tensorstore::value_init);
  EXPECT_THAT(TestCopy(
                  [](const int* source, double* dest, void* arg) {
                    auto* status = static_cast<absl::Status*>(arg);
                    if (*source == 0) {
                      *status = absl::UnknownError("zero");
                      return false;
                    }
                    *dest = -*source;
                    return true;
                  },
                  tensorstore::c_order, dest, source),
              MatchesStatus(absl::StatusCode::kUnknown, "zero"));
  EXPECT_EQ(tensorstore::MakeArray<double>({-1.0, -2.0, -3.0, 0.0, 0.0, 0.0}),
            dest);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_elementwise_input_transform.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_elementwise_input_transform_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
2bfcf878-eab8-4d00-8e2a-4463f10332fb | cpp | google/arolla | simple_executable | arolla/qexpr/simple_executable.cc | arolla/qexpr/simple_executable_test.cc | #include "arolla/qexpr/simple_executable.h"
#include <memory>
#include "absl/status/status.h"
#include "arolla/memory/frame.h"
#include "arolla/qexpr/bound_operators.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/evaluation_engine.h"
namespace arolla {
// Runs the literal-initialization operator sequence against `frame`.
void SimpleBoundExpr::InitializeLiterals(EvaluationContext* ctx,
                                         FramePtr frame) const {
  RunBoundOperators(init_ops_, ctx, frame);
}
// Runs the evaluation operator sequence against `frame`.
void SimpleBoundExpr::Execute(EvaluationContext* ctx, FramePtr frame) const {
  RunBoundOperators(eval_ops_, ctx, frame);
}
// Initializes literals of each sub-expression in order, stopping at the
// first one that leaves a non-OK status in the evaluation context.
void CombinedBoundExpr::InitializeLiterals(EvaluationContext* ctx,
                                           FramePtr frame) const {
  for (const auto& subexpr : subexprs_) {
    subexpr->InitializeLiterals(ctx, frame);
    if (!ctx->status().ok()) {
      return;
    }
  }
}
// Executes each sub-expression in order, stopping at the first one that
// leaves a non-OK status in the evaluation context.
void CombinedBoundExpr::Execute(EvaluationContext* ctx, FramePtr frame) const {
  for (const auto& subexpr : subexprs_) {
    subexpr->Execute(ctx, frame);
    if (!ctx->status().ok()) {
      return;
    }
  }
}
} | #include "arolla/qexpr/simple_executable.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status_matchers.h"
#include "arolla/memory/frame.h"
#include "arolla/qexpr/bound_operators.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/evaluation_engine.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/unit.h"
namespace arolla {
namespace {
using ::absl_testing::IsOk;
using ::testing::Eq;
// Builds a SimpleBoundExpr whose init phase increments `init_counter` and
// whose exec phase increments `exec_counter`, letting tests observe which
// phases ran and how often.
// NOTE(review): this test calls `InitializeLiterals(&ctx)` / `Execute(&ctx)`
// as status-returning single-argument methods, whereas the implementation
// above declares them void with (ctx, frame) — looks like version skew
// between the paired files; confirm against the current headers.
std::unique_ptr<BoundExpr> CreateCountingBoundExpr(
    FrameLayout::Slot<int> init_counter, FrameLayout::Slot<int> exec_counter) {
  std::vector<std::unique_ptr<BoundOperator>> init_ops;
  init_ops.push_back(MakeBoundOperator([=](EvaluationContext*, FramePtr frame) {
    ++(*frame.GetMutable(init_counter));
  }));
  std::vector<std::unique_ptr<BoundOperator>> exec_ops;
  exec_ops.push_back(MakeBoundOperator([=](EvaluationContext*, FramePtr frame) {
    ++(*frame.GetMutable(exec_counter));
  }));
  return std::make_unique<SimpleBoundExpr>(
      absl::flat_hash_map<std::string, TypedSlot>{},
      TypedSlot::UnsafeFromOffset(GetQType<Unit>(), 0),
      std::move(init_ops), std::move(exec_ops));
}
// Verifies that CombinedBoundExpr forwards InitializeLiterals and Execute to
// every sub-expression exactly once, and that the two phases are independent.
TEST(SimpleExecutableTest, CombinedBoundExpr) {
  FrameLayout::Builder builder;
  std::vector<std::unique_ptr<BoundExpr>> subexprs;
  auto init_1_called = builder.AddSlot<int>();
  auto exec_1_called = builder.AddSlot<int>();
  subexprs.push_back(CreateCountingBoundExpr(init_1_called, exec_1_called));
  auto init_2_called = builder.AddSlot<int>();
  auto exec_2_called = builder.AddSlot<int>();
  subexprs.push_back(CreateCountingBoundExpr(init_2_called, exec_2_called));
  std::unique_ptr<BoundExpr> combined_expr =
      std::make_unique<CombinedBoundExpr>(
          absl::flat_hash_map<std::string, TypedSlot>{},
          TypedSlot::UnsafeFromOffset(GetQType<Unit>(), 0),
          absl::flat_hash_map<std::string, TypedSlot>{}, std::move(subexprs));
  FrameLayout layout = std::move(builder).Build();
  RootEvaluationContext ctx(&layout);
  // All counters start at zero.
  ctx.Set(init_1_called, 0);
  ctx.Set(init_2_called, 0);
  ctx.Set(exec_1_called, 0);
  ctx.Set(exec_2_called, 0);
  // Init phase touches only the init counters.
  ASSERT_THAT(combined_expr->InitializeLiterals(&ctx), IsOk());
  EXPECT_THAT(ctx.Get(init_1_called), Eq(1));
  EXPECT_THAT(ctx.Get(init_2_called), Eq(1));
  EXPECT_THAT(ctx.Get(exec_1_called), Eq(0));
  EXPECT_THAT(ctx.Get(exec_2_called), Eq(0));
  // Exec phase touches only the exec counters.
  ASSERT_THAT(combined_expr->Execute(&ctx), IsOk());
  EXPECT_THAT(ctx.Get(init_1_called), Eq(1));
  EXPECT_THAT(ctx.Get(init_2_called), Eq(1));
  EXPECT_THAT(ctx.Get(exec_1_called), Eq(1));
  EXPECT_THAT(ctx.Get(exec_2_called), Eq(1));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/simple_executable.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/simple_executable_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
cea0912d-1692-4ae8-ae3b-f70f75185623 | cpp | tensorflow/tensorflow | basic_string_array | third_party/xla/xla/python/pjrt_ifrt/basic_string_array.cc | third_party/xla/xla/python/pjrt_ifrt/basic_string_array_test.cc | #include "xla/python/pjrt_ifrt/basic_string_array.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/hash/hash.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/pjrt/pjrt_layout.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
// All BasicStringArrayLayouts are identical (dense, major-to-minor), so the
// serialized representation carries no information.
std::string BasicStringArrayLayout::Serialize() const { return {}; }
std::string BasicStringArrayLayout::ToString() const {
  return "BasicStringArrayLayout: Dense, major-to-minor.";
}
// Layouts compare equal iff the other side is also a BasicStringArrayLayout;
// there is no per-instance state to compare.
bool BasicStringArrayLayout::operator==(const PjRtLayout& other) const {
  return dynamic_cast<const xla::ifrt::BasicStringArrayLayout*>(&other) !=
         nullptr;
}
// No state to fold in: every instance hashes identically.
void BasicStringArrayLayout::Hash(absl::HashState state) const {}
// Address-based type tag; presumably the anchor for LLVM-style RTTI
// (llvm::isa/dyn_cast) — confirm against the class declaration.
char BasicStringArray::ID = 0;
// Creates a BasicStringArray backed by an asynchronously produced set of
// per-shard string buffers. A validator is chained onto `buffers`: once the
// buffers arrive, the shard count is checked against the sharding's device
// count, and both the internal buffers future and the array's ready future
// are fulfilled (with the buffers / OK, or with the error).
// `on_done_with_buffer` is invoked when the array is deleted/destroyed.
absl::StatusOr<tsl::RCReference<BasicStringArray>> BasicStringArray::Create(
    Client* client, Shape shape, std::shared_ptr<const Sharding> sharding,
    Future<Buffers> buffers, OnDoneWithBuffer on_done_with_buffer) {
  if (!buffers.IsValid()) {
    return absl::InvalidArgumentError("Got buffers_ future is invalid");
  }
  // The futures handed to the array are fulfilled only after validation.
  auto buffers_promise = Future<Buffers>::CreatePromise();
  auto buffers_future = Future<Buffers>(buffers_promise);
  auto ready_promise = Future<>::CreatePromise();
  auto ready_future = Future<>(ready_promise);
  auto buffer_validator =
      [buffers_promise = std::move(buffers_promise),
       ready_promise = std::move(ready_promise),
       sharding = sharding](absl::StatusOr<Buffers> buffers) mutable {
        if (!buffers.ok()) {
          // Propagate the producer's error to both futures.
          buffers_promise.Set(buffers.status());
          ready_promise.Set(buffers.status());
          return;
        }
        // One buffer per addressable device is required.
        if (sharding->devices()->size() != (*buffers).size()) {
          auto error = absl::FailedPreconditionError(absl::StrCat(
              "Number of buffers: ", (*buffers).size(),
              " does not match the number of devices in sharding: ",
              sharding->devices()->size()));
          buffers_promise.Set(error);
          ready_promise.Set(error);
          return;
        }
        buffers_promise.Set(std::move(buffers));
        ready_promise.Set(absl::OkStatus());
      };
  buffers.OnReady(std::move(buffer_validator));
  return tsl::MakeRef<BasicStringArray>(
      client, std::move(shape), std::move(sharding), std::move(buffers_future),
      std::move(ready_future), std::move(on_done_with_buffer));
}
// Private constructor; use Create(), which wires up the validated
// buffers/ready futures passed in here.
BasicStringArray::BasicStringArray(Client* client, Shape shape,
                                   std::shared_ptr<const Sharding> sharding,
                                   Future<Buffers> buffers,
                                   Future<> ready_future,
                                   OnDoneWithBuffer on_done_with_buffer)
    : client_(client),
      shape_(std::move(shape)),
      sharding_(std::move(sharding)),
      buffers_(std::move(buffers)),
      ready_future_(std::move(ready_future)),
      on_done_with_buffer_(std::move(on_done_with_buffer)) {}
// Destruction releases the buffers exactly as Delete() would.
BasicStringArray::~BasicStringArray() { DeleteInternal(); }
// Deletion is synchronous for this array type, so an already-OK future is
// returned.
Future<> BasicStringArray::Delete() {
  DeleteInternal();
  return Future<>(absl::OkStatus());
}
bool BasicStringArray::IsDeleted() const {
  absl::MutexLock lock(&mu_);
  return is_deleted_;
}
// Idempotent: runs the on-done-with-buffer callback (if any) once and marks
// the array deleted. Guarded by mu_.
void BasicStringArray::DeleteInternal() {
  absl::MutexLock lock(&mu_);
  if (is_deleted_) {
    return;
  }
  if (on_done_with_buffer_) {
    std::move(on_done_with_buffer_)();
  }
  is_deleted_ = true;
}
// Returns the future fulfilled once the buffers have been validated (see
// Create); fails fast if the array was already deleted.
Future<> BasicStringArray::GetReadyFuture() const {
  DCHECK(this);
  absl::MutexLock lock(&mu_);
  if (is_deleted_) {
    return Future<>(
        absl::FailedPreconditionError("Array has already been deleted"));
  }
  return ready_future_;
}
// Splits this array into one single-shard array per device. Each result
// array gets its own promise plus a backing store; when this array's buffers
// become ready, the strings of each shard are deep-copied into the matching
// backing store and that shard's promise is fulfilled. The per-shard
// on-done-with-buffer callbacks keep their backing stores alive.
absl::StatusOr<std::vector<tsl::RCReference<Array>>>
BasicStringArray::DisassembleIntoSingleDeviceArrays(
    ArrayCopySemantics semantics) {
  DCHECK(this);
  absl::MutexLock lock(&mu_);
  if (is_deleted_) {
    return absl::FailedPreconditionError("Array has already been deleted");
  }
  int num_shards = sharding_->devices()->size();
  std::vector<Promise<Buffers>> buffer_promises;
  buffer_promises.reserve(num_shards);
  std::vector<Future<Buffers>> buffer_futures;
  buffer_futures.reserve(num_shards);
  // Owns a deep copy of one shard's strings; string_views alias `strings`.
  struct PerShardBufferBackingStore {
    void CopyFrom(absl::Span<const absl::string_view> input_buffer) {
      strings.reserve(input_buffer.size());
      string_views.reserve(input_buffer.size());
      for (absl::string_view buf : input_buffer) {
        strings.push_back(std::string(buf.data(), buf.size()));
        string_views.push_back(strings.back());
      }
    }
    std::vector<std::string> strings;
    std::vector<absl::string_view> string_views;
  };
  std::vector<std::shared_ptr<PerShardBufferBackingStore>>
      per_shard_buffer_backing_stores;
  per_shard_buffer_backing_stores.reserve(num_shards);
  std::vector<OnDoneWithBuffer> on_done_with_buffer_callbacks;
  on_done_with_buffer_callbacks.reserve(num_shards);
  for (int i = 0; i < num_shards; ++i) {
    buffer_promises.push_back(Future<Buffers>::CreatePromise());
    buffer_futures.push_back(Future<Buffers>(buffer_promises.back()));
    auto backing_store = std::make_shared<PerShardBufferBackingStore>();
    per_shard_buffer_backing_stores.push_back(backing_store);
    // The callback's only job is to extend the backing store's lifetime
    // until the child array is done with its buffer.
    on_done_with_buffer_callbacks.push_back(
        [backing_store = std::move(backing_store)]() {});
  }
  buffers_.OnReady([buffer_promises = std::move(buffer_promises),
                    per_shard_buffer_backing_stores =
                        std::move(per_shard_buffer_backing_stores)](
                       absl::StatusOr<Buffers> buffers) mutable {
    if (!buffers.ok()) {
      // Propagate the error to every child and drop the backing stores.
      for (auto& promise : buffer_promises) {
        promise.Set(buffers.status());
      }
      per_shard_buffer_backing_stores.clear();
      return;
    }
    auto num_shards = buffers->size();
    for (int i = 0; i < num_shards; ++i) {
      per_shard_buffer_backing_stores[i]->CopyFrom((*buffers)[i]);
      // Local `buffers` shadows the lambda parameter: a fresh single-shard
      // Buffers for child i.
      Buffers buffers;
      buffers.push_back(per_shard_buffer_backing_stores[i]->string_views);
      buffer_promises[i].Set(std::move(buffers));
    }
  });
  TF_ASSIGN_OR_RETURN(auto shapes_and_shadings, sharding_->Disassemble(shape_));
  std::vector<tsl::RCReference<Array>> arrays;
  arrays.reserve(num_shards);
  for (int i = 0; i < num_shards; ++i) {
    TF_ASSIGN_OR_RETURN(auto array,
                        BasicStringArray::Create(
                            client_, std::move(shapes_and_shadings[i].first),
                            std::move(shapes_and_shadings[i].second),
                            std::move(buffer_futures[i]),
                            std::move(on_done_with_buffer_callbacks[i])));
    arrays.push_back(array);
  }
  return arrays;
}
// Host transfer is not supported for string arrays; always fails with
// Unimplemented.
Future<> BasicStringArray::CopyToHostBuffer(
    void* data, std::optional<absl::Span<const int64_t>> byte_strides,
    ArrayCopySemantics semantics) {
  DCHECK(this);
  return Future<>(absl::UnimplementedError("Not implemented"));
}
// Copies this array onto a new device assignment (same shard count). The
// string data is deep-copied asynchronously: when this array's buffers are
// ready, each shard is duplicated into a shared backing store and the new
// array's buffers future is fulfilled with views into that store.
absl::StatusOr<tsl::RCReference<Array>> BasicStringArray::Copy(
    std::optional<tsl::RCReference<xla::ifrt::DeviceList>> devices,
    std::optional<xla::ifrt::MemoryKind> memory_kind,
    ArrayCopySemantics semantics) {
  DCHECK(this);
  absl::MutexLock lock(&mu_);
  if (is_deleted_) {
    return absl::FailedPreconditionError("Array has already been deleted");
  }
  TF_ASSIGN_OR_RETURN(auto new_sharding, sharding().WithDeviceAssignment(
                                             std::move(devices), memory_kind));
  // Copy cannot reshard: the destination must have the same shard count.
  if (new_sharding->devices()->size() != sharding_->devices()->size()) {
    return absl::InvalidArgumentError(absl::StrCat(
        "Number of devices in new sharding: ", new_sharding->devices()->size(),
        " does not match the number of devices in the current sharding: ",
        sharding_->devices()->size()));
  }
  // Owns deep copies of all shards; string_views alias `strings`.
  struct BufferBackingStore {
    void AddShardData(absl::Span<const absl::string_view> input_buffer) {
      auto& shard_strings = strings.emplace_back();
      shard_strings.reserve(input_buffer.size());
      auto& shard_string_views = string_views.emplace_back();
      shard_string_views.reserve(input_buffer.size());
      for (absl::string_view buf : input_buffer) {
        shard_strings.push_back(std::string(buf.data(), buf.size()));
        shard_string_views.push_back(shard_strings.back());
      }
    }
    std::vector<std::vector<std::string>> strings;
    std::vector<std::vector<absl::string_view>> string_views;
  };
  auto backing_store = std::make_shared<BufferBackingStore>();
  // Keeps the backing store alive until the new array releases its buffer.
  auto on_done_with_buffer = [backing_store]() {};
  auto buffers_promise = Future<Buffers>::CreatePromise();
  auto buffers_future = Future<Buffers>(buffers_promise);
  auto copier = [backing_store = std::move(backing_store),
                 buffers_promise = std::move(buffers_promise)](
                    absl::StatusOr<Buffers> input_buffers) mutable {
    if (!input_buffers.ok()) {
      buffers_promise.Set(input_buffers.status());
      return;
    }
    Buffers buffers;
    buffers.reserve(input_buffers->size());
    for (auto& input_buffer : *input_buffers) {
      backing_store->AddShardData(input_buffer);
      buffers.push_back(backing_store->string_views.back());
    }
    buffers_promise.Set(std::move(buffers));
  };
  buffers_.OnReady(std::move(copier));
  return BasicStringArray::Create(client_, shape_, std::move(new_sharding),
                                  std::move(buffers_future),
                                  std::move(on_done_with_buffer));
}
// Returns a single-device array holding one replica of a fully replicated
// array. The first shard's strings are deep-copied asynchronously into a
// backing store owned by the new array; the result is sharded onto the first
// device of the current sharding.
absl::StatusOr<tsl::RCReference<Array>> BasicStringArray::FullyReplicatedShard(
    ArrayCopySemantics semantics) {
  absl::MutexLock lock(&mu_);
  if (is_deleted_) {
    return absl::FailedPreconditionError("Array has already been deleted");
  }
  if (!sharding_->IsFullyReplicated()) {
    return absl::FailedPreconditionError("This array is not fully replicated");
  }
  // Owns a deep copy of one shard's strings; string_views alias `strings`.
  struct BufferBackingStore {
    void CopyFrom(absl::Span<const absl::string_view> input_buffer) {
      strings.reserve(input_buffer.size());
      string_views.reserve(input_buffer.size());
      for (absl::string_view buf : input_buffer) {
        strings.push_back(std::string(buf.data(), buf.size()));
        string_views.push_back(strings.back());
      }
    }
    std::vector<std::string> strings;
    std::vector<absl::string_view> string_views;
  };
  auto backing_store = std::make_shared<BufferBackingStore>();
  // Keeps the backing store alive until the new array releases its buffer.
  auto on_done_with_buffer = [backing_store]() {};
  auto buffers_promise = Future<Buffers>::CreatePromise();
  auto buffers_future = Future<Buffers>(buffers_promise);
  auto copier = [backing_store = std::move(backing_store),
                 buffers_promise = std::move(buffers_promise)](
                    absl::StatusOr<Buffers> input_buffers) mutable {
    if (!input_buffers.ok()) {
      buffers_promise.Set(input_buffers.status());
      return;
    }
    // Fully replicated: any shard works; take shard 0.
    auto& input_buffer = (*input_buffers)[0];
    backing_store->CopyFrom(input_buffer);
    Buffers buffers;
    buffers.push_back(backing_store->string_views);
    buffers_promise.Set(std::move(buffers));
  };
  buffers_.OnReady(std::move(copier));
  return BasicStringArray::Create(
      client_, shape_,
      SingleDeviceSharding::Create(sharding_->devices()->devices().front(),
                                   MemoryKind()),
      std::move(buffers_future), std::move(on_done_with_buffer));
}
// Returns a default-constructed BasicStringArrayLayout, or fails if the array
// has already been deleted.
absl::StatusOr<std::unique_ptr<PjRtLayout>> BasicStringArray::layout() const {
  absl::MutexLock lock(&mu_);
  if (!is_deleted_) {
    return std::make_unique<BasicStringArrayLayout>();
  }
  return absl::FailedPreconditionError("Array has already been deleted");
}
// Returns a human-readable description of the array (shape, sharding, layout)
// for logging and debugging.
std::string BasicStringArray::DebugString() const {
  // Removed the former `DCHECK(this)`: a null `this` is undefined behavior
  // before the check even runs, so compilers may (and do) delete such checks;
  // it provided no protection.
  return absl::StrFormat(
      "BasicStringArray(shape=%s; sharding=%s; layout=major-to-minor-dense)",
      shape_.DebugString(), sharding_->DebugString());
}
}
} | #include "xla/python/pjrt_ifrt/basic_string_array.h"
#include <cstdint>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/notification.h"
#include "absl/types/span.h"
#include "llvm/Support/Casting.h"
#include "xla/layout.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/pjrt/pjrt_layout.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
// Creates a shape-{1} BasicStringArray on the client's first addressable
// device, backed by `buffers`. `on_done_with_buffer` may be null when the
// test does not care about buffer lifetime.
absl::StatusOr<tsl::RCReference<BasicStringArray>> CreateTestArray(
    Client* client, Future<BasicStringArray::Buffers> buffers,
    BasicStringArray::OnDoneWithBuffer on_done_with_buffer) {
  Shape shape({1});
  Device* device = client->addressable_devices().at(0);
  std::shared_ptr<const Sharding> sharding =
      SingleDeviceSharding::Create(device, MemoryKind());
  return BasicStringArray::Create(client, shape, sharding, std::move(buffers),
                                  std::move(on_done_with_buffer));
}
// Builds a one-shard Buffers whose string_views point at heap-owned copies of
// `input_strings`, plus an OnDoneWithBuffer callback whose captures keep the
// backing storage alive until the consumer is done with the buffer.
std::pair<BasicStringArray::Buffers, BasicStringArray::OnDoneWithBuffer>
MakeBuffersAndOnDoneWithBuffer(
    absl::Span<const absl::string_view> input_strings) {
  BasicStringArray::Buffers buffers;
  auto string_holder = std::make_shared<std::vector<std::string>>();
  string_holder->reserve(input_strings.size());
  auto string_view_holder = std::make_shared<std::vector<absl::string_view>>();
  string_view_holder->reserve(input_strings.size());
  for (const auto str : input_strings) {
    string_holder->push_back(std::string(str));
  }
  // Views are taken in a second pass, after every string is in place; the
  // reserve() above additionally guarantees no reallocation invalidates them.
  for (const auto& str : *string_holder) {
    string_view_holder->push_back(absl::string_view(str));
  }
  buffers.push_back(*string_view_holder);
  // The empty lambda exists only to own the shared_ptrs: destruction of the
  // backing strings is deferred until this callback itself is destroyed.
  BasicStringArray::OnDoneWithBuffer on_done_with_buffer =
      [string_holder = std::move(string_holder),
       string_view_holder = std::move(string_view_holder)]() {};
  return std::make_pair(std::move(buffers), std::move(on_done_with_buffer));
}
// Creates a shape-{1} single-device array whose buffers future is not yet
// fulfilled. Returns the array together with the promise the caller must Set
// (with buffers or an error) to make the array ready.
absl::StatusOr<std::pair<tsl::RCReference<BasicStringArray>,
                         Promise<BasicStringArray::Buffers>>>
CreateNonReadyTestArray(
    Client* client, Device* const device,
    BasicStringArray::OnDoneWithBuffer on_done_with_buffer) {
  auto buffers_promise = Future<BasicStringArray::Buffers>::CreatePromise();
  auto buffers_future = Future<BasicStringArray::Buffers>(buffers_promise);
  Shape shape({1});
  std::shared_ptr<const Sharding> sharding =
      SingleDeviceSharding::Create(device, MemoryKind());
  TF_ASSIGN_OR_RETURN(auto array,
                      BasicStringArray::Create(client, shape, sharding,
                                               std::move(buffers_future),
                                               std::move(on_done_with_buffer)));
  return std::make_pair(std::move(array), std::move(buffers_promise));
}
// A default layout serializes to an empty string (it carries no parameters).
TEST(BasicStringArrayLayoutTest, Serialize) {
  BasicStringArrayLayout layout;
  EXPECT_TRUE(layout.Serialize().empty());
}
// The human-readable form mentions the major-to-minor ordering.
TEST(BasicStringArrayLayoutTest, ToString) {
  BasicStringArrayLayout layout;
  auto output_str = layout.ToString();
  EXPECT_THAT(output_str, HasSubstr("major-to-minor"));
}
// Two string-array layouts compare equal (even via the PjRtLayout base);
// a layout of a different concrete type compares unequal.
TEST(BasicStringArrayLayoutTest, Equality) {
  BasicStringArrayLayout layout_1;
  BasicStringArrayLayout layout_2;
  const PjRtLayout& layout_3 = layout_2;
  EXPECT_EQ(layout_1, layout_3);
  // Extra parentheses prevent this declaration from parsing as a function
  // declaration (most vexing parse).
  xla::PjRtXlaLayout layout_6((xla::Layout()));
  const PjRtLayout& layout_7 = layout_6;
  EXPECT_FALSE(layout_7 == layout_1);
}
TEST(BasicStringArrayTest, CreateSuccess) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
BasicStringArray::Buffers buffers;
buffers.push_back({"abc", "def"});
TF_EXPECT_OK(CreateTestArray(client.get(),
Future<BasicStringArray::Buffers>(buffers),
nullptr));
}
TEST(BasicStringArrayTest, CreateFailureWithInvalidFuture) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
EXPECT_THAT(CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(),
nullptr),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(BasicStringArrayTest, Destruction) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
BasicStringArray::Buffers buffers;
buffers.push_back({"abc", "def"});
absl::Notification on_done_with_buffer_called;
BasicStringArray::OnDoneWithBuffer on_done_with_buffer =
[&on_done_with_buffer_called]() { on_done_with_buffer_called.Notify(); };
auto array_creation_status_promise = PjRtFuture<>::CreatePromise();
tsl::Env::Default()->SchedClosure(([&]() {
auto array = CreateTestArray(client.get(),
Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer));
array_creation_status_promise.Set(array.status());
}));
TF_ASSERT_OK(Future<>(array_creation_status_promise).Await());
on_done_with_buffer_called.WaitForNotification();
}
TEST(BasicStringArrayTest, InvalidBuffersAreHandledCorrectly) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 1);
auto shard0_data = std::make_shared<std::vector<absl::string_view>>();
shard0_data->push_back("abc");
auto shard1_data = std::make_shared<std::vector<absl::string_view>>();
shard1_data->push_back("def");
BasicStringArray::Buffers buffers;
buffers.push_back(*shard0_data);
buffers.push_back(*shard1_data);
auto on_done_with_buffer = [shard0_data = std::move(shard0_data),
shard1_data = std::move(shard1_data)]() {};
TF_ASSERT_OK_AND_ASSIGN(
auto ret, CreateNonReadyTestArray(client.get(), devices[0],
std::move(on_done_with_buffer)));
auto array = ret.first;
auto promise = ret.second;
auto basic_string_array = llvm::dyn_cast<BasicStringArray>(array.get());
tsl::Env::Default()->SchedClosure([&]() { promise.Set(buffers); });
EXPECT_THAT(basic_string_array->GetReadyFuture().Await(),
StatusIs(absl::StatusCode::kFailedPrecondition));
EXPECT_THAT(basic_string_array->buffers().Await(),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST(BasicStringArrayTest, Delete) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
BasicStringArray::Buffers buffers;
buffers.push_back({"abc", "def"});
absl::Notification on_done_with_buffer_called;
BasicStringArray::OnDoneWithBuffer on_done_with_buffer =
[&on_done_with_buffer_called]() { on_done_with_buffer_called.Notify(); };
TF_ASSERT_OK_AND_ASSIGN(
auto array,
CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer)));
tsl::Env::Default()->SchedClosure([&]() { array->Delete(); });
on_done_with_buffer_called.WaitForNotification();
EXPECT_TRUE(array->IsDeleted());
}
TEST(GetReadyFutureTest, SuccessCase) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto promise = Future<BasicStringArray::Buffers>::CreatePromise();
auto buffers_future = Future<BasicStringArray::Buffers>(promise);
TF_ASSERT_OK_AND_ASSIGN(auto array,
CreateTestArray(client.get(), buffers_future,
nullptr));
auto ready_future = array->GetReadyFuture();
EXPECT_FALSE(ready_future.IsKnownReady());
BasicStringArray::Buffers buffers;
buffers.push_back({"abc", "def"});
tsl::Env::Default()->SchedClosure([&]() { promise.Set(buffers); });
TF_EXPECT_OK(ready_future.Await());
}
TEST(GetReadyFutureTest, FailureCases) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto promise = Future<BasicStringArray::Buffers>::CreatePromise();
auto buffers_future = Future<BasicStringArray::Buffers>(promise);
TF_ASSERT_OK_AND_ASSIGN(auto array,
CreateTestArray(client.get(), buffers_future,
nullptr));
auto ready_future = array->GetReadyFuture();
EXPECT_FALSE(ready_future.IsKnownReady());
tsl::Env::Default()->SchedClosure(
[&]() { promise.Set(absl::InternalError("injected error")); });
EXPECT_THAT(ready_future.Await(), StatusIs(absl::StatusCode::kInternal));
}
TEST(MakeArrayFromHostBufferTest, SuccessCase) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
Shape shape({1});
Device* device = client->addressable_devices().at(0);
std::shared_ptr<const Sharding> sharding =
SingleDeviceSharding::Create(device, MemoryKind());
auto string_views = std::make_shared<std::vector<absl::string_view>>();
string_views->push_back("abc");
string_views->push_back("def");
const void* data = string_views->data();
auto on_done_with_host_buffer = [string_views = std::move(string_views)]() {};
TF_ASSERT_OK(client->MakeArrayFromHostBuffer(
data, DType(DType::kString), shape,
std::nullopt, std::move(sharding),
Client::HostBufferSemantics::kImmutableOnlyDuringCall,
std::move(on_done_with_host_buffer)));
}
TEST(MakeArrayFromHostBufferTest, FailureCases) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
Shape shape({1});
Device* device = client->addressable_devices().at(0);
std::shared_ptr<const Sharding> single_device_sharding =
SingleDeviceSharding::Create(device, MemoryKind());
auto string_views = std::make_shared<std::vector<absl::string_view>>();
string_views->push_back("abc");
string_views->push_back("def");
const void* data = string_views->data();
auto on_done_with_host_buffer = [string_views = std::move(string_views)]() {};
EXPECT_THAT(
client->MakeArrayFromHostBuffer(
data, DType(DType::kString), shape,
std::optional<absl::Span<const int64_t>>({8}),
single_device_sharding,
Client::HostBufferSemantics::kImmutableOnlyDuringCall,
on_done_with_host_buffer),
StatusIs(absl::StatusCode::kInvalidArgument));
std::shared_ptr<const Sharding> opaque_sharding =
OpaqueSharding::Create(BasicDeviceList::Create({device}), MemoryKind());
EXPECT_THAT(client->MakeArrayFromHostBuffer(
data, DType(DType::kString), shape,
std::nullopt, opaque_sharding,
Client::HostBufferSemantics::kImmutableOnlyDuringCall,
on_done_with_host_buffer),
StatusIs(absl::StatusCode::kInvalidArgument));
for (Client::HostBufferSemantics host_buffer_semantics :
{Client::HostBufferSemantics::kImmutableUntilTransferCompletes,
Client::HostBufferSemantics::kImmutableZeroCopy,
Client::HostBufferSemantics::kMutableZeroCopy}) {
SCOPED_TRACE(
absl::StrCat("host_buffer_semantics: ", host_buffer_semantics));
EXPECT_THAT(client->MakeArrayFromHostBuffer(
data, DType(DType::kString), shape,
std::nullopt, single_device_sharding,
host_buffer_semantics, on_done_with_host_buffer),
StatusIs(absl::StatusCode::kInvalidArgument));
}
}
// Makes a ready, shape-{1} string array on `device` from `contents` via the
// client's MakeArrayFromHostBuffer path.
absl::StatusOr<tsl::RCReference<Array>> MakeSingleDeviceStringTestArray(
    absl::Span<const std::string> contents, Client* client,
    Device* const device) {
  Shape shape({1});
  std::shared_ptr<const Sharding> sharding =
      SingleDeviceSharding::Create(device, MemoryKind());
  auto string_views = std::make_shared<std::vector<absl::string_view>>();
  for (const auto& content : contents) {
    string_views->push_back(content);
  }
  // The host buffer is the array of string_views; the callback's capture keeps
  // them alive for the duration of the call.
  const void* data = string_views->data();
  auto on_done_with_host_buffer = [string_views = std::move(string_views)]() {};
  return client->MakeArrayFromHostBuffer(
      data, DType(DType::kString), shape,
      std::nullopt, std::move(sharding),
      Client::HostBufferSemantics::kImmutableOnlyDuringCall,
      std::move(on_done_with_host_buffer));
}
// Makes a ready 2x3 f32 array (values 0..5) on `device`; used by tests that
// need a non-string array.
absl::StatusOr<tsl::RCReference<Array>> MakeSingleDeviceFloatTestArray(
    Client* client, Device* const device) {
  DType dtype(DType::kF32);
  Shape shape({2, 3});
  auto data = std::make_unique<std::vector<float>>(6);
  std::iota(data->begin(), data->end(), 0);
  std::shared_ptr<const Sharding> sharding =
      SingleDeviceSharding::Create(device, MemoryKind());
  return client->MakeArrayFromHostBuffer(
      data->data(), dtype, shape,
      std::nullopt, sharding,
      Client::HostBufferSemantics::kImmutableOnlyDuringCall,
      nullptr);
}
// Builds a two-shard string array (shape {2, 1}, one string per shard) spread
// over the client's first two devices, by assembling two single-device arrays.
// Requires at least 2 input strings and 2 addressable devices.
absl::StatusOr<tsl::RCReference<Array>> MakeShardedStringTestArray(
    Client* client, absl::Span<const std::string> data,
    bool is_fully_replicated) {
  if (data.size() < 2) {
    return absl::InvalidArgumentError(absl::StrCat(
        "Input data has too few strings. Need at least 2. got: ", data.size()));
  }
  auto devices = client->addressable_devices();
  if (devices.size() < 2) {
    return absl::InvalidArgumentError(absl::StrCat(
        "Test client has too few devices. Need 2, got:", devices.size()));
  }
  // Per-shard shape is {1}; `is_fully_replicated` lets callers exercise the
  // FullyReplicatedShard code paths.
  std::shared_ptr<const Sharding> sharding = ConcreteEvenSharding::Create(
      BasicDeviceList::Create({devices[0], devices[1]}), MemoryKind(),
      Shape({2, 1}), Shape({1}), is_fully_replicated);
  std::vector<tsl::RCReference<Array>> arrays;
  for (int i = 0; i < 2; ++i) {
    TF_ASSIGN_OR_RETURN(auto array, MakeSingleDeviceStringTestArray(
                                        {data[i]}, client, devices[i]));
    arrays.push_back(std::move(array));
  }
  return client->AssembleArrayFromSingleDeviceArrays(
      Shape({2, 1}), std::move(sharding), absl::MakeSpan(arrays),
      ArrayCopySemantics::kAlwaysCopy);
}
TEST(AssembleArrayFromSingleDeviceArraysTest,
SuccessWithReadySingleDeviceArrays) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
const std::vector<std::string> per_shard_contents({"shard 0", "shard 1"});
TF_ASSERT_OK_AND_ASSIGN(
auto array, MakeShardedStringTestArray(client.get(), per_shard_contents,
false));
auto basic_string_array = llvm::dyn_cast<BasicStringArray>(array.get());
ASSERT_NE(basic_string_array, nullptr);
TF_ASSERT_OK_AND_ASSIGN(auto buffers, basic_string_array->buffers().Await());
EXPECT_EQ(buffers.size(), 2);
for (int i = 0; i < buffers.size(); ++i) {
SCOPED_TRACE(absl::StrCat("buffer #", i));
auto buffer = buffers[i];
EXPECT_THAT(buffer, testing::ElementsAre(per_shard_contents[i]));
}
}
TEST(AssembleArrayFromSingleDeviceArraysTest, FailsWithNonStringArrays) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
std::shared_ptr<const Sharding> opaque_sharding = OpaqueSharding::Create(
BasicDeviceList::Create({devices[0], devices[1]}), MemoryKind());
std::vector<tsl::RCReference<Array>> arrays(2);
TF_ASSERT_OK_AND_ASSIGN(
arrays[0], MakeSingleDeviceFloatTestArray(client.get(), devices[0]));
TF_ASSERT_OK_AND_ASSIGN(
arrays[1], MakeSingleDeviceStringTestArray({"string_array_contents"},
client.get(), devices[1]));
EXPECT_THAT(client->AssembleArrayFromSingleDeviceArrays(
Shape({2}), std::move(opaque_sharding),
absl::MakeSpan(arrays), ArrayCopySemantics::kAlwaysCopy),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AssembleArrayFromSingleDeviceArraysTest,
FailsWithNonSingleDeviceStringArrays) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
std::shared_ptr<const Sharding> opaque_sharding = OpaqueSharding::Create(
BasicDeviceList::Create({devices[0], devices[1]}), MemoryKind());
std::vector<tsl::RCReference<Array>> arrays(2);
const std::vector<std::string> per_shard_contents({"abc", "def"});
TF_ASSERT_OK_AND_ASSIGN(
arrays[0], MakeShardedStringTestArray(client.get(), per_shard_contents,
false));
TF_ASSERT_OK_AND_ASSIGN(
arrays[1], MakeSingleDeviceStringTestArray({"string_array_contents"},
client.get(), devices[1]));
EXPECT_THAT(client->AssembleArrayFromSingleDeviceArrays(
Shape({2}), std::move(opaque_sharding),
absl::MakeSpan(arrays), ArrayCopySemantics::kAlwaysCopy),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AssembleArrayFromSingleDeviceArraysTest,
FromNonReadySingleDeviceArraysSuccess) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
std::shared_ptr<const Sharding> opaque_sharding = OpaqueSharding::Create(
BasicDeviceList::Create({devices[0], devices[1]}), MemoryKind());
std::vector<tsl::RCReference<Array>> arrays;
std::vector<Promise<BasicStringArray::Buffers>> promises;
arrays.reserve(2);
auto buf_and_on_done_with_buffer = MakeBuffersAndOnDoneWithBuffer({"abc"});
auto buffers0 = buf_and_on_done_with_buffer.first;
auto on_done_with_buffer0 = buf_and_on_done_with_buffer.second;
TF_ASSERT_OK_AND_ASSIGN(
auto ret, CreateNonReadyTestArray(client.get(), devices[0],
std::move(on_done_with_buffer0)));
arrays.push_back(std::move(ret.first));
promises.push_back(std::move(ret.second));
buf_and_on_done_with_buffer = MakeBuffersAndOnDoneWithBuffer({"def"});
auto buffers1 = buf_and_on_done_with_buffer.first;
auto on_done_with_buffer1 = buf_and_on_done_with_buffer.second;
TF_ASSERT_OK_AND_ASSIGN(
ret, CreateNonReadyTestArray(client.get(), devices[1],
std::move(on_done_with_buffer1)));
arrays.push_back(std::move(ret.first));
promises.push_back(std::move(ret.second));
TF_ASSERT_OK_AND_ASSIGN(
auto array, client->AssembleArrayFromSingleDeviceArrays(
Shape({1}), std::move(opaque_sharding),
absl::MakeSpan(arrays), ArrayCopySemantics::kAlwaysCopy));
tsl::Env::Default()->SchedClosure(([&]() mutable {
promises[0].Set(buffers0);
promises[1].Set(buffers1);
}));
auto basic_string_array = llvm::dyn_cast<BasicStringArray>(array.get());
ASSERT_NE(basic_string_array, nullptr);
auto buffers_future = basic_string_array->buffers();
TF_ASSERT_OK_AND_ASSIGN(auto buffers, buffers_future.Await());
EXPECT_EQ(buffers.size(), 2);
EXPECT_THAT(buffers[0], testing::ElementsAre("abc"));
EXPECT_THAT(buffers[1], testing::ElementsAre("def"));
}
TEST(AssembleArrayFromSingleDeviceArraysTest,
FromNonReadySingleDeviceArraysFailure) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
std::shared_ptr<const Sharding> opaque_sharding = OpaqueSharding::Create(
BasicDeviceList::Create({devices[0], devices[1]}), MemoryKind());
std::vector<tsl::RCReference<Array>> arrays;
std::vector<Promise<BasicStringArray::Buffers>> promises;
arrays.reserve(2);
TF_ASSERT_OK_AND_ASSIGN(
auto ret, CreateNonReadyTestArray(client.get(), devices[0],
nullptr));
arrays.push_back(std::move(ret.first));
promises.push_back(std::move(ret.second));
TF_ASSERT_OK_AND_ASSIGN(
ret, CreateNonReadyTestArray(client.get(), devices[1],
nullptr));
arrays.push_back(std::move(ret.first));
promises.push_back(std::move(ret.second));
TF_ASSERT_OK_AND_ASSIGN(
auto array, client->AssembleArrayFromSingleDeviceArrays(
Shape({1}), std::move(opaque_sharding),
absl::MakeSpan(arrays), ArrayCopySemantics::kAlwaysCopy));
absl::Notification done_readying_single_device_arrays;
tsl::Env::Default()->SchedClosure(([&]() mutable {
promises[0].Set(absl::InternalError("injected from the test"));
promises[1].Set(absl::InternalError("injected from the test"));
done_readying_single_device_arrays.Notify();
}));
auto basic_string_array = llvm::dyn_cast<BasicStringArray>(array.get());
ASSERT_NE(basic_string_array, nullptr);
auto buffers_future = basic_string_array->buffers();
EXPECT_THAT(buffers_future.Await(),
StatusIs(absl::StatusCode::kInternal,
HasSubstr("injected from the test")));
done_readying_single_device_arrays.WaitForNotification();
}
// Disassembling a single-device array yields one array whose buffers match
// the original contents.
TEST(DisassembleArrayIntoSingleDeviceArrays,
     SingleDeviceArrayDisassembleSuccess) {
  TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
  auto [buffers, on_done_with_buffer] = MakeBuffersAndOnDoneWithBuffer({"abc"});
  TF_ASSERT_OK_AND_ASSIGN(
      auto array,
      CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
                      std::move(on_done_with_buffer)));
  TF_ASSERT_OK_AND_ASSIGN(auto disassembled_arrays,
                          array->DisassembleIntoSingleDeviceArrays(
                              ArrayCopySemantics::kAlwaysCopy));
  ASSERT_EQ(disassembled_arrays.size(), 1);
  auto basic_string_array =
      llvm::dyn_cast<BasicStringArray>(disassembled_arrays[0].get());
  // Guard the dereference below, consistent with the other tests in this file.
  ASSERT_NE(basic_string_array, nullptr);
  TF_ASSERT_OK_AND_ASSIGN(auto new_buffers,
                          basic_string_array->buffers().Await());
  // Bug fix: the assertions previously checked the input `buffers` instead of
  // the awaited `new_buffers`, so the disassembly result was never verified.
  ASSERT_EQ(new_buffers.size(), 1);
  EXPECT_THAT(new_buffers[0], testing::ElementsAre("abc"));
}
TEST(DisassembleArrayIntoSingleDeviceArrays, ShardedArrayDisassembleSuccess) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
const std::vector<std::string> per_shard_contents({"abc", "def"});
TF_ASSERT_OK_AND_ASSIGN(
auto array, MakeShardedStringTestArray(client.get(), per_shard_contents,
false));
TF_ASSERT_OK_AND_ASSIGN(auto disassembled_arrays,
array->DisassembleIntoSingleDeviceArrays(
ArrayCopySemantics::kAlwaysCopy));
ASSERT_EQ(disassembled_arrays.size(), 2);
for (int i = 0; i < disassembled_arrays.size(); ++i) {
SCOPED_TRACE(absl::StrCat("dissembled array: ", i));
auto basic_string_array =
llvm::dyn_cast<BasicStringArray>(disassembled_arrays[i].get());
TF_ASSERT_OK_AND_ASSIGN(auto buffer, basic_string_array->buffers().Await());
ASSERT_EQ(buffer.size(), 1);
EXPECT_THAT(buffer[0], testing::ElementsAre(per_shard_contents[i]));
}
}
TEST(DisassembleArrayIntoSingleDeviceArrays, FailsIfTheArrayHasBeenDeleted) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto [buffers, on_done_with_buffer] = MakeBuffersAndOnDoneWithBuffer({"abc"});
TF_ASSERT_OK_AND_ASSIGN(
auto array,
CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer)));
array->Delete();
EXPECT_THAT(
array->DisassembleIntoSingleDeviceArrays(ArrayCopySemantics::kAlwaysCopy),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST(CopyTest, SuccessSingleDeviceShardedArray) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
auto [buffers, on_done_with_buffer] = MakeBuffersAndOnDoneWithBuffer({"abc"});
std::vector<tsl::RCReference<Array>> arrays;
TF_ASSERT_OK_AND_ASSIGN(
arrays.emplace_back(),
CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer)));
TF_ASSERT_OK_AND_ASSIGN(
auto new_arrays,
client->CopyArrays(absl::MakeSpan(arrays),
BasicDeviceList::Create({devices[1]}), MemoryKind(),
ArrayCopySemantics::kAlwaysCopy));
auto new_basic_string_array =
llvm::dyn_cast<BasicStringArray>(new_arrays[0].get());
TF_ASSERT_OK_AND_ASSIGN(auto new_buffers,
new_basic_string_array->buffers().Await());
ASSERT_EQ(new_buffers.size(), 1);
EXPECT_THAT(new_buffers[0], testing::ElementsAre("abc"));
}
TEST(CopyTest, SuccessMultiDeviceShardedArray) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 4);
const std::vector<std::string> per_shard_contents({"shard 0", "shard 1"});
std::vector<tsl::RCReference<Array>> arrays;
TF_ASSERT_OK_AND_ASSIGN(
arrays.emplace_back(),
MakeShardedStringTestArray(client.get(), per_shard_contents,
false));
TF_ASSERT_OK_AND_ASSIGN(
auto new_arrays,
client->CopyArrays(absl::MakeSpan(arrays),
BasicDeviceList::Create({devices[2], devices[3]}),
MemoryKind(), ArrayCopySemantics::kAlwaysCopy));
auto new_basic_string_array =
llvm::dyn_cast<BasicStringArray>(new_arrays[0].get());
TF_ASSERT_OK_AND_ASSIGN(auto new_buffers,
new_basic_string_array->buffers().Await());
ASSERT_EQ(new_buffers.size(), 2);
EXPECT_THAT(new_buffers[0], testing::ElementsAre("shard 0"));
EXPECT_THAT(new_buffers[1], testing::ElementsAre("shard 1"));
}
TEST(CopyTest, FailsAfterDeletion) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
auto [buffers, on_done_with_buffer] = MakeBuffersAndOnDoneWithBuffer({"abc"});
std::vector<tsl::RCReference<Array>> arrays;
TF_ASSERT_OK_AND_ASSIGN(
arrays.emplace_back(),
CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer)));
arrays[0]->Delete();
EXPECT_THAT(client->CopyArrays(absl::MakeSpan(arrays),
BasicDeviceList::Create({devices[1]}),
MemoryKind(), ArrayCopySemantics::kAlwaysCopy),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST(CopyTest, FailsWithDifferentNumbersDevices) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
auto [buffers, on_done_with_buffer] = MakeBuffersAndOnDoneWithBuffer({"abc"});
std::vector<tsl::RCReference<Array>> arrays;
TF_ASSERT_OK_AND_ASSIGN(
arrays.emplace_back(),
CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer)));
EXPECT_THAT(
client->CopyArrays(absl::MakeSpan(arrays),
BasicDeviceList::Create({devices[0], devices[1]}),
MemoryKind(), ArrayCopySemantics::kAlwaysCopy),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(CopyTest, NonReadySourceArraySuccessfullyBecomesReadyAfterCopy) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
auto buf_and_on_done_with_buffer = MakeBuffersAndOnDoneWithBuffer({"abc"});
auto buffers = buf_and_on_done_with_buffer.first;
auto on_done_with_buffer = buf_and_on_done_with_buffer.second;
TF_ASSERT_OK_AND_ASSIGN(
auto ret, CreateNonReadyTestArray(client.get(), devices[0],
std::move(on_done_with_buffer)));
std::vector<tsl::RCReference<Array>> arrays;
arrays.push_back(std::move(ret.first));
auto promise = std::move(ret.second);
TF_ASSERT_OK(client->CopyArrays(
absl::MakeSpan(arrays), BasicDeviceList::Create({devices[1]}),
MemoryKind(), ArrayCopySemantics::kAlwaysCopy));
absl::Notification done_readying_single_device_arrays;
tsl::Env::Default()->SchedClosure(([&]() mutable {
promise.Set(std::move(buffers));
done_readying_single_device_arrays.Notify();
}));
auto basic_string_array = llvm::dyn_cast<BasicStringArray>(arrays[0].get());
ASSERT_NE(basic_string_array, nullptr);
TF_ASSERT_OK_AND_ASSIGN(auto new_buffers,
basic_string_array->buffers().Await());
ASSERT_EQ(new_buffers.size(), 1);
EXPECT_THAT(new_buffers[0], testing::ElementsAre("abc"));
done_readying_single_device_arrays.WaitForNotification();
}
TEST(CopyTest, NonReadySourceArrayFailsToBecomeReadyAfterCopy) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
auto buf_and_on_done_with_buffer = MakeBuffersAndOnDoneWithBuffer({"abc"});
auto buffers = buf_and_on_done_with_buffer.first;
auto on_done_with_buffer = buf_and_on_done_with_buffer.second;
TF_ASSERT_OK_AND_ASSIGN(
auto ret, CreateNonReadyTestArray(client.get(), devices[0],
std::move(on_done_with_buffer)));
std::vector<tsl::RCReference<Array>> arrays;
arrays.push_back(std::move(ret.first));
auto promise = std::move(ret.second);
TF_ASSERT_OK(client->CopyArrays(
absl::MakeSpan(arrays), BasicDeviceList::Create({devices[1]}),
MemoryKind(), ArrayCopySemantics::kAlwaysCopy));
absl::Notification done_readying_single_device_arrays;
tsl::Env::Default()->SchedClosure(([&]() mutable {
promise.Set(absl::InternalError("injected from the test"));
done_readying_single_device_arrays.Notify();
}));
auto basic_string_array = llvm::dyn_cast<BasicStringArray>(arrays[0].get());
ASSERT_NE(basic_string_array, nullptr);
auto buffers_future = basic_string_array->buffers();
EXPECT_THAT(buffers_future.Await(),
StatusIs(absl::StatusCode::kInternal,
HasSubstr("injected from the test")));
done_readying_single_device_arrays.WaitForNotification();
}
// FullyReplicatedShard on a single-device array returns an array with the
// same single-string buffer.
TEST(FullyReplicatedShardTest, SuccessSingleDeviceShardedArray) {
  TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
  constexpr char kContents[] = "abc";
  auto [buffers, on_done_with_buffer] =
      MakeBuffersAndOnDoneWithBuffer({kContents});
  TF_ASSERT_OK_AND_ASSIGN(
      auto array,
      CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
                      std::move(on_done_with_buffer)));
  TF_ASSERT_OK_AND_ASSIGN(
      auto replicated_shard,
      array->FullyReplicatedShard(ArrayCopySemantics::kAlwaysCopy));
  auto replicated_basic_string_array =
      llvm::dyn_cast<BasicStringArray>(replicated_shard.get());
  TF_ASSERT_OK_AND_ASSIGN(auto replicated_buffers,
                          replicated_basic_string_array->buffers().Await());
  ASSERT_EQ(replicated_buffers.size(), 1);
  EXPECT_THAT(replicated_buffers[0], testing::ElementsAre(kContents));
}
TEST(FullyReplicatedShardTest, SuccessMultiDeviceShardedArray) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
constexpr char kReplicatedContents[] = "abc";
const std::vector<std::string> per_shard_contents(
{kReplicatedContents, kReplicatedContents});
TF_ASSERT_OK_AND_ASSIGN(
auto array, MakeShardedStringTestArray(client.get(), per_shard_contents,
true));
TF_ASSERT_OK_AND_ASSIGN(
auto replicated_shard,
array->FullyReplicatedShard(ArrayCopySemantics::kAlwaysCopy));
auto replicated_basic_string_array =
llvm::dyn_cast<BasicStringArray>(replicated_shard.get());
TF_ASSERT_OK_AND_ASSIGN(auto replicated_buffers,
replicated_basic_string_array->buffers().Await());
ASSERT_EQ(replicated_buffers.size(), 1);
EXPECT_THAT(replicated_buffers[0], testing::ElementsAre(kReplicatedContents));
}
TEST(FullyReplicatedShardTest, FailsWithNonFullyReplicatedArrays) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
const std::vector<std::string> per_shard_contents({"abc", "def"});
TF_ASSERT_OK_AND_ASSIGN(
auto array, MakeShardedStringTestArray(client.get(), per_shard_contents,
false));
EXPECT_THAT(array->FullyReplicatedShard(ArrayCopySemantics::kAlwaysCopy),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST(FullyReplicatedShardTest, FailsAfterDeletion) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
constexpr char kContents[] = "abc";
auto [buffers, on_done_with_buffer] =
MakeBuffersAndOnDoneWithBuffer({kContents});
TF_ASSERT_OK_AND_ASSIGN(
auto array,
CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer)));
array->Delete();
EXPECT_THAT(array->FullyReplicatedShard(ArrayCopySemantics::kAlwaysCopy),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST(LayoutTest, Success) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
constexpr char kContents[] = "abc";
auto [buffers, on_done_with_buffer] =
MakeBuffersAndOnDoneWithBuffer({kContents});
TF_ASSERT_OK_AND_ASSIGN(
auto array,
CreateTestArray(client.get(),
Future<BasicStringArray::Buffers>(std::move(buffers)),
std::move(on_done_with_buffer)));
TF_ASSERT_OK_AND_ASSIGN(auto layout, array->layout());
EXPECT_TRUE(layout->Serialize().empty());
}
TEST(LayoutTest, FailsAfterDeletion) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
constexpr char kContents[] = "abc";
auto [buffers, on_done_with_buffer] =
MakeBuffersAndOnDoneWithBuffer({kContents});
TF_ASSERT_OK_AND_ASSIGN(
auto array,
CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer)));
array->Delete();
EXPECT_THAT(array->layout(), StatusIs(absl::StatusCode::kFailedPrecondition));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/pjrt_ifrt/basic_string_array.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/pjrt_ifrt/basic_string_array_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
88e33272-0175-44d5-8bd6-5c049c7994d0 | cpp | google/quiche | binary_http_message | quiche/binary_http/binary_http_message.cc | quiche/binary_http/binary_http_message_test.cc | #include "quiche/binary_http/binary_http_message.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/ascii.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "quiche/common/quiche_callbacks.h"
#include "quiche/common/quiche_data_reader.h"
#include "quiche/common/quiche_data_writer.h"
namespace quiche {
namespace {
// Framing indicator byte written (and expected) as the first byte of every
// serialized message: 0 marks a known-length request, 1 a known-length
// response. Other framing types are rejected as unimplemented.
constexpr uint8_t kKnownLengthRequestFraming = 0;
constexpr uint8_t kKnownLengthResponseFraming = 1;
// Reads a varint62 length-prefixed string from `reader` into `data`.
// Returns false — leaving `data` untouched — if the read fails.
bool ReadStringValue(quiche::QuicheDataReader& reader, std::string& data) {
  absl::string_view view;
  const bool ok = reader.ReadStringPieceVarInt62(&view);
  if (ok) {
    data.assign(view.data(), view.size());
  }
  return ok;
}
// Returns true iff every byte of `data` is NUL, i.e. `data` is valid
// message padding.
bool IsValidPadding(absl::string_view data) {
  for (const char c : data) {
    if (c != '\0') {
      return false;
    }
  }
  return true;
}
// Decodes the four request control-data strings — method, scheme,
// authority, path — from `reader`, in wire order.
absl::StatusOr<BinaryHttpRequest::ControlData> DecodeControlData(
    quiche::QuicheDataReader& reader) {
  BinaryHttpRequest::ControlData control_data;
  // Destinations paired with their names, in the order they appear on the
  // wire; the names feed the per-field error messages.
  const std::pair<std::string*, absl::string_view> slots[] = {
      {&control_data.method, "method"},
      {&control_data.scheme, "scheme"},
      {&control_data.authority, "authority"},
      {&control_data.path, "path"}};
  for (const auto& [destination, label] : slots) {
    if (!ReadStringValue(reader, *destination)) {
      return absl::InvalidArgumentError(
          absl::StrCat("Failed to read ", label, "."));
    }
  }
  return control_data;
}
// Decodes one varint62 length-prefixed field section from `reader`,
// invoking `callback` once per (name, value) pair in wire order.
absl::Status DecodeFields(quiche::QuicheDataReader& reader,
                          quiche::UnretainedCallback<void(
                              absl::string_view name, absl::string_view value)>
                              callback) {
  // The whole field section is itself length-prefixed; read it first so the
  // inner reader cannot run past the section boundary.
  absl::string_view fields;
  if (!reader.ReadStringPieceVarInt62(&fields)) {
    return absl::InvalidArgumentError("Failed to read fields.");
  }
  quiche::QuicheDataReader fields_reader(fields);
  while (!fields_reader.IsDoneReading()) {
    absl::string_view name;
    if (!fields_reader.ReadStringPieceVarInt62(&name)) {
      return absl::InvalidArgumentError("Failed to read field name.");
    }
    absl::string_view value;
    if (!fields_reader.ReadStringPieceVarInt62(&value)) {
      return absl::InvalidArgumentError("Failed to read field value.");
    }
    callback(name, value);
  }
  return absl::OkStatus();
}
// Decodes the header field section and (if present) the length-prefixed
// body into `message`. A message that ends right after its fields simply
// has no body.
absl::Status DecodeFieldsAndBody(quiche::QuicheDataReader& reader,
                                 BinaryHttpMessage& message) {
  if (const absl::Status status = DecodeFields(
          reader,
          [&message](absl::string_view name, absl::string_view value) {
            message.AddHeaderField({std::string(name), std::string(value)});
          });
      !status.ok()) {
    return status;
  }
  // The body is optional; nothing left to read is a complete message.
  if (reader.IsDoneReading()) {
    return absl::OkStatus();
  }
  absl::string_view body;
  if (!reader.ReadStringPieceVarInt62(&body)) {
    return absl::InvalidArgumentError("Failed to read body.");
  }
  message.set_body(std::string(body));
  return absl::OkStatus();
}
// Decodes a known-length request (framing byte already consumed): control
// data, header fields, optional body, then all-zero trailing padding.
absl::StatusOr<BinaryHttpRequest> DecodeKnownLengthRequest(
    quiche::QuicheDataReader& reader) {
  absl::StatusOr<BinaryHttpRequest::ControlData> control_data =
      DecodeControlData(reader);
  if (!control_data.ok()) {
    return control_data.status();
  }
  BinaryHttpRequest request(*std::move(control_data));
  const absl::Status fields_status = DecodeFieldsAndBody(reader, request);
  if (!fields_status.ok()) {
    return fields_status;
  }
  // Everything after the body must be NUL padding.
  if (!IsValidPadding(reader.PeekRemainingPayload())) {
    return absl::InvalidArgumentError("Non-zero padding.");
  }
  request.set_num_padding_bytes(reader.BytesRemaining());
  return request;
}
// Decodes a known-length response (framing byte already consumed). Every
// leading 1xx status code introduces an informational-response field
// section; the first non-1xx status code is the final response, followed by
// its fields, optional body, and all-zero padding.
absl::StatusOr<BinaryHttpResponse> DecodeKnownLengthResponse(
    quiche::QuicheDataReader& reader) {
  std::vector<std::pair<uint16_t, std::vector<BinaryHttpMessage::Field>>>
      informational_responses;
  uint64_t status_code;
  bool reading_response_control_data = true;
  while (reading_response_control_data) {
    if (!reader.ReadVarInt62(&status_code)) {
      return absl::InvalidArgumentError("Failed to read status code.");
    }
    if (status_code >= 100 && status_code <= 199) {
      std::vector<BinaryHttpMessage::Field> fields;
      if (const absl::Status status = DecodeFields(
              reader,
              [&fields](absl::string_view name, absl::string_view value) {
                fields.push_back({std::string(name), std::string(value)});
              });
          !status.ok()) {
        return status;
      }
      informational_responses.emplace_back(status_code, std::move(fields));
    } else {
      reading_response_control_data = false;
    }
  }
  BinaryHttpResponse response(status_code);
  // Iterate by mutable reference so the field vectors can actually be moved
  // into the response; std::move on a const-reference element silently
  // copies instead.
  for (auto& informational_response : informational_responses) {
    if (const absl::Status status = response.AddInformationalResponse(
            informational_response.first,
            std::move(informational_response.second));
        !status.ok()) {
      return status;
    }
  }
  if (const absl::Status status = DecodeFieldsAndBody(reader, response);
      !status.ok()) {
    return status;
  }
  if (!IsValidPadding(reader.PeekRemainingPayload())) {
    return absl::InvalidArgumentError("Non-zero padding.");
  }
  response.set_num_padding_bytes(reader.BytesRemaining());
  return response;
}
// On-the-wire size of `s`: its varint62 length prefix plus the bytes
// themselves.
uint64_t StringPieceVarInt62Len(absl::string_view s) {
  const uint64_t length = s.length();
  return quiche::QuicheDataWriter::GetVarInt62Len(length) + length;
}
}
// Appends `field` to the in-order list; wire order is insertion order.
void BinaryHttpMessage::Fields::AddField(BinaryHttpMessage::Field field) {
  fields_.emplace_back(std::move(field));
}
// Writes the varint62-prefixed field section: the total size of all pairs,
// then each name/value pair in insertion order.
absl::Status BinaryHttpMessage::Fields::Encode(
    quiche::QuicheDataWriter& writer) const {
  if (!writer.WriteVarInt62(EncodedFieldsSize())) {
    return absl::InvalidArgumentError("Failed to write encoded field size.");
  }
  for (const BinaryHttpMessage::Field& entry : fields_) {
    if (!writer.WriteStringPieceVarInt62(entry.name)) {
      return absl::InvalidArgumentError("Failed to write field name.");
    }
    if (!writer.WriteStringPieceVarInt62(entry.value)) {
      return absl::InvalidArgumentError("Failed to write field value.");
    }
  }
  return absl::OkStatus();
}
// Total encoded size of the section: the field bytes plus their own
// varint62 length prefix.
size_t BinaryHttpMessage::Fields::EncodedSize() const {
  const size_t fields_size = EncodedFieldsSize();
  return quiche::QuicheDataWriter::GetVarInt62Len(fields_size) + fields_size;
}
// Size of the field pairs alone, excluding the section's length prefix.
size_t BinaryHttpMessage::Fields::EncodedFieldsSize() const {
  size_t total = 0;
  for (const BinaryHttpMessage::Field& entry : fields_) {
    total += StringPieceVarInt62Len(entry.name);
    total += StringPieceVarInt62Len(entry.value);
  }
  return total;
}
// Adds a header field, lower-casing its name. A "host" header is tracked in
// has_host_ (request serialization then writes an empty authority). Returns
// `this` so calls can be chained.
BinaryHttpMessage* BinaryHttpMessage::AddHeaderField(
    BinaryHttpMessage::Field field) {
  // Deliberately not const: a const string cannot be moved from, so the
  // std::move below would silently copy.
  std::string lower_name = absl::AsciiStrToLower(field.name);
  if (lower_name == "host") {
    has_host_ = true;
  }
  header_fields_.AddField({std::move(lower_name), std::move(field.value)});
  return this;
}
// Writes the header field section followed by the length-prefixed body.
absl::Status BinaryHttpMessage::EncodeKnownLengthFieldsAndBody(
    quiche::QuicheDataWriter& writer) const {
  const absl::Status fields_status = header_fields_.Encode(writer);
  if (!fields_status.ok()) {
    return fields_status;
  }
  if (!writer.WriteStringPieceVarInt62(body_)) {
    return absl::InvalidArgumentError("Failed to encode body.");
  }
  return absl::OkStatus();
}
// Encoded size of the header field section plus the length-prefixed body.
size_t BinaryHttpMessage::EncodedKnownLengthFieldsAndBodySize() const {
  return StringPieceVarInt62Len(body_) + header_fields_.EncodedSize();
}
// Records a 1xx informational response to be serialized before the final
// status. Status codes outside [100, 199] are rejected.
absl::Status BinaryHttpResponse::AddInformationalResponse(
    uint16_t status_code, std::vector<Field> header_fields) {
  if (status_code < 100) {
    return absl::InvalidArgumentError("status code < 100");
  }
  if (status_code > 199) {
    return absl::InvalidArgumentError("status code > 199");
  }
  InformationalResponse informational(status_code);
  for (Field& field : header_fields) {
    informational.AddField(field.name, std::move(field.value));
  }
  informational_response_control_data_.push_back(std::move(informational));
  return absl::OkStatus();
}
// Serializes the response; only known-length framing is produced by this
// implementation.
absl::StatusOr<std::string> BinaryHttpResponse::Serialize() const {
  return EncodeAsKnownLength();
}
// Encodes the response in known-length framing: framing byte, each 1xx
// informational response, final status code, header fields, body, padding.
absl::StatusOr<std::string> BinaryHttpResponse::EncodeAsKnownLength() const {
  std::string data;
  // EncodedSize() accounts for every section written below, padding
  // included, so the writer exactly fills `data`.
  data.resize(EncodedSize());
  quiche::QuicheDataWriter writer(data.size(), data.data());
  if (!writer.WriteUInt8(kKnownLengthResponseFraming)) {
    return absl::InvalidArgumentError("Failed to write framing indicator");
  }
  for (const auto& informational : informational_response_control_data_) {
    if (const absl::Status status = informational.Encode(writer);
        !status.ok()) {
      return status;
    }
  }
  if (!writer.WriteVarInt62(status_code_)) {
    return absl::InvalidArgumentError("Failed to write status code");
  }
  if (const absl::Status status = EncodeKnownLengthFieldsAndBody(writer);
      !status.ok()) {
    return status;
  }
  // The bytes left over are exactly the requested padding; zero-fill them.
  QUICHE_DCHECK_EQ(writer.remaining(), num_padding_bytes());
  writer.WritePadding();
  return data;
}
// Total known-length size: framing byte, informational responses, status
// code, fields and body, trailing padding.
size_t BinaryHttpResponse::EncodedSize() const {
  size_t size = sizeof(kKnownLengthResponseFraming) +
                quiche::QuicheDataWriter::GetVarInt62Len(status_code_) +
                EncodedKnownLengthFieldsAndBodySize() + num_padding_bytes();
  for (const auto& informational : informational_response_control_data_) {
    size += informational.EncodedSize();
  }
  return size;
}
// Adds a field to the informational response, lower-casing its name.
void BinaryHttpResponse::InformationalResponse::AddField(absl::string_view name,
                                                         std::string value) {
  std::string lower_name = absl::AsciiStrToLower(name);
  fields_.AddField({std::move(lower_name), std::move(value)});
}
// Writes the informational status code followed by its field section. The
// status-code write is checked like every other write in this file; a
// failure surfaces as an error instead of being silently ignored.
absl::Status BinaryHttpResponse::InformationalResponse::Encode(
    quiche::QuicheDataWriter& writer) const {
  if (!writer.WriteVarInt62(status_code_)) {
    return absl::InvalidArgumentError("Failed to write status code.");
  }
  return fields_.Encode(writer);
}
// Encoded size: status-code varint plus the field section.
size_t BinaryHttpResponse::InformationalResponse::EncodedSize() const {
  return fields_.EncodedSize() +
         quiche::QuicheDataWriter::GetVarInt62Len(status_code_);
}
// Serializes the request; only known-length framing is produced by this
// implementation.
absl::StatusOr<std::string> BinaryHttpRequest::Serialize() const {
  return EncodeAsKnownLength();
}
// Writes the request control data: method, scheme, authority, path. When a
// Host header field was added, the authority is written as the empty string
// so the information is not duplicated on the wire.
absl::Status BinaryHttpRequest::EncodeControlData(
    quiche::QuicheDataWriter& writer) const {
  if (!writer.WriteStringPieceVarInt62(control_data_.method)) {
    return absl::InvalidArgumentError("Failed to encode method.");
  }
  if (!writer.WriteStringPieceVarInt62(control_data_.scheme)) {
    return absl::InvalidArgumentError("Failed to encode scheme.");
  }
  if (!has_host()) {
    if (!writer.WriteStringPieceVarInt62(control_data_.authority)) {
      return absl::InvalidArgumentError("Failed to encode authority.");
    }
  } else {
    // Host header present: emit an empty authority slot instead.
    if (!writer.WriteStringPieceVarInt62("")) {
      return absl::InvalidArgumentError("Failed to encode authority.");
    }
  }
  if (!writer.WriteStringPieceVarInt62(control_data_.path)) {
    return absl::InvalidArgumentError("Failed to encode path.");
  }
  return absl::OkStatus();
}
size_t BinaryHttpRequest::EncodedControlDataSize() const {
size_t size = StringPieceVarInt62Len(control_data_.method) +
StringPieceVarInt62Len(control_data_.scheme) +
StringPieceVarInt62Len(control_data_.path);
if (!has_host()) {
size += StringPieceVarInt62Len(control_data_.authority);
} else {
size += StringPieceVarInt62Len("");
}
return size;
}
// Total known-length size: framing byte, control data, fields and body,
// trailing padding.
size_t BinaryHttpRequest::EncodedSize() const {
  size_t size = sizeof(kKnownLengthRequestFraming);
  size += EncodedControlDataSize();
  size += EncodedKnownLengthFieldsAndBodySize();
  size += num_padding_bytes();
  return size;
}
// Encodes the request in known-length framing: framing byte, control data,
// header fields, body, padding.
absl::StatusOr<std::string> BinaryHttpRequest::EncodeAsKnownLength() const {
  std::string data;
  // EncodedSize() accounts for every section written below, padding
  // included, so the writer exactly fills `data`.
  data.resize(EncodedSize());
  quiche::QuicheDataWriter writer(data.size(), data.data());
  if (!writer.WriteUInt8(kKnownLengthRequestFraming)) {
    return absl::InvalidArgumentError("Failed to encode framing indicator.");
  }
  if (const absl::Status status = EncodeControlData(writer); !status.ok()) {
    return status;
  }
  if (const absl::Status status = EncodeKnownLengthFieldsAndBody(writer);
      !status.ok()) {
    return status;
  }
  // The bytes left over are exactly the requested padding; zero-fill them.
  QUICHE_DCHECK_EQ(writer.remaining(), num_padding_bytes());
  writer.WritePadding();
  return data;
}
// Deserializes a request from `data`. Only known-length framing (0) is
// supported; anything else is reported as unimplemented.
absl::StatusOr<BinaryHttpRequest> BinaryHttpRequest::Create(
    absl::string_view data) {
  quiche::QuicheDataReader reader(data);
  uint8_t framing;
  if (!reader.ReadUInt8(&framing)) {
    return absl::InvalidArgumentError("Missing framing indicator.");
  }
  if (framing != kKnownLengthRequestFraming) {
    return absl::UnimplementedError(
        absl::StrCat("Unsupported framing type ", framing));
  }
  return DecodeKnownLengthRequest(reader);
}
// Deserializes a response from `data`. Only known-length framing (1) is
// supported; anything else is reported as unimplemented.
absl::StatusOr<BinaryHttpResponse> BinaryHttpResponse::Create(
    absl::string_view data) {
  quiche::QuicheDataReader reader(data);
  uint8_t framing;
  if (!reader.ReadUInt8(&framing)) {
    return absl::InvalidArgumentError("Missing framing indicator.");
  }
  if (framing != kKnownLengthResponseFraming) {
    return absl::UnimplementedError(
        absl::StrCat("Unsupported framing type ", framing));
  }
  return DecodeKnownLengthResponse(reader);
}
// Human-readable summary of the header fields and body, used by the
// PrintTo hooks below.
std::string BinaryHttpMessage::DebugString() const {
  std::vector<std::string> headers;
  for (const auto& field : GetHeaderFields()) {
    headers.push_back(field.DebugString());
  }
  return absl::StrCat("BinaryHttpMessage{Headers{", absl::StrJoin(headers, ";"),
                      "}Body{", body(), "}}");
}
// Renders the field as "Field{name=value}".
std::string BinaryHttpMessage::Field::DebugString() const {
  std::string out = "Field{";
  absl::StrAppend(&out, name, "=", value, "}");
  return out;
}
// Human-readable summary of an informational response's fields.
std::string BinaryHttpResponse::InformationalResponse::DebugString() const {
  std::vector<std::string> field_strings;
  for (const auto& field : fields()) {
    field_strings.push_back(field.DebugString());
  }
  return absl::StrCat("InformationalResponse{",
                      absl::StrJoin(field_strings, ";"), "}");
}
std::string BinaryHttpResponse::DebugString() const {
std::vector<std::string> irs;
for (const auto& ir : informational_responses()) {
irs.emplace_back(ir.DebugString());
}
return absl::StrCat("BinaryHttpResponse(", status_code_, "){",
BinaryHttpMessage::DebugString(), absl::StrJoin(irs, ";"),
"}");
}
// Human-readable summary wrapping the base message description.
std::string BinaryHttpRequest::DebugString() const {
  std::string out = "BinaryHttpRequest{";
  absl::StrAppend(&out, BinaryHttpMessage::DebugString(), "}");
  return out;
}
// Stream pretty-printers; the unit tests route these through PrintTo so
// failures show DebugString() output.
void PrintTo(const BinaryHttpRequest& msg, std::ostream* os) {
  *os << msg.DebugString();
}
void PrintTo(const BinaryHttpResponse& msg, std::ostream* os) {
  *os << msg.DebugString();
}
void PrintTo(const BinaryHttpMessage::Field& msg, std::ostream* os) {
  *os << msg.DebugString();
}
} | #include "quiche/binary_http/binary_http_message.h"
#include <cstdint>
#include <memory>
#include <sstream>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "quiche/common/platform/api/quiche_test.h"
using ::testing::ContainerEq;
using ::testing::FieldsAre;
using ::testing::StrEq;
namespace quiche {
namespace {
// Serializes `word` as four big-endian bytes; the tests use this to build
// expected wire images from 32-bit hex literals.
std::string WordToBytes(uint32_t word) {
  std::string bytes;
  bytes.push_back(static_cast<char>(word >> 24));
  bytes.push_back(static_cast<char>(word >> 16));
  bytes.push_back(static_cast<char>(word >> 8));
  bytes.push_back(static_cast<char>(word));
  return bytes;
}
// Checks that PrintTo (the GoogleTest printer hook) emits exactly the
// message's DebugString().
template <class T>
void TestPrintTo(const T& resp) {
  std::ostringstream os;
  PrintTo(resp, &os);
  EXPECT_EQ(os.str(), resp.DebugString());
}
}
// A GET with a Host header serializes with an empty authority slot; the
// output must match the expected wire image byte-for-byte.
TEST(BinaryHttpRequest, EncodeGetNoBody) {
  BinaryHttpRequest request({"GET", "https", "www.example.com", "/hello.txt"});
  request
      .AddHeaderField({"User-Agent",
                       "curl/7.16.3 libcurl/7.16.3 OpenSSL/0.9.7l zlib/1.2.3"})
      ->AddHeaderField({"Host", "www.example.com"})
      ->AddHeaderField({"Accept-Language", "en, mi"});
  const uint32_t expected_words[] = {
      0x00034745, 0x54056874, 0x74707300, 0x0a2f6865, 0x6c6c6f2e, 0x74787440,
      0x6c0a7573, 0x65722d61, 0x67656e74, 0x34637572, 0x6c2f372e, 0x31362e33,
      0x206c6962, 0x6375726c, 0x2f372e31, 0x362e3320, 0x4f70656e, 0x53534c2f,
      0x302e392e, 0x376c207a, 0x6c69622f, 0x312e322e, 0x3304686f, 0x73740f77,
      0x77772e65, 0x78616d70, 0x6c652e63, 0x6f6d0f61, 0x63636570, 0x742d6c61,
      0x6e677561, 0x67650665, 0x6e2c206d, 0x69000000};
  std::string expected;
  for (const auto& word : expected_words) {
    expected += WordToBytes(word);
  }
  // Trim the bytes that only pad out the last 32-bit word.
  expected.resize(expected.size() - 2);
  const auto result = request.Serialize();
  ASSERT_TRUE(result.ok());
  ASSERT_EQ(*result, expected);
  EXPECT_THAT(
      request.DebugString(),
      StrEq("BinaryHttpRequest{BinaryHttpMessage{Headers{Field{user-agent=curl/"
            "7.16.3 "
            "libcurl/7.16.3 OpenSSL/0.9.7l "
            "zlib/1.2.3};Field{host=www.example.com};Field{accept-language=en, "
            "mi}}Body{}}}"));
  TestPrintTo(request);
}
// Decodes the GET wire image: empty authority, lower-cased header names,
// empty body, and a stable DebugString.
TEST(BinaryHttpRequest, DecodeGetNoBody) {
  const uint32_t words[] = {
      0x00034745, 0x54056874, 0x74707300, 0x0a2f6865, 0x6c6c6f2e, 0x74787440,
      0x6c0a7573, 0x65722d61, 0x67656e74, 0x34637572, 0x6c2f372e, 0x31362e33,
      0x206c6962, 0x6375726c, 0x2f372e31, 0x362e3320, 0x4f70656e, 0x53534c2f,
      0x302e392e, 0x376c207a, 0x6c69622f, 0x312e322e, 0x3304686f, 0x73740f77,
      0x77772e65, 0x78616d70, 0x6c652e63, 0x6f6d0f61, 0x63636570, 0x742d6c61,
      0x6e677561, 0x67650665, 0x6e2c206d, 0x69000000};
  std::string data;
  for (const auto& word : words) {
    data += WordToBytes(word);
  }
  // Trim the bytes that only pad out the last 32-bit word.
  data.resize(data.size() - 3);
  const auto request_so = BinaryHttpRequest::Create(data);
  ASSERT_TRUE(request_so.ok());
  const BinaryHttpRequest request = *request_so;
  ASSERT_THAT(request.control_data(),
              FieldsAre("GET", "https", "", "/hello.txt"));
  std::vector<BinaryHttpMessage::Field> expected_fields = {
      {"user-agent", "curl/7.16.3 libcurl/7.16.3 OpenSSL/0.9.7l zlib/1.2.3"},
      {"host", "www.example.com"},
      {"accept-language", "en, mi"}};
  for (const auto& field : expected_fields) {
    TestPrintTo(field);
  }
  ASSERT_THAT(request.GetHeaderFields(), ContainerEq(expected_fields));
  ASSERT_EQ(request.body(), "");
  EXPECT_THAT(
      request.DebugString(),
      StrEq("BinaryHttpRequest{BinaryHttpMessage{Headers{Field{user-agent=curl/"
            "7.16.3 "
            "libcurl/7.16.3 OpenSSL/0.9.7l "
            "zlib/1.2.3};Field{host=www.example.com};Field{accept-language=en, "
            "mi}}Body{}}}"));
  TestPrintTo(request);
}
// Without a Host header, the authority from the control data is serialized
// on the wire.
TEST(BinaryHttpRequest, EncodeGetWithAuthority) {
  BinaryHttpRequest request({"GET", "https", "www.example.com", "/hello.txt"});
  request
      .AddHeaderField({"User-Agent",
                       "curl/7.16.3 libcurl/7.16.3 OpenSSL/0.9.7l zlib/1.2.3"})
      ->AddHeaderField({"Accept-Language", "en, mi"});
  const uint32_t expected_words[] = {
      0x00034745, 0x54056874, 0x7470730f, 0x7777772e, 0x6578616d, 0x706c652e,
      0x636f6d0a, 0x2f68656c, 0x6c6f2e74, 0x78744057, 0x0a757365, 0x722d6167,
      0x656e7434, 0x6375726c, 0x2f372e31, 0x362e3320, 0x6c696263, 0x75726c2f,
      0x372e3136, 0x2e33204f, 0x70656e53, 0x534c2f30, 0x2e392e37, 0x6c207a6c,
      0x69622f31, 0x2e322e33, 0x0f616363, 0x6570742d, 0x6c616e67, 0x75616765,
      0x06656e2c, 0x206d6900};
  std::string expected;
  for (const auto& word : expected_words) {
    expected += WordToBytes(word);
  }
  const auto result = request.Serialize();
  ASSERT_TRUE(result.ok());
  ASSERT_EQ(*result, expected);
  EXPECT_THAT(
      request.DebugString(),
      StrEq("BinaryHttpRequest{BinaryHttpMessage{Headers{Field{user-agent=curl/"
            "7.16.3 libcurl/7.16.3 OpenSSL/0.9.7l "
            "zlib/1.2.3};Field{accept-language=en, mi}}Body{}}}"));
}
// Decodes a GET whose authority is carried in the control data rather than
// a Host header.
TEST(BinaryHttpRequest, DecodeGetWithAuthority) {
  const uint32_t words[] = {
      0x00034745, 0x54056874, 0x7470730f, 0x7777772e, 0x6578616d, 0x706c652e,
      0x636f6d0a, 0x2f68656c, 0x6c6f2e74, 0x78744057, 0x0a757365, 0x722d6167,
      0x656e7434, 0x6375726c, 0x2f372e31, 0x362e3320, 0x6c696263, 0x75726c2f,
      0x372e3136, 0x2e33204f, 0x70656e53, 0x534c2f30, 0x2e392e37, 0x6c207a6c,
      0x69622f31, 0x2e322e33, 0x0f616363, 0x6570742d, 0x6c616e67, 0x75616765,
      0x06656e2c, 0x206d6900, 0x00};
  std::string data;
  for (const auto& word : words) {
    data += WordToBytes(word);
  }
  const auto request_so = BinaryHttpRequest::Create(data);
  ASSERT_TRUE(request_so.ok());
  const BinaryHttpRequest request = *request_so;
  ASSERT_THAT(request.control_data(),
              FieldsAre("GET", "https", "www.example.com", "/hello.txt"));
  std::vector<BinaryHttpMessage::Field> expected_fields = {
      {"user-agent", "curl/7.16.3 libcurl/7.16.3 OpenSSL/0.9.7l zlib/1.2.3"},
      {"accept-language", "en, mi"}};
  ASSERT_THAT(request.GetHeaderFields(), ContainerEq(expected_fields));
  ASSERT_EQ(request.body(), "");
  EXPECT_THAT(
      request.DebugString(),
      StrEq("BinaryHttpRequest{BinaryHttpMessage{Headers{Field{user-agent=curl/"
            "7.16.3 libcurl/7.16.3 OpenSSL/0.9.7l "
            "zlib/1.2.3};Field{accept-language=en, mi}}Body{}}}"));
}
// A POST with a body serializes the header section followed by the
// length-prefixed body.
TEST(BinaryHttpRequest, EncodePostBody) {
  BinaryHttpRequest request({"POST", "https", "www.example.com", "/hello.txt"});
  request.AddHeaderField({"User-Agent", "not/telling"})
      ->AddHeaderField({"Host", "www.example.com"})
      ->AddHeaderField({"Accept-Language", "en"})
      ->set_body({"Some body that I used to post.\r\n"});
  const uint32_t expected_words[] = {
      0x0004504f, 0x53540568, 0x74747073, 0x000a2f68, 0x656c6c6f, 0x2e747874,
      0x3f0a7573, 0x65722d61, 0x67656e74, 0x0b6e6f74, 0x2f74656c, 0x6c696e67,
      0x04686f73, 0x740f7777, 0x772e6578, 0x616d706c, 0x652e636f, 0x6d0f6163,
      0x63657074, 0x2d6c616e, 0x67756167, 0x6502656e, 0x20536f6d, 0x6520626f,
      0x64792074, 0x68617420, 0x49207573, 0x65642074, 0x6f20706f, 0x73742e0d,
      0x0a000000};
  std::string expected;
  for (const auto& word : expected_words) {
    expected += WordToBytes(word);
  }
  // Trim the bytes that only pad out the last 32-bit word.
  expected.resize(expected.size() - 3);
  const auto result = request.Serialize();
  ASSERT_TRUE(result.ok());
  ASSERT_EQ(*result, expected);
  EXPECT_THAT(
      request.DebugString(),
      StrEq("BinaryHttpRequest{BinaryHttpMessage{Headers{Field{user-agent=not/"
            "telling};Field{host=www.example.com};Field{accept-language=en}}"
            "Body{Some "
            "body that I used to post.\r\n}}}"));
}
// Decodes a POST with a body; the trailing NUL bytes in the input are valid
// padding and are accepted by the decoder.
TEST(BinaryHttpRequest, DecodePostBody) {
  const uint32_t words[] = {
      0x0004504f, 0x53540568, 0x74747073, 0x000a2f68, 0x656c6c6f, 0x2e747874,
      0x3f0a7573, 0x65722d61, 0x67656e74, 0x0b6e6f74, 0x2f74656c, 0x6c696e67,
      0x04686f73, 0x740f7777, 0x772e6578, 0x616d706c, 0x652e636f, 0x6d0f6163,
      0x63657074, 0x2d6c616e, 0x67756167, 0x6502656e, 0x20536f6d, 0x6520626f,
      0x64792074, 0x68617420, 0x49207573, 0x65642074, 0x6f20706f, 0x73742e0d,
      0x0a000000};
  std::string data;
  for (const auto& word : words) {
    data += WordToBytes(word);
  }
  const auto request_so = BinaryHttpRequest::Create(data);
  ASSERT_TRUE(request_so.ok());
  BinaryHttpRequest request = *request_so;
  ASSERT_THAT(request.control_data(),
              FieldsAre("POST", "https", "", "/hello.txt"));
  std::vector<BinaryHttpMessage::Field> expected_fields = {
      {"user-agent", "not/telling"},
      {"host", "www.example.com"},
      {"accept-language", "en"}};
  ASSERT_THAT(request.GetHeaderFields(), ContainerEq(expected_fields));
  ASSERT_EQ(request.body(), "Some body that I used to post.\r\n");
  EXPECT_THAT(
      request.DebugString(),
      StrEq("BinaryHttpRequest{BinaryHttpMessage{Headers{Field{user-agent=not/"
            "telling};Field{host=www.example.com};Field{accept-language=en}}"
            "Body{Some "
            "body that I used to post.\r\n}}}"));
}
// Two requests constructed identically compare equal.
TEST(BinaryHttpRequest, Equality) {
  BinaryHttpRequest request({"POST", "https", "www.example.com", "/hello.txt"});
  request.AddHeaderField({"User-Agent", "not/telling"})
      ->set_body({"hello, world!\r\n"});
  BinaryHttpRequest same({"POST", "https", "www.example.com", "/hello.txt"});
  same.AddHeaderField({"User-Agent", "not/telling"})
      ->set_body({"hello, world!\r\n"});
  EXPECT_EQ(request, same);
}
// Any difference — control data, header value, missing header, body value,
// or missing body — breaks request equality.
TEST(BinaryHttpRequest, Inequality) {
  BinaryHttpRequest request({"POST", "https", "www.example.com", "/hello.txt"});
  request.AddHeaderField({"User-Agent", "not/telling"})
      ->set_body({"hello, world!\r\n"});
  BinaryHttpRequest different_control(
      {"PUT", "https", "www.example.com", "/hello.txt"});
  different_control.AddHeaderField({"User-Agent", "not/telling"})
      ->set_body({"hello, world!\r\n"});
  EXPECT_NE(request, different_control);
  BinaryHttpRequest different_header(
      {"PUT", "https", "www.example.com", "/hello.txt"});
  different_header.AddHeaderField({"User-Agent", "told/you"})
      ->set_body({"hello, world!\r\n"});
  EXPECT_NE(request, different_header);
  BinaryHttpRequest no_header(
      {"PUT", "https", "www.example.com", "/hello.txt"});
  no_header.set_body({"hello, world!\r\n"});
  EXPECT_NE(request, no_header);
  BinaryHttpRequest different_body(
      {"POST", "https", "www.example.com", "/hello.txt"});
  different_body.AddHeaderField({"User-Agent", "not/telling"})
      ->set_body({"goodbye, world!\r\n"});
  EXPECT_NE(request, different_body);
  BinaryHttpRequest no_body({"POST", "https", "www.example.com", "/hello.txt"});
  no_body.AddHeaderField({"User-Agent", "not/telling"});
  EXPECT_NE(request, no_body);
}
// A body-less 404 response serializes to the expected wire image.
TEST(BinaryHttpResponse, EncodeNoBody) {
  BinaryHttpResponse response(404);
  response.AddHeaderField({"Server", "Apache"});
  const uint32_t expected_words[] = {0x0141940e, 0x06736572, 0x76657206,
                                     0x41706163, 0x68650000};
  std::string expected;
  for (const auto& word : expected_words) {
    expected += WordToBytes(word);
  }
  // Trim the byte that only pads out the last 32-bit word.
  expected.resize(expected.size() - 1);
  const auto result = response.Serialize();
  ASSERT_TRUE(result.ok());
  ASSERT_EQ(*result, expected);
  EXPECT_THAT(
      response.DebugString(),
      StrEq("BinaryHttpResponse(404){BinaryHttpMessage{Headers{Field{server="
            "Apache}}Body{}}}"));
}
// Decodes a body-less 404 response; no informational responses present.
TEST(BinaryHttpResponse, DecodeNoBody) {
  const uint32_t words[] = {0x0141940e, 0x06736572, 0x76657206, 0x41706163,
                            0x68650000};
  std::string data;
  for (const auto& word : words) {
    data += WordToBytes(word);
  }
  const auto response_so = BinaryHttpResponse::Create(data);
  ASSERT_TRUE(response_so.ok());
  const BinaryHttpResponse response = *response_so;
  ASSERT_EQ(response.status_code(), 404);
  std::vector<BinaryHttpMessage::Field> expected_fields = {
      {"server", "Apache"}};
  ASSERT_THAT(response.GetHeaderFields(), ContainerEq(expected_fields));
  ASSERT_EQ(response.body(), "");
  ASSERT_TRUE(response.informational_responses().empty());
  EXPECT_THAT(
      response.DebugString(),
      StrEq("BinaryHttpResponse(404){BinaryHttpMessage{Headers{Field{server="
            "Apache}}Body{}}}"));
}
// A 200 response with a body serializes to the expected wire image.
TEST(BinaryHttpResponse, EncodeBody) {
  BinaryHttpResponse response(200);
  response.AddHeaderField({"Server", "Apache"});
  response.set_body("Hello, world!\r\n");
  const uint32_t expected_words[] = {0x0140c80e, 0x06736572, 0x76657206,
                                     0x41706163, 0x68650f48, 0x656c6c6f,
                                     0x2c20776f, 0x726c6421, 0x0d0a0000};
  std::string expected;
  for (const auto& word : expected_words) {
    expected += WordToBytes(word);
  }
  // Trim the bytes that only pad out the last 32-bit word.
  expected.resize(expected.size() - 2);
  const auto result = response.Serialize();
  ASSERT_TRUE(result.ok());
  ASSERT_EQ(*result, expected);
  EXPECT_THAT(
      response.DebugString(),
      StrEq("BinaryHttpResponse(200){BinaryHttpMessage{Headers{Field{server="
            "Apache}}Body{Hello, world!\r\n}}}"));
}
// Decodes a 200 response with a body; trailing NULs are accepted padding.
TEST(BinaryHttpResponse, DecodeBody) {
  const uint32_t words[] = {0x0140c80e, 0x06736572, 0x76657206,
                            0x41706163, 0x68650f48, 0x656c6c6f,
                            0x2c20776f, 0x726c6421, 0x0d0a0000};
  std::string data;
  for (const auto& word : words) {
    data += WordToBytes(word);
  }
  const auto response_so = BinaryHttpResponse::Create(data);
  ASSERT_TRUE(response_so.ok());
  const BinaryHttpResponse response = *response_so;
  ASSERT_EQ(response.status_code(), 200);
  std::vector<BinaryHttpMessage::Field> expected_fields = {
      {"server", "Apache"}};
  ASSERT_THAT(response.GetHeaderFields(), ContainerEq(expected_fields));
  ASSERT_EQ(response.body(), "Hello, world!\r\n");
  ASSERT_TRUE(response.informational_responses().empty());
  EXPECT_THAT(
      response.DebugString(),
      StrEq("BinaryHttpResponse(200){BinaryHttpMessage{Headers{Field{server="
            "Apache}}Body{Hello, world!\r\n}}}"));
}
// Informational status codes must lie in [100, 199]; values outside that
// range are rejected.
TEST(BHttpResponse, AddBadInformationalResponseCode) {
  BinaryHttpResponse response(200);
  ASSERT_FALSE(response.AddInformationalResponse(50, {}).ok());
  ASSERT_FALSE(response.AddInformationalResponse(300, {}).ok());
}
// A response carrying two informational (1xx) sections encodes them ahead
// of the final status code, fields, and body.
TEST(BinaryHttpResponse, EncodeMultiInformationalWithBody) {
  BinaryHttpResponse response(200);
  response.AddHeaderField({"Date", "Mon, 27 Jul 2009 12:28:53 GMT"})
      ->AddHeaderField({"Server", "Apache"})
      ->AddHeaderField({"Last-Modified", "Wed, 22 Jul 2009 19:15:56 GMT"})
      ->AddHeaderField({"ETag", "\"34aa387-d-1568eb00\""})
      ->AddHeaderField({"Accept-Ranges", "bytes"})
      ->AddHeaderField({"Content-Length", "51"})
      ->AddHeaderField({"Vary", "Accept-Encoding"})
      ->AddHeaderField({"Content-Type", "text/plain"});
  response.set_body("Hello World! My content includes a trailing CRLF.\r\n");
  ASSERT_TRUE(
      response.AddInformationalResponse(102, {{"Running", "\"sleep 15\""}})
          .ok());
  ASSERT_TRUE(response
                  .AddInformationalResponse(
                      103, {{"Link", "</style.css>; rel=preload; as=style"},
                            {"Link", "</script.js>; rel=preload; as=script"}})
                  .ok());
  const uint32_t expected_words[] = {
      0x01406613, 0x0772756e, 0x6e696e67, 0x0a22736c, 0x65657020, 0x31352240,
      0x67405304, 0x6c696e6b, 0x233c2f73, 0x74796c65, 0x2e637373, 0x3e3b2072,
      0x656c3d70, 0x72656c6f, 0x61643b20, 0x61733d73, 0x74796c65, 0x046c696e,
      0x6b243c2f, 0x73637269, 0x70742e6a, 0x733e3b20, 0x72656c3d, 0x7072656c,
      0x6f61643b, 0x2061733d, 0x73637269, 0x707440c8, 0x40ca0464, 0x6174651d,
      0x4d6f6e2c, 0x20323720, 0x4a756c20, 0x32303039, 0x2031323a, 0x32383a35,
      0x3320474d, 0x54067365, 0x72766572, 0x06417061, 0x6368650d, 0x6c617374,
      0x2d6d6f64, 0x69666965, 0x641d5765, 0x642c2032, 0x32204a75, 0x6c203230,
      0x30392031, 0x393a3135, 0x3a353620, 0x474d5404, 0x65746167, 0x14223334,
      0x61613338, 0x372d642d, 0x31353638, 0x65623030, 0x220d6163, 0x63657074,
      0x2d72616e, 0x67657305, 0x62797465, 0x730e636f, 0x6e74656e, 0x742d6c65,
      0x6e677468, 0x02353104, 0x76617279, 0x0f416363, 0x6570742d, 0x456e636f,
      0x64696e67, 0x0c636f6e, 0x74656e74, 0x2d747970, 0x650a7465, 0x78742f70,
      0x6c61696e, 0x3348656c, 0x6c6f2057, 0x6f726c64, 0x21204d79, 0x20636f6e,
      0x74656e74, 0x20696e63, 0x6c756465, 0x73206120, 0x74726169, 0x6c696e67,
      0x2043524c, 0x462e0d0a};
  std::string expected;
  for (const auto& word : expected_words) {
    expected += WordToBytes(word);
  }
  const auto result = response.Serialize();
  ASSERT_TRUE(result.ok());
  ASSERT_EQ(*result, expected);
  EXPECT_THAT(
      response.DebugString(),
      StrEq(
          "BinaryHttpResponse(200){BinaryHttpMessage{Headers{Field{date=Mon, "
          "27 Jul 2009 12:28:53 "
          "GMT};Field{server=Apache};Field{last-modified=Wed, 22 Jul 2009 "
          "19:15:56 "
          "GMT};Field{etag=\"34aa387-d-1568eb00\"};Field{accept-ranges=bytes};"
          "Field{"
          "content-length=51};Field{vary=Accept-Encoding};Field{content-type="
          "text/plain}}Body{Hello World! My content includes a trailing "
          "CRLF.\r\n}}InformationalResponse{Field{running=\"sleep "
          "15\"}};InformationalResponse{Field{link=</style.css>; rel=preload; "
          "as=style};Field{link=</script.js>; rel=preload; as=script}}}"));
  TestPrintTo(response);
}
// Decodes a response with two informational (1xx) sections; the trailing
// NUL word is accepted as padding.
TEST(BinaryHttpResponse, DecodeMultiInformationalWithBody) {
  const uint32_t words[] = {
      0x01406613, 0x0772756e, 0x6e696e67, 0x0a22736c, 0x65657020, 0x31352240,
      0x67405304, 0x6c696e6b, 0x233c2f73, 0x74796c65, 0x2e637373, 0x3e3b2072,
      0x656c3d70, 0x72656c6f, 0x61643b20, 0x61733d73, 0x74796c65, 0x046c696e,
      0x6b243c2f, 0x73637269, 0x70742e6a, 0x733e3b20, 0x72656c3d, 0x7072656c,
      0x6f61643b, 0x2061733d, 0x73637269, 0x707440c8, 0x40ca0464, 0x6174651d,
      0x4d6f6e2c, 0x20323720, 0x4a756c20, 0x32303039, 0x2031323a, 0x32383a35,
      0x3320474d, 0x54067365, 0x72766572, 0x06417061, 0x6368650d, 0x6c617374,
      0x2d6d6f64, 0x69666965, 0x641d5765, 0x642c2032, 0x32204a75, 0x6c203230,
      0x30392031, 0x393a3135, 0x3a353620, 0x474d5404, 0x65746167, 0x14223334,
      0x61613338, 0x372d642d, 0x31353638, 0x65623030, 0x220d6163, 0x63657074,
      0x2d72616e, 0x67657305, 0x62797465, 0x730e636f, 0x6e74656e, 0x742d6c65,
      0x6e677468, 0x02353104, 0x76617279, 0x0f416363, 0x6570742d, 0x456e636f,
      0x64696e67, 0x0c636f6e, 0x74656e74, 0x2d747970, 0x650a7465, 0x78742f70,
      0x6c61696e, 0x3348656c, 0x6c6f2057, 0x6f726c64, 0x21204d79, 0x20636f6e,
      0x74656e74, 0x20696e63, 0x6c756465, 0x73206120, 0x74726169, 0x6c696e67,
      0x2043524c, 0x462e0d0a, 0x00000000};
  std::string data;
  for (const auto& word : words) {
    data += WordToBytes(word);
  }
  const auto response_so = BinaryHttpResponse::Create(data);
  ASSERT_TRUE(response_so.ok());
  const BinaryHttpResponse response = *response_so;
  std::vector<BinaryHttpMessage::Field> expected_fields = {
      {"date", "Mon, 27 Jul 2009 12:28:53 GMT"},
      {"server", "Apache"},
      {"last-modified", "Wed, 22 Jul 2009 19:15:56 GMT"},
      {"etag", "\"34aa387-d-1568eb00\""},
      {"accept-ranges", "bytes"},
      {"content-length", "51"},
      {"vary", "Accept-Encoding"},
      {"content-type", "text/plain"}};
  ASSERT_THAT(response.GetHeaderFields(), ContainerEq(expected_fields));
  ASSERT_EQ(response.body(),
            "Hello World! My content includes a trailing CRLF.\r\n");
  std::vector<BinaryHttpMessage::Field> header102 = {
      {"running", "\"sleep 15\""}};
  std::vector<BinaryHttpMessage::Field> header103 = {
      {"link", "</style.css>; rel=preload; as=style"},
      {"link", "</script.js>; rel=preload; as=script"}};
  std::vector<BinaryHttpResponse::InformationalResponse> expected_control = {
      {102, header102}, {103, header103}};
  ASSERT_THAT(response.informational_responses(),
              ContainerEq(expected_control));
  EXPECT_THAT(
      response.DebugString(),
      StrEq(
          "BinaryHttpResponse(200){BinaryHttpMessage{Headers{Field{date=Mon, "
          "27 Jul 2009 12:28:53 "
          "GMT};Field{server=Apache};Field{last-modified=Wed, 22 Jul 2009 "
          "19:15:56 "
          "GMT};Field{etag=\"34aa387-d-1568eb00\"};Field{accept-ranges=bytes};"
          "Field{"
          "content-length=51};Field{vary=Accept-Encoding};Field{content-type="
          "text/plain}}Body{Hello World! My content includes a trailing "
          "CRLF.\r\n}}InformationalResponse{Field{running=\"sleep "
          "15\"}};InformationalResponse{Field{link=</style.css>; rel=preload; "
          "as=style};Field{link=</script.js>; rel=preload; as=script}}}"));
  TestPrintTo(response);
}
// Verifies that swap_body() exchanges the message body with the caller's
// string in both directions.
TEST(BinaryHttpMessage, SwapBody) {
  BinaryHttpRequest request({});
  request.set_body("hello, world!");
  std::string replacement = "goodbye, world!";
  request.swap_body(replacement);
  // The request now holds the new body...
  EXPECT_EQ(request.body(), "goodbye, world!");
  // ...and the old body was handed back to the caller.
  EXPECT_EQ(replacement, "hello, world!");
}
// Two responses built with identical status, header fields, body, and
// informational responses must compare equal.
TEST(BinaryHttpResponse, Equality) {
  BinaryHttpResponse response(200);
  response.AddHeaderField({"Server", "Apache"})->set_body("Hello, world!\r\n");
  ASSERT_TRUE(
      response.AddInformationalResponse(102, {{"Running", "\"sleep 15\""}})
          .ok());

  // Build an identical response through the same sequence of calls.
  BinaryHttpResponse identical(200);
  identical.AddHeaderField({"Server", "Apache"})->set_body("Hello, world!\r\n");
  ASSERT_TRUE(
      identical.AddInformationalResponse(102, {{"Running", "\"sleep 15\""}})
          .ok());
  ASSERT_EQ(response, identical);
}
// Exercises operator!= across every dimension of a response: status code,
// header fields, body, and informational responses — both with differing
// values and with the component missing entirely.
TEST(BinaryHttpResponse, Inequality) {
  // Baseline response.
  BinaryHttpResponse response(200);
  response.AddHeaderField({"Server", "Apache"})->set_body("Hello, world!\r\n");
  ASSERT_TRUE(
      response.AddInformationalResponse(102, {{"Running", "\"sleep 15\""}})
          .ok());
  // Different status code.
  BinaryHttpResponse different_status(201);
  different_status.AddHeaderField({"Server", "Apache"})
      ->set_body("Hello, world!\r\n");
  EXPECT_TRUE(different_status
                  .AddInformationalResponse(102, {{"Running", "\"sleep 15\""}})
                  .ok());
  EXPECT_NE(response, different_status);
  // Different header value.
  BinaryHttpResponse different_header(200);
  different_header.AddHeaderField({"Server", "python3"})
      ->set_body("Hello, world!\r\n");
  EXPECT_TRUE(different_header
                  .AddInformationalResponse(102, {{"Running", "\"sleep 15\""}})
                  .ok());
  EXPECT_NE(response, different_header);
  // Header field absent.
  BinaryHttpResponse no_header(200);
  no_header.set_body("Hello, world!\r\n");
  EXPECT_TRUE(
      no_header.AddInformationalResponse(102, {{"Running", "\"sleep 15\""}})
          .ok());
  EXPECT_NE(response, no_header);
  // Different body content.
  BinaryHttpResponse different_body(200);
  different_body.AddHeaderField({"Server", "Apache"})
      ->set_body("Goodbye, world!\r\n");
  EXPECT_TRUE(different_body
                  .AddInformationalResponse(102, {{"Running", "\"sleep 15\""}})
                  .ok());
  EXPECT_NE(response, different_body);
  // Body absent.
  BinaryHttpResponse no_body(200);
  no_body.AddHeaderField({"Server", "Apache"});
  EXPECT_TRUE(
      no_body.AddInformationalResponse(102, {{"Running", "\"sleep 15\""}})
          .ok());
  EXPECT_NE(response, no_body);
  // Different informational-response status code.
  BinaryHttpResponse different_informational(200);
  different_informational.AddHeaderField({"Server", "Apache"})
      ->set_body("Hello, world!\r\n");
  EXPECT_TRUE(different_informational
                  .AddInformationalResponse(198, {{"Running", "\"sleep 15\""}})
                  .ok());
  EXPECT_NE(response, different_informational);
  // Informational response absent.
  BinaryHttpResponse no_informational(200);
  no_informational.AddHeaderField({"Server", "Apache"})
      ->set_body("Hello, world!\r\n");
  EXPECT_NE(response, no_informational);
}
// gMock matcher: matches when |arg|'s payload equals |value| per
// IsPayloadEqual().  Used below to show that messages differing only in
// padding still carry equal payloads.
MATCHER_P(HasEqPayload, value, "Payloads of messages are equivalent.") {
  return arg.IsPayloadEqual(value);
}
// Round-trips |message| through Serialize()/Create() without and with
// trailing padding, checking that:
//  * EncodedSize() always matches the serialized size;
//  * padding adds exactly the requested number of (zero) bytes;
//  * a non-zero padding byte makes Create() fail;
//  * padded and unpadded messages are payload-equal but not fully equal.
template <typename T>
void TestPadding(T& message) {
  const auto data_so = message.Serialize();
  ASSERT_TRUE(data_so.ok());
  auto data = *data_so;
  ASSERT_EQ(data.size(), message.EncodedSize());
  // Request 10 bytes of padding and re-serialize.
  message.set_num_padding_bytes(10);
  const auto padded_data_so = message.Serialize();
  ASSERT_TRUE(padded_data_so.ok());
  const auto padded_data = *padded_data_so;
  ASSERT_EQ(padded_data.size(), message.EncodedSize());
  ASSERT_EQ(data.size() + 10, padded_data.size());
  // Padding is expected to be NUL bytes: extending the unpadded
  // serialization with ten zeros must reproduce the padded one exactly.
  data.resize(data.size() + 10);
  ASSERT_EQ(data, padded_data);
  const auto deserialized_padded_message_so = T::Create(data);
  ASSERT_TRUE(deserialized_padded_message_so.ok());
  const auto deserialized_padded_message = *deserialized_padded_message_so;
  ASSERT_EQ(deserialized_padded_message, message);
  ASSERT_EQ(deserialized_padded_message.num_padding_bytes(), size_t(10));
  // Corrupt the final padding byte; non-zero padding must be rejected.
  data[data.size() - 1] = 'a';
  const auto bad_so = T::Create(data);
  ASSERT_FALSE(bad_so.ok());
  // Back to the unpadded form: payloads match, full messages do not.
  data.resize(data.size() - 10);
  const auto deserialized_message_so = T::Create(data);
  ASSERT_TRUE(deserialized_message_so.ok());
  const auto deserialized_message = *deserialized_message_so;
  ASSERT_EQ(deserialized_message.num_padding_bytes(), size_t(0));
  ASSERT_THAT(deserialized_message, HasEqPayload(deserialized_padded_message));
  ASSERT_NE(deserialized_message, deserialized_padded_message);
}
// Padding round-trip for a request (see TestPadding above).
TEST(BinaryHttpRequest, Padding) {
  BinaryHttpRequest request({"GET", "https", "", "/hello.txt"});
  request
      .AddHeaderField({"User-Agent",
                       "curl/7.16.3 libcurl/7.16.3 OpenSSL/0.9.7l zlib/1.2.3"})
      ->AddHeaderField({"Host", "www.example.com"})
      ->AddHeaderField({"Accept-Language", "en, mi"});
  TestPadding(request);
}
// Padding round-trip for a response (see TestPadding above).
TEST(BinaryHttpResponse, Padding) {
  BinaryHttpResponse response(200);
  response.AddHeaderField({"Server", "Apache"})->set_body("Hello, world!\r\n");
  TestPadding(response);
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/binary_http/binary_http_message.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/binary_http/binary_http_message_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
7d65e279-385d-403d-ae70-cdd9bebf5336 | cpp | google/quiche | spdy_framer | quiche/http2/core/spdy_framer.cc | quiche/http2/core/spdy_framer_test.cc | #include "quiche/http2/core/spdy_framer.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/memory/memory.h"
#include "quiche/http2/core/spdy_alt_svc_wire_format.h"
#include "quiche/http2/core/spdy_frame_builder.h"
#include "quiche/http2/core/spdy_protocol.h"
#include "quiche/http2/core/zero_copy_output_buffer.h"
#include "quiche/http2/hpack/hpack_constants.h"
#include "quiche/http2/hpack/hpack_encoder.h"
#include "quiche/common/http/http_header_block.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace spdy {
namespace {
// Combines the exclusivity bit and parent stream id into the single 4-byte
// stream-dependency field used by HEADERS and PRIORITY frames: the parent id
// occupies the low 31 bits, the high bit carries the exclusive flag.
uint32_t PackStreamDependencyValues(bool exclusive,
                                    SpdyStreamId parent_stream_id) {
  const uint32_t masked_parent = parent_stream_id & 0x7fffffff;
  return exclusive ? (masked_parent | 0x80000000) : masked_parent;
}
const uint8_t kNoFlags = 0;
const size_t kPadLengthFieldSize = 1;
const size_t kOneSettingParameterSize = 6;
// Estimates the uncompressed serialized size of |headers|: a 4-byte pair
// count followed, for each header, by a 4-byte name length, the name bytes,
// a 4-byte value length, and the value bytes.
size_t GetUncompressedSerializedLength(const quiche::HttpHeaderBlock& headers) {
  const size_t kLengthFieldSize = sizeof(uint32_t);
  // Leading pair-count field.
  size_t total = kLengthFieldSize;
  for (const auto& header : headers) {
    total += kLengthFieldSize + header.first.size() + kLengthFieldSize +
             header.second.size();
  }
  return total;
}
// Computes the flag byte for a HEADERS frame from the IR and whether this
// frame terminates the header block.
uint8_t SerializeHeaderFrameFlags(const SpdyHeadersIR& header_ir,
                                  const bool end_headers) {
  uint8_t flags = end_headers ? HEADERS_FLAG_END_HEADERS : 0;
  if (header_ir.fin()) {
    flags |= CONTROL_FLAG_FIN;
  }
  if (header_ir.padded()) {
    flags |= HEADERS_FLAG_PADDED;
  }
  if (header_ir.has_priority()) {
    flags |= HEADERS_FLAG_PRIORITY;
  }
  return flags;
}
// Computes the flag byte for a PUSH_PROMISE frame from the IR and whether
// this frame terminates the header block.
uint8_t SerializePushPromiseFrameFlags(const SpdyPushPromiseIR& push_promise_ir,
                                       const bool end_headers) {
  uint8_t flags = push_promise_ir.padded() ? PUSH_PROMISE_FLAG_PADDED : 0;
  if (end_headers) {
    flags |= PUSH_PROMISE_FLAG_END_PUSH_PROMISE;
  }
  return flags;
}
// Writes a single HEADERS frame for |headers| whose header block fragment is
// the pre-encoded HPACK string |encoding|.  |end_headers| selects the
// END_HEADERS flag.  Returns false if |output| ran out of space.
bool SerializeHeadersGivenEncoding(const SpdyHeadersIR& headers,
                                   const std::string& encoding,
                                   const bool end_headers,
                                   ZeroCopyOutputBuffer* output) {
  const size_t frame_size =
      GetHeaderFrameSizeSansBlock(headers) + encoding.size();
  SpdyFrameBuilder builder(frame_size, output);
  bool ret = builder.BeginNewFrame(
      SpdyFrameType::HEADERS, SerializeHeaderFrameFlags(headers, end_headers),
      headers.stream_id(), frame_size - kFrameHeaderSize);
  QUICHE_DCHECK_EQ(kFrameHeaderSize, builder.length());

  // Wire order matters: pad-length byte, priority fields, fragment, padding.
  if (ret && headers.padded()) {
    ret &= builder.WriteUInt8(headers.padding_payload_len());
  }

  if (ret && headers.has_priority()) {
    int weight = ClampHttp2Weight(headers.weight());
    ret &= builder.WriteUInt32(PackStreamDependencyValues(
        headers.exclusive(), headers.parent_stream_id()));
    // Weight is encoded on the wire as value - 1.
    ret &= builder.WriteUInt8(weight - 1);
  }

  if (ret) {
    ret &= builder.WriteBytes(encoding.data(), encoding.size());
  }

  if (ret && headers.padding_payload_len() > 0) {
    // Padding bytes are zeros.
    std::string padding(headers.padding_payload_len(), 0);
    ret &= builder.WriteBytes(padding.data(), padding.length());
  }

  if (!ret) {
    QUICHE_DLOG(WARNING)
        << "Failed to build HEADERS. Not enough space in output";
  }
  return ret;
}
// Writes a single PUSH_PROMISE frame for |push_promise| whose header block
// fragment is the pre-encoded |encoding|.  Returns false on insufficient
// space in |output|.
bool SerializePushPromiseGivenEncoding(const SpdyPushPromiseIR& push_promise,
                                       const std::string& encoding,
                                       const bool end_headers,
                                       ZeroCopyOutputBuffer* output) {
  const size_t frame_size =
      GetPushPromiseFrameSizeSansBlock(push_promise) + encoding.size();
  SpdyFrameBuilder builder(frame_size, output);
  bool ok = builder.BeginNewFrame(
      SpdyFrameType::PUSH_PROMISE,
      SerializePushPromiseFrameFlags(push_promise, end_headers),
      push_promise.stream_id(), frame_size - kFrameHeaderSize);

  // Wire order: optional pad-length byte, promised stream id, fragment,
  // then the zero padding bytes.
  if (push_promise.padded()) {
    ok = ok && builder.WriteUInt8(push_promise.padding_payload_len());
  }
  ok = ok && builder.WriteUInt32(push_promise.promised_stream_id()) &&
       builder.WriteBytes(encoding.data(), encoding.size());
  if (ok && push_promise.padding_payload_len() > 0) {
    std::string padding(push_promise.padding_payload_len(), 0);
    ok = builder.WriteBytes(padding.data(), padding.length());
  }

  QUICHE_DLOG_IF(ERROR, !ok)
      << "Failed to write PUSH_PROMISE encoding, not enough "
      << "space in output";
  return ok;
}
bool WritePayloadWithContinuation(SpdyFrameBuilder* builder,
const std::string& hpack_encoding,
SpdyStreamId stream_id, SpdyFrameType type,
int padding_payload_len) {
uint8_t end_flag = 0;
uint8_t flags = 0;
if (type == SpdyFrameType::HEADERS) {
end_flag = HEADERS_FLAG_END_HEADERS;
} else if (type == SpdyFrameType::PUSH_PROMISE) {
end_flag = PUSH_PROMISE_FLAG_END_PUSH_PROMISE;
} else {
QUICHE_DLOG(FATAL) << "CONTINUATION frames cannot be used with frame type "
<< FrameTypeToString(type);
}
size_t bytes_remaining = 0;
bytes_remaining = hpack_encoding.size() -
std::min(hpack_encoding.size(),
kHttp2MaxControlFrameSendSize - builder->length() -
padding_payload_len);
bool ret = builder->WriteBytes(&hpack_encoding[0],
hpack_encoding.size() - bytes_remaining);
if (padding_payload_len > 0) {
std::string padding = std::string(padding_payload_len, 0);
ret &= builder->WriteBytes(padding.data(), padding.length());
}
while (bytes_remaining > 0 && ret) {
size_t bytes_to_write =
std::min(bytes_remaining,
kHttp2MaxControlFrameSendSize - kContinuationFrameMinimumSize);
if (bytes_remaining == bytes_to_write) {
flags |= end_flag;
}
ret &= builder->BeginNewFrame(SpdyFrameType::CONTINUATION, flags, stream_id,
bytes_to_write);
ret &= builder->WriteBytes(
&hpack_encoding[hpack_encoding.size() - bytes_remaining],
bytes_to_write);
bytes_remaining -= bytes_to_write;
}
return ret;
}
// Computes the flag byte, padding-field count, and total frame size for a
// DATA frame.  Note: |*flags| is *assigned* (not OR-ed) when FIN is set and
// |*num_padding_fields| is incremented, so the caller must initialize both.
void SerializeDataBuilderHelper(const SpdyDataIR& data_ir, uint8_t* flags,
                                int* num_padding_fields,
                                size_t* size_with_padding) {
  if (data_ir.fin()) {
    *flags = DATA_FLAG_FIN;
  }

  if (data_ir.padded()) {
    *flags = *flags | DATA_FLAG_PADDED;
    // The 1-byte pad-length field counts toward the payload.
    ++*num_padding_fields;
  }

  *size_with_padding = *num_padding_fields + data_ir.data_len() +
                       data_ir.padding_payload_len() + kDataFrameMinimumSize;
}
// Computes the flag byte and the size of the header-only serialization of a
// DATA frame (frame header plus, when padded, the 1-byte pad-length field).
// All out-params are written unconditionally except |*num_padding_fields|,
// which is only incremented when padded.
void SerializeDataFrameHeaderWithPaddingLengthFieldBuilderHelper(
    const SpdyDataIR& data_ir, uint8_t* flags, size_t* frame_size,
    size_t* num_padding_fields) {
  *flags = DATA_FLAG_NONE;
  if (data_ir.fin()) {
    *flags = DATA_FLAG_FIN;
  }

  *frame_size = kDataFrameMinimumSize;
  if (data_ir.padded()) {
    *flags = *flags | DATA_FLAG_PADDED;
    ++(*num_padding_fields);
    *frame_size = *frame_size + *num_padding_fields;
  }
}
// Computes the flag byte and total serialized size for a SETTINGS frame.
// |*flags| is updated in place; |*size| counts 6 bytes per setting entry on
// top of the minimum frame size.
void SerializeSettingsBuilderHelper(const SpdySettingsIR& settings,
                                    uint8_t* flags, const SettingsMap* values,
                                    size_t* size) {
  if (settings.is_ack()) {
    *flags |= SETTINGS_FLAG_ACK;
  }
  *size = kSettingsFrameMinimumSize + values->size() * kOneSettingParameterSize;
}
// Serializes the ALTSVC header-field value into |*value| and computes the
// total frame size (minimum size + origin + value) into |*size|.
void SerializeAltSvcBuilderHelper(const SpdyAltSvcIR& altsvc_ir,
                                  std::string* value, size_t* size) {
  *value = SpdyAltSvcWireFormat::SerializeHeaderFieldValue(
      altsvc_ir.altsvc_vector());
  *size = kGetAltSvcFrameMinimumSize + altsvc_ir.origin().length() +
          value->length();
}
}
// Constructs a framer with the given header-compression option.  The
// static_assert guards the invariant that frames we send never exceed the
// default size limit we are willing to receive.
SpdyFramer::SpdyFramer(CompressionOption option)
    : debug_visitor_(nullptr), compression_option_(option) {
  static_assert(kHttp2MaxControlFrameSendSize <= kHttp2DefaultFrameSizeLimit,
                "Our send limit should be at most our receive limit.");
}

SpdyFramer::~SpdyFramer() = default;

// Installs an optional observer of compressed-frame emission (see the
// OnSendCompressedFrame calls below).  Not owned; may be nullptr.
void SpdyFramer::set_debug_visitor(
    SpdyFramerDebugVisitorInterface* debug_visitor) {
  debug_visitor_ = debug_visitor;
}
// A frame iterator starts out expecting to produce at least one frame.
SpdyFramer::SpdyFrameIterator::SpdyFrameIterator(SpdyFramer* framer)
    : framer_(framer), is_first_frame_(true), has_next_frame_(true) {}

SpdyFramer::SpdyFrameIterator::~SpdyFrameIterator() = default;
// Serializes the next frame of the sequence into |output|: the initial
// HEADERS/PUSH_PROMISE frame on the first call, then CONTINUATION frames
// until the HPACK encoder is drained.  Returns the number of bytes written,
// or 0 on failure (no next frame, or |output| out of space).
size_t SpdyFramer::SpdyFrameIterator::NextFrame(ZeroCopyOutputBuffer* output) {
  const SpdyFrameIR* frame_ir = GetIR();
  if (!has_next_frame_ || frame_ir == nullptr) {
    QUICHE_BUG(spdy_bug_75_1)
        << "SpdyFramer::SpdyFrameIterator::NextFrame called without "
        << "a next frame.";
    // This function returns a byte count, so return 0 (the previous
    // `return false;` relied on bool-to-size_t conversion).
    return 0;
  }

  // Budget for this frame's HPACK fragment: the max control frame size less
  // the fixed per-frame prologue.
  const size_t size_without_block =
      is_first_frame_ ? GetFrameSizeSansBlock() : kContinuationFrameMinimumSize;
  std::string encoding =
      encoder_->Next(kHttp2MaxControlFrameSendSize - size_without_block);
  has_next_frame_ = encoder_->HasNext();

  if (framer_->debug_visitor_ != nullptr) {
    const auto& frame_ref =
        static_cast<const SpdyFrameWithHeaderBlockIR&>(*frame_ir);
    const size_t header_list_size =
        GetUncompressedSerializedLength(frame_ref.header_block());
    framer_->debug_visitor_->OnSendCompressedFrame(
        frame_ref.stream_id(),
        is_first_frame_ ? frame_ref.frame_type() : SpdyFrameType::CONTINUATION,
        header_list_size, size_without_block + encoding.size());
  }

  const size_t free_bytes_before = output->BytesFree();
  bool ok = false;
  if (is_first_frame_) {
    is_first_frame_ = false;
    ok = SerializeGivenEncoding(encoding, output);
  } else {
    // Wrap the fragment in a CONTINUATION frame; the last one carries
    // END_HEADERS.
    SpdyContinuationIR continuation_ir(frame_ir->stream_id());
    continuation_ir.take_encoding(std::move(encoding));
    continuation_ir.set_end_headers(!has_next_frame_);
    ok = framer_->SerializeContinuation(continuation_ir, output);
  }
  return ok ? free_bytes_before - output->BytesFree() : 0;
}
// True while the HPACK encoder still has header-block bytes to emit.
bool SpdyFramer::SpdyFrameIterator::HasNextFrame() const {
  return has_next_frame_;
}
// Iterator specialization for HEADERS frames: takes ownership of the IR and
// primes the HPACK encoder with its header block.
SpdyFramer::SpdyHeaderFrameIterator::SpdyHeaderFrameIterator(
    SpdyFramer* framer, std::unique_ptr<const SpdyHeadersIR> headers_ir)
    : SpdyFrameIterator(framer), headers_ir_(std::move(headers_ir)) {
  SetEncoder(headers_ir_.get());
}

SpdyFramer::SpdyHeaderFrameIterator::~SpdyHeaderFrameIterator() = default;

const SpdyFrameIR* SpdyFramer::SpdyHeaderFrameIterator::GetIR() const {
  return headers_ir_.get();
}

// Size of the HEADERS frame excluding the header-block fragment.
size_t SpdyFramer::SpdyHeaderFrameIterator::GetFrameSizeSansBlock() const {
  return GetHeaderFrameSizeSansBlock(*headers_ir_);
}

// Serializes the first frame; END_HEADERS is set iff no CONTINUATION follows.
bool SpdyFramer::SpdyHeaderFrameIterator::SerializeGivenEncoding(
    const std::string& encoding, ZeroCopyOutputBuffer* output) const {
  return SerializeHeadersGivenEncoding(*headers_ir_, encoding,
                                       !has_next_frame(), output);
}
// Iterator specialization for PUSH_PROMISE frames: takes ownership of the IR
// and primes the HPACK encoder with its header block.
SpdyFramer::SpdyPushPromiseFrameIterator::SpdyPushPromiseFrameIterator(
    SpdyFramer* framer,
    std::unique_ptr<const SpdyPushPromiseIR> push_promise_ir)
    : SpdyFrameIterator(framer), push_promise_ir_(std::move(push_promise_ir)) {
  SetEncoder(push_promise_ir_.get());
}

SpdyFramer::SpdyPushPromiseFrameIterator::~SpdyPushPromiseFrameIterator() =
    default;

const SpdyFrameIR* SpdyFramer::SpdyPushPromiseFrameIterator::GetIR() const {
  return push_promise_ir_.get();
}

// Size of the PUSH_PROMISE frame excluding the header-block fragment.
size_t SpdyFramer::SpdyPushPromiseFrameIterator::GetFrameSizeSansBlock() const {
  return GetPushPromiseFrameSizeSansBlock(*push_promise_ir_);
}

// Serializes the first frame; END_PUSH_PROMISE is set iff no CONTINUATION
// follows.
bool SpdyFramer::SpdyPushPromiseFrameIterator::SerializeGivenEncoding(
    const std::string& encoding, ZeroCopyOutputBuffer* output) const {
  return SerializePushPromiseGivenEncoding(*push_promise_ir_, encoding,
                                           !has_next_frame(), output);
}
// Iterator for frames that always serialize in one piece: the whole frame is
// emitted on the first NextFrame() call and the sequence is then exhausted.
SpdyFramer::SpdyControlFrameIterator::SpdyControlFrameIterator(
    SpdyFramer* framer, std::unique_ptr<const SpdyFrameIR> frame_ir)
    : framer_(framer), frame_ir_(std::move(frame_ir)) {}

SpdyFramer::SpdyControlFrameIterator::~SpdyControlFrameIterator() = default;

size_t SpdyFramer::SpdyControlFrameIterator::NextFrame(
    ZeroCopyOutputBuffer* output) {
  size_t size_written = framer_->SerializeFrame(*frame_ir_, output);
  // Single-shot: there is never a second frame.
  has_next_frame_ = false;
  return size_written;
}

bool SpdyFramer::SpdyControlFrameIterator::HasNextFrame() const {
  return has_next_frame_;
}

const SpdyFrameIR* SpdyFramer::SpdyControlFrameIterator::GetIR() const {
  return frame_ir_.get();
}
// Factory: selects the iterator implementation for |frame_ir|.  HEADERS and
// PUSH_PROMISE may require CONTINUATION splitting; every other frame type
// (including DATA) serializes as a single frame via SpdyControlFrameIterator.
std::unique_ptr<SpdyFrameSequence> SpdyFramer::CreateIterator(
    SpdyFramer* framer, std::unique_ptr<const SpdyFrameIR> frame_ir) {
  switch (frame_ir->frame_type()) {
    case SpdyFrameType::HEADERS: {
      return std::make_unique<SpdyHeaderFrameIterator>(
          framer, absl::WrapUnique(
                      static_cast<const SpdyHeadersIR*>(frame_ir.release())));
    }
    case SpdyFrameType::PUSH_PROMISE: {
      return std::make_unique<SpdyPushPromiseFrameIterator>(
          framer, absl::WrapUnique(static_cast<const SpdyPushPromiseIR*>(
                      frame_ir.release())));
    }
    case SpdyFrameType::DATA: {
      QUICHE_DVLOG(1) << "Serialize a stream end DATA frame for VTL";
      // DATA intentionally falls through to the single-frame iterator.
      ABSL_FALLTHROUGH_INTENDED;
    }
    default: {
      return std::make_unique<SpdyControlFrameIterator>(framer,
                                                        std::move(frame_ir));
    }
  }
}
// Serializes a complete DATA frame, including payload and any padding.
// Wire order: frame header, optional 1-byte pad length, data, zero padding.
SpdySerializedFrame SpdyFramer::SerializeData(const SpdyDataIR& data_ir) {
  uint8_t flags = DATA_FLAG_NONE;
  int num_padding_fields = 0;
  size_t size_with_padding = 0;
  SerializeDataBuilderHelper(data_ir, &flags, &num_padding_fields,
                             &size_with_padding);

  SpdyFrameBuilder builder(size_with_padding);
  builder.BeginNewFrame(SpdyFrameType::DATA, flags, data_ir.stream_id());
  if (data_ir.padded()) {
    // Pad length is a single byte.
    builder.WriteUInt8(data_ir.padding_payload_len() & 0xff);
  }
  builder.WriteBytes(data_ir.data(), data_ir.data_len());
  if (data_ir.padding_payload_len() > 0) {
    std::string padding(data_ir.padding_payload_len(), 0);
    builder.WriteBytes(padding.data(), padding.length());
  }
  QUICHE_DCHECK_EQ(size_with_padding, builder.length());
  return builder.take();
}
// Serializes only the DATA frame header (plus the 1-byte pad length when
// padded).  The declared payload length covers the data and padding that the
// caller will write separately after this header.
SpdySerializedFrame SpdyFramer::SerializeDataFrameHeaderWithPaddingLengthField(
    const SpdyDataIR& data_ir) {
  uint8_t flags = DATA_FLAG_NONE;
  size_t frame_size = 0;
  size_t num_padding_fields = 0;
  SerializeDataFrameHeaderWithPaddingLengthFieldBuilderHelper(
      data_ir, &flags, &frame_size, &num_padding_fields);

  SpdyFrameBuilder builder(frame_size);
  builder.BeginNewFrame(
      SpdyFrameType::DATA, flags, data_ir.stream_id(),
      num_padding_fields + data_ir.data_len() + data_ir.padding_payload_len());
  if (data_ir.padded()) {
    builder.WriteUInt8(data_ir.padding_payload_len() & 0xff);
  }
  QUICHE_DCHECK_EQ(frame_size, builder.length());
  return builder.take();
}
// Serializes a RST_STREAM frame: the fixed frame header followed by a
// 4-byte error code.
SpdySerializedFrame SpdyFramer::SerializeRstStream(
    const SpdyRstStreamIR& rst_stream) const {
  const size_t frame_size = kRstStreamFrameSize;
  SpdyFrameBuilder builder(frame_size);
  builder.BeginNewFrame(SpdyFrameType::RST_STREAM, 0, rst_stream.stream_id());
  builder.WriteUInt32(rst_stream.error_code());
  QUICHE_DCHECK_EQ(frame_size, builder.length());
  return builder.take();
}
// Serializes a SETTINGS frame.  An ACK carries no payload; otherwise each
// setting is written as a 2-byte identifier followed by a 4-byte value.
SpdySerializedFrame SpdyFramer::SerializeSettings(
    const SpdySettingsIR& settings) const {
  uint8_t flags = 0;
  size_t size = 0;
  const SettingsMap* values = &(settings.values());
  SerializeSettingsBuilderHelper(settings, &flags, values, &size);
  SpdyFrameBuilder builder(size);
  builder.BeginNewFrame(SpdyFrameType::SETTINGS, flags, 0);

  // A SETTINGS ACK consists of the frame header only.
  if (settings.is_ack()) {
    return builder.take();
  }
  QUICHE_DCHECK_EQ(kSettingsFrameMinimumSize, builder.length());
  for (const auto& [id, value] : *values) {
    const int setting_id = id;
    QUICHE_DCHECK_GE(setting_id, 0);
    builder.WriteUInt16(static_cast<SpdySettingsId>(setting_id));
    builder.WriteUInt32(value);
  }
  QUICHE_DCHECK_EQ(size, builder.length());
  return builder.take();
}
// Serializes a PING frame: an 8-byte opaque id, with the ACK flag set when
// this ping answers a peer's ping.
SpdySerializedFrame SpdyFramer::SerializePing(const SpdyPingIR& ping) const {
  SpdyFrameBuilder builder(kPingFrameSize);
  const uint8_t flags = ping.is_ack() ? PING_FLAG_ACK : kNoFlags;
  builder.BeginNewFrame(SpdyFrameType::PING, flags, 0);
  builder.WriteUInt64(ping.id());
  QUICHE_DCHECK_EQ(kPingFrameSize, builder.length());
  return builder.take();
}
// Serializes a GOAWAY frame: last-good stream id, error code, and optional
// opaque debug data.
SpdySerializedFrame SpdyFramer::SerializeGoAway(
    const SpdyGoAwayIR& goaway) const {
  const auto& description = goaway.description();
  const size_t frame_size = kGoawayFrameMinimumSize + description.size();

  SpdyFrameBuilder builder(frame_size);
  builder.BeginNewFrame(SpdyFrameType::GOAWAY, 0, 0);
  builder.WriteUInt32(goaway.last_good_stream_id());
  builder.WriteUInt32(goaway.error_code());
  if (!description.empty()) {
    builder.WriteBytes(description.data(), description.size());
  }
  QUICHE_DCHECK_EQ(frame_size, builder.length());
  return builder.take();
}
// Computes, for a HEADERS frame: the flag byte, the total serialized size
// (including any CONTINUATION frame headers), the HPACK encoding, the
// clamped weight, and the first frame's declared payload length.  All
// out-params are caller-initialized and updated in place.
void SpdyFramer::SerializeHeadersBuilderHelper(const SpdyHeadersIR& headers,
                                               uint8_t* flags, size_t* size,
                                               std::string* hpack_encoding,
                                               int* weight,
                                               size_t* length_field) {
  if (headers.fin()) {
    *flags = *flags | CONTROL_FLAG_FIN;
  }
  // END_HEADERS is assumed here and cleared below if the frame must be
  // split into CONTINUATIONs.
  *flags = *flags | HEADERS_FLAG_END_HEADERS;
  if (headers.has_priority()) {
    *flags = *flags | HEADERS_FLAG_PRIORITY;
  }
  if (headers.padded()) {
    *flags = *flags | HEADERS_FLAG_PADDED;
  }

  *size = kHeadersFrameMinimumSize;

  if (headers.padded()) {
    *size = *size + kPadLengthFieldSize;
    *size = *size + headers.padding_payload_len();
  }

  if (headers.has_priority()) {
    *weight = ClampHttp2Weight(headers.weight());
    // 4-byte stream dependency + 1-byte weight.
    *size = *size + 5;
  }

  *hpack_encoding =
      GetHpackEncoder()->EncodeHeaderBlock(headers.header_block());
  *size = *size + hpack_encoding->size();
  // If too large for one frame, add the CONTINUATION frame headers to the
  // total and clear END_HEADERS on the initial frame.
  if (*size > kHttp2MaxControlFrameSendSize) {
    *size = *size + GetNumberRequiredContinuationFrames(*size) *
                        kContinuationFrameMinimumSize;
    *flags = *flags & ~HEADERS_FLAG_END_HEADERS;
  }
  // First frame's payload length: pad-length field, priority fields
  // (4-byte dependency + 1-byte weight), padding, and as much of the
  // encoding as fits, capped at the max control-frame payload.
  if (headers.padded()) {
    *length_field = *length_field + kPadLengthFieldSize;
  }
  if (headers.has_priority()) {
    *length_field = *length_field + 4;
    *length_field = *length_field + 1;
  }
  *length_field = *length_field + headers.padding_payload_len();
  *length_field = *length_field + hpack_encoding->size();
  *length_field =
      std::min(*length_field, kHttp2MaxControlFrameSendSize - kFrameHeaderSize);
}
// Serializes a HEADERS frame, spilling the HPACK-encoded header block into
// CONTINUATION frames when it exceeds the max control-frame size.
SpdySerializedFrame SpdyFramer::SerializeHeaders(const SpdyHeadersIR& headers) {
  uint8_t flags = 0;
  size_t size = 0;
  std::string hpack_encoding;
  int weight = 0;
  size_t length_field = 0;
  SerializeHeadersBuilderHelper(headers, &flags, &size, &hpack_encoding,
                                &weight, &length_field);

  SpdyFrameBuilder builder(size);
  builder.BeginNewFrame(SpdyFrameType::HEADERS, flags, headers.stream_id(),
                        length_field);
  QUICHE_DCHECK_EQ(kHeadersFrameMinimumSize, builder.length());

  // Wire order: pad length, priority fields, fragment(s), padding.
  int padding_payload_len = 0;
  if (headers.padded()) {
    builder.WriteUInt8(headers.padding_payload_len());
    padding_payload_len = headers.padding_payload_len();
  }
  if (headers.has_priority()) {
    builder.WriteUInt32(PackStreamDependencyValues(headers.exclusive(),
                                                   headers.parent_stream_id()));
    // Weight is encoded on the wire as value - 1.
    builder.WriteUInt8(weight - 1);
  }
  WritePayloadWithContinuation(&builder, hpack_encoding, headers.stream_id(),
                               SpdyFrameType::HEADERS, padding_payload_len);

  if (debug_visitor_) {
    // Report uncompressed vs. compressed sizes to the debug visitor.
    const size_t header_list_size =
        GetUncompressedSerializedLength(headers.header_block());
    debug_visitor_->OnSendCompressedFrame(headers.stream_id(),
                                          SpdyFrameType::HEADERS,
                                          header_list_size, builder.length());
  }

  return builder.take();
}
// Serializes a WINDOW_UPDATE frame: a fixed-size frame carrying a single
// 4-byte window-size delta for the IR's stream id.
SpdySerializedFrame SpdyFramer::SerializeWindowUpdate(
    const SpdyWindowUpdateIR& window_update) {
  SpdyFrameBuilder builder(kWindowUpdateFrameSize);
  builder.BeginNewFrame(SpdyFrameType::WINDOW_UPDATE, kNoFlags,
                        window_update.stream_id());
  builder.WriteUInt32(window_update.delta());
  QUICHE_DCHECK_EQ(kWindowUpdateFrameSize, builder.length());
  return builder.take();
}
// Computes the flag byte, HPACK encoding, and total serialized size
// (including any CONTINUATION frame headers) for a PUSH_PROMISE frame.
void SpdyFramer::SerializePushPromiseBuilderHelper(
    const SpdyPushPromiseIR& push_promise, uint8_t* flags,
    std::string* hpack_encoding, size_t* size) {
  *flags = 0;
  // END_PUSH_PROMISE is assumed and cleared below if continuations are
  // needed.
  *flags = *flags | PUSH_PROMISE_FLAG_END_PUSH_PROMISE;
  *size = kPushPromiseFrameMinimumSize;

  if (push_promise.padded()) {
    *flags = *flags | PUSH_PROMISE_FLAG_PADDED;
    *size = *size + kPadLengthFieldSize;
    *size = *size + push_promise.padding_payload_len();
  }

  *hpack_encoding =
      GetHpackEncoder()->EncodeHeaderBlock(push_promise.header_block());
  *size = *size + hpack_encoding->size();
  // Account for CONTINUATION frame headers when the frame must be split.
  if (*size > kHttp2MaxControlFrameSendSize) {
    *size = *size + GetNumberRequiredContinuationFrames(*size) *
                        kContinuationFrameMinimumSize;
    *flags = *flags & ~PUSH_PROMISE_FLAG_END_PUSH_PROMISE;
  }
}
// Serializes a PUSH_PROMISE frame, spilling the HPACK-encoded header block
// into CONTINUATION frames when it exceeds the max control-frame size.
SpdySerializedFrame SpdyFramer::SerializePushPromise(
    const SpdyPushPromiseIR& push_promise) {
  uint8_t flags = 0;
  size_t size = 0;
  std::string hpack_encoding;
  SerializePushPromiseBuilderHelper(push_promise, &flags, &hpack_encoding,
                                    &size);

  SpdyFrameBuilder builder(size);
  // The first frame's declared payload is capped at the max control-frame
  // payload; the remainder goes into CONTINUATIONs.
  size_t length =
      std::min(size, kHttp2MaxControlFrameSendSize) - kFrameHeaderSize;
  builder.BeginNewFrame(SpdyFrameType::PUSH_PROMISE, flags,
                        push_promise.stream_id(), length);
  int padding_payload_len = 0;
  // Wire order: optional pad length, promised stream id, fragment, padding.
  if (push_promise.padded()) {
    builder.WriteUInt8(push_promise.padding_payload_len());
    builder.WriteUInt32(push_promise.promised_stream_id());
    QUICHE_DCHECK_EQ(kPushPromiseFrameMinimumSize + kPadLengthFieldSize,
                     builder.length());

    padding_payload_len = push_promise.padding_payload_len();
  } else {
    builder.WriteUInt32(push_promise.promised_stream_id());
    QUICHE_DCHECK_EQ(kPushPromiseFrameMinimumSize, builder.length());
  }

  WritePayloadWithContinuation(
      &builder, hpack_encoding, push_promise.stream_id(),
      SpdyFrameType::PUSH_PROMISE, padding_payload_len);

  if (debug_visitor_) {
    // Report uncompressed vs. compressed sizes to the debug visitor.
    const size_t header_list_size =
        GetUncompressedSerializedLength(push_promise.header_block());
    debug_visitor_->OnSendCompressedFrame(push_promise.stream_id(),
                                          SpdyFrameType::PUSH_PROMISE,
                                          header_list_size, builder.length());
  }

  return builder.take();
}
// Serializes a CONTINUATION frame carrying an already-encoded header-block
// fragment.  END_HEADERS shares the same flag bit as in HEADERS frames.
SpdySerializedFrame SpdyFramer::SerializeContinuation(
    const SpdyContinuationIR& continuation) const {
  const std::string& encoding = continuation.encoding();
  size_t frame_size = kContinuationFrameMinimumSize + encoding.size();
  SpdyFrameBuilder builder(frame_size);
  uint8_t flags = continuation.end_headers() ? HEADERS_FLAG_END_HEADERS : 0;
  builder.BeginNewFrame(SpdyFrameType::CONTINUATION, flags,
                        continuation.stream_id());
  QUICHE_DCHECK_EQ(kFrameHeaderSize, builder.length());

  builder.WriteBytes(encoding.data(), encoding.size());
  return builder.take();
}
// Serializes an ALTSVC frame: a 2-byte origin length, the origin bytes, and
// the wire-format alternative-service field value.
SpdySerializedFrame SpdyFramer::SerializeAltSvc(const SpdyAltSvcIR& altsvc_ir) {
  std::string value;
  size_t size = 0;
  SerializeAltSvcBuilderHelper(altsvc_ir, &value, &size);
  SpdyFrameBuilder builder(size);
  builder.BeginNewFrame(SpdyFrameType::ALTSVC, kNoFlags, altsvc_ir.stream_id());

  builder.WriteUInt16(altsvc_ir.origin().length());
  builder.WriteBytes(altsvc_ir.origin().data(), altsvc_ir.origin().length());
  builder.WriteBytes(value.data(), value.length());
  QUICHE_DCHECK_LT(kGetAltSvcFrameMinimumSize, builder.length());
  return builder.take();
}
// Serializes a PRIORITY frame: a 4-byte stream dependency (exclusive bit +
// parent id) followed by a 1-byte weight, encoded on the wire as weight - 1.
SpdySerializedFrame SpdyFramer::SerializePriority(
    const SpdyPriorityIR& priority) const {
  SpdyFrameBuilder builder(kPriorityFrameSize);
  builder.BeginNewFrame(SpdyFrameType::PRIORITY, kNoFlags,
                        priority.stream_id());
  const uint32_t dependency = PackStreamDependencyValues(
      priority.exclusive(), priority.parent_stream_id());
  builder.WriteUInt32(dependency);
  builder.WriteUInt8(priority.weight() - 1);
  QUICHE_DCHECK_EQ(kPriorityFrameSize, builder.length());
  return builder.take();
}
// Serializes a PRIORITY_UPDATE frame: a 4-byte prioritized stream id
// followed by the opaque Priority-Field-Value bytes.
SpdySerializedFrame SpdyFramer::SerializePriorityUpdate(
    const SpdyPriorityUpdateIR& priority_update) const {
  const auto& field_value = priority_update.priority_field_value();
  const size_t frame_size =
      kPriorityUpdateFrameMinimumSize + field_value.size();
  SpdyFrameBuilder builder(frame_size);
  builder.BeginNewFrame(SpdyFrameType::PRIORITY_UPDATE, kNoFlags,
                        priority_update.stream_id());
  builder.WriteUInt32(priority_update.prioritized_stream_id());
  builder.WriteBytes(field_value.data(), field_value.size());
  QUICHE_DCHECK_EQ(frame_size, builder.length());
  return builder.take();
}
// Serializes an ACCEPT_CH frame: for each entry, a 2-byte origin length, the
// origin bytes, a 2-byte value length, and the value bytes.
SpdySerializedFrame SpdyFramer::SerializeAcceptCh(
    const SpdyAcceptChIR& accept_ch) const {
  const size_t total_size = accept_ch.size();
  SpdyFrameBuilder builder(total_size);
  builder.BeginNewFrame(SpdyFrameType::ACCEPT_CH, kNoFlags,
                        accept_ch.stream_id());

  for (const AcceptChOriginValuePair& entry : accept_ch.entries()) {
    builder.WriteUInt16(entry.origin.size());
    builder.WriteBytes(entry.origin.data(), entry.origin.size());
    builder.WriteUInt16(entry.value.size());
    builder.WriteBytes(entry.value.data(), entry.value.size());
  }

  QUICHE_DCHECK_EQ(total_size, builder.length());
  return builder.take();
}
// Serializes an extension/unknown frame verbatim.  BeginNewUncheckedFrame
// skips type/flag validation; note the declared length comes from
// unknown.length() and so may differ from the actual payload size.
SpdySerializedFrame SpdyFramer::SerializeUnknown(
    const SpdyUnknownIR& unknown) const {
  const size_t total_size = kFrameHeaderSize + unknown.payload().size();
  SpdyFrameBuilder builder(total_size);
  builder.BeginNewUncheckedFrame(unknown.type(), unknown.flags(),
                                 unknown.stream_id(), unknown.length());
  builder.WriteBytes(unknown.payload().data(), unknown.payload().size());
  return builder.take();
}
namespace {
// Visitor that dispatches a SpdyFrameIR to the matching SpdyFramer
// Serialize* method and stores the resulting frame for retrieval via
// ReleaseSerializedFrame().
class FrameSerializationVisitor : public SpdyFrameVisitor {
 public:
  explicit FrameSerializationVisitor(SpdyFramer* framer)
      : framer_(framer), frame_() {}
  ~FrameSerializationVisitor() override = default;

  SpdySerializedFrame ReleaseSerializedFrame() { return std::move(frame_); }

  void VisitData(const SpdyDataIR& data) override {
    frame_ = framer_->SerializeData(data);
  }
  void VisitRstStream(const SpdyRstStreamIR& rst_stream) override {
    frame_ = framer_->SerializeRstStream(rst_stream);
  }
  void VisitSettings(const SpdySettingsIR& settings) override {
    frame_ = framer_->SerializeSettings(settings);
  }
  void VisitPing(const SpdyPingIR& ping) override {
    frame_ = framer_->SerializePing(ping);
  }
  void VisitGoAway(const SpdyGoAwayIR& goaway) override {
    frame_ = framer_->SerializeGoAway(goaway);
  }
  void VisitHeaders(const SpdyHeadersIR& headers) override {
    frame_ = framer_->SerializeHeaders(headers);
  }
  void VisitWindowUpdate(const SpdyWindowUpdateIR& window_update) override {
    frame_ = framer_->SerializeWindowUpdate(window_update);
  }
  void VisitPushPromise(const SpdyPushPromiseIR& push_promise) override {
    frame_ = framer_->SerializePushPromise(push_promise);
  }
  void VisitContinuation(const SpdyContinuationIR& continuation) override {
    frame_ = framer_->SerializeContinuation(continuation);
  }
  void VisitAltSvc(const SpdyAltSvcIR& altsvc) override {
    frame_ = framer_->SerializeAltSvc(altsvc);
  }
  void VisitPriority(const SpdyPriorityIR& priority) override {
    frame_ = framer_->SerializePriority(priority);
  }
  void VisitPriorityUpdate(
      const SpdyPriorityUpdateIR& priority_update) override {
    frame_ = framer_->SerializePriorityUpdate(priority_update);
  }
  void VisitAcceptCh(const SpdyAcceptChIR& accept_ch) override {
    frame_ = framer_->SerializeAcceptCh(accept_ch);
  }
  void VisitUnknown(const SpdyUnknownIR& unknown) override {
    frame_ = framer_->SerializeUnknown(unknown);
  }

 private:
  SpdyFramer* framer_;  // Not owned.
  SpdySerializedFrame frame_;
};
class FlagsSerializationVisitor : public SpdyFrameVisitor {
public:
void VisitData(const SpdyDataIR& data) override {
flags_ = DATA_FLAG_NONE;
if (data.fin()) {
flags_ |= DATA_FLAG_FIN;
}
if (data.padded()) {
flags_ |= DATA_FLAG_PADDED;
}
}
void VisitRstStream(const SpdyRstStreamIR& ) override {
flags_ = kNoFlags;
}
void VisitSettings(const SpdySettingsIR& settings) override {
flags_ = kNoFlags;
if (settings.is_ack()) {
flags_ |= SETTINGS_FLAG_ACK;
}
}
void VisitPing(const SpdyPingIR& ping) override {
flags_ = kNoFlags;
if (ping.is_ack()) {
flags_ |= PING_FLAG_ACK;
}
}
void VisitGoAway(const SpdyGoAwayIR& ) override {
flags_ = kNoFlags;
}
void VisitHeaders(const SpdyHeadersIR& headers) override {
flags_ = HEADERS_FLAG_END_HEADERS;
if (headers.fin()) {
flags_ |= CONTROL_FLAG_FIN;
}
if (headers.padded()) {
flags_ |= HEADERS_FLAG_PADDED;
}
if (headers.has_priority()) {
flags_ |= HEADERS_FLAG_PRIORITY;
}
}
void VisitWindowUpdate(const SpdyWindowUpdateIR& ) override {
flags_ = kNoFlags;
}
void VisitPushPromise(const SpdyPushPromiseIR& push_promise) override {
flags_ = PUSH_PROMISE_FLAG_END_PUSH_PROMISE;
if (push_promise.padded()) {
flags_ |= PUSH_PROMISE_FLAG_PADDED;
}
}
void VisitContinuation(const SpdyContinuationIR& ) override {
flags_ = HEADERS_FLAG_END_HEADERS;
}
void VisitAltSvc(const SpdyAltSvcIR& ) override {
flags_ = kNoFlags;
}
void VisitPriority(const SpdyPriorityIR& ) override {
flags_ = kNoFlags;
}
void VisitPriorityUpdate(
const SpdyPriorityUpdateIR& ) override {
flags_ = kNoFlags;
}
void VisitAcceptCh(const SpdyAcceptChIR& ) override {
flags_ = kNoFlags;
}
uint8_t flags() const { return flags_; }
private:
uint8_t flags_ = kNoFlags;
};
}
// Serializes |frame| into a freshly allocated SpdySerializedFrame by
// dispatching on its concrete IR type.
SpdySerializedFrame SpdyFramer::SerializeFrame(const SpdyFrameIR& frame) {
  FrameSerializationVisitor serialization_visitor(this);
  frame.Visit(&serialization_visitor);
  return serialization_visitor.ReleaseSerializedFrame();
}
uint8_t SpdyFramer::GetSerializedFlags(const SpdyFrameIR& frame) {
FlagsSerializationVisitor visitor;
frame.Visit(&visitor);
return visitor.flags();
}
// Serializes a complete DATA frame -- header, optional PAD_LENGTH field,
// payload, and zero padding -- into |output|.  Returns false if the builder
// runs out of space.
bool SpdyFramer::SerializeData(const SpdyDataIR& data_ir,
                               ZeroCopyOutputBuffer* output) const {
  uint8_t flags = DATA_FLAG_NONE;
  int num_padding_fields = 0;
  size_t size_with_padding = 0;
  // Computes flags and the total on-wire size (payload plus padding fields).
  SerializeDataBuilderHelper(data_ir, &flags, &num_padding_fields,
                             &size_with_padding);
  SpdyFrameBuilder builder(size_with_padding, output);
  bool ok =
      builder.BeginNewFrame(SpdyFrameType::DATA, flags, data_ir.stream_id());
  if (data_ir.padded()) {
    // One-byte PAD_LENGTH field precedes the data when PADDED is set.
    ok = ok && builder.WriteUInt8(data_ir.padding_payload_len() & 0xff);
  }
  ok = ok && builder.WriteBytes(data_ir.data(), data_ir.data_len());
  if (data_ir.padding_payload_len() > 0) {
    // Padding bytes are all zero.
    std::string padding;
    padding = std::string(data_ir.padding_payload_len(), 0);
    ok = ok && builder.WriteBytes(padding.data(), padding.length());
  }
  QUICHE_DCHECK_EQ(size_with_padding, builder.length());
  return ok;
}
// Serializes only the DATA frame header (and the PAD_LENGTH field when the
// frame is padded) into |output|; the payload itself is not written.  The
// header's length field still accounts for the full payload plus padding so
// the caller can stream the body separately.
bool SpdyFramer::SerializeDataFrameHeaderWithPaddingLengthField(
    const SpdyDataIR& data_ir, ZeroCopyOutputBuffer* output) const {
  uint8_t flags = DATA_FLAG_NONE;
  size_t frame_size = 0;
  size_t num_padding_fields = 0;
  SerializeDataFrameHeaderWithPaddingLengthFieldBuilderHelper(
      data_ir, &flags, &frame_size, &num_padding_fields);
  SpdyFrameBuilder builder(frame_size, output);
  bool ok = true;
  // Length covers PAD_LENGTH (if present) + data + padding, even though only
  // the header (and PAD_LENGTH) are written here.
  ok = ok &&
       builder.BeginNewFrame(SpdyFrameType::DATA, flags, data_ir.stream_id(),
                             num_padding_fields + data_ir.data_len() +
                                 data_ir.padding_payload_len());
  if (data_ir.padded()) {
    ok = ok && builder.WriteUInt8(data_ir.padding_payload_len() & 0xff);
  }
  QUICHE_DCHECK_EQ(frame_size, builder.length());
  return ok;
}
// Serializes a RST_STREAM frame into |output|.  RST_STREAM has a fixed wire
// size: the frame header plus a 4-byte error code.
bool SpdyFramer::SerializeRstStream(const SpdyRstStreamIR& rst_stream,
                                    ZeroCopyOutputBuffer* output) const {
  const size_t frame_size = kRstStreamFrameSize;
  SpdyFrameBuilder builder(frame_size, output);
  bool ok = builder.BeginNewFrame(SpdyFrameType::RST_STREAM, 0,
                                  rst_stream.stream_id());
  if (ok) {
    ok = builder.WriteUInt32(rst_stream.error_code());
  }
  QUICHE_DCHECK_EQ(frame_size, builder.length());
  return ok;
}
// Serializes a SETTINGS frame into |output|.  An ACK carries no payload;
// otherwise each setting is written as a 16-bit id followed by a 32-bit
// value.
bool SpdyFramer::SerializeSettings(const SpdySettingsIR& settings,
                                   ZeroCopyOutputBuffer* output) const {
  uint8_t flags = 0;
  size_t size = 0;
  const SettingsMap* values = &(settings.values());
  SerializeSettingsBuilderHelper(settings, &flags, values, &size);
  SpdyFrameBuilder builder(size, output);
  bool ok = builder.BeginNewFrame(SpdyFrameType::SETTINGS, flags, 0);
  // If this is an ACK, there is nothing else to write.
  if (settings.is_ack()) {
    return ok;
  }
  QUICHE_DCHECK_EQ(kSettingsFrameMinimumSize, builder.length());
  for (const auto& [id, value] : *values) {
    const int setting_id = id;
    QUICHE_DCHECK_GE(setting_id, 0);
    ok = ok && builder.WriteUInt16(static_cast<SpdySettingsId>(setting_id)) &&
         builder.WriteUInt32(value);
  }
  QUICHE_DCHECK_EQ(size, builder.length());
  return ok;
}
// Serializes a PING frame into |output|: flags (ACK if applicable) plus the
// 8-byte opaque ping id.
bool SpdyFramer::SerializePing(const SpdyPingIR& ping,
                               ZeroCopyOutputBuffer* output) const {
  uint8_t flags = 0;
  if (ping.is_ack()) {
    flags |= PING_FLAG_ACK;
  }
  SpdyFrameBuilder builder(kPingFrameSize, output);
  bool ok = builder.BeginNewFrame(SpdyFrameType::PING, flags, 0) &&
            builder.WriteUInt64(ping.id());
  QUICHE_DCHECK_EQ(kPingFrameSize, builder.length());
  return ok;
}
// Serializes a GOAWAY frame into |output|: last-good stream id, error code,
// and the optional opaque debug description.
bool SpdyFramer::SerializeGoAway(const SpdyGoAwayIR& goaway,
                                 ZeroCopyOutputBuffer* output) const {
  const size_t expected_length =
      kGoawayFrameMinimumSize + goaway.description().size();
  SpdyFrameBuilder builder(expected_length, output);
  bool ok = builder.BeginNewFrame(SpdyFrameType::GOAWAY, 0, 0) &&
            builder.WriteUInt32(goaway.last_good_stream_id()) &&
            builder.WriteUInt32(goaway.error_code());
  if (!goaway.description().empty()) {
    ok = ok && builder.WriteBytes(goaway.description().data(),
                                  goaway.description().size());
  }
  QUICHE_DCHECK_EQ(expected_length, builder.length());
  return ok;
}
// Serializes a HEADERS frame (plus any CONTINUATION frames needed for a
// large HPACK payload) into |output|.  Writes optional padding and priority
// fields before the encoded header block.
bool SpdyFramer::SerializeHeaders(const SpdyHeadersIR& headers,
                                  ZeroCopyOutputBuffer* output) {
  uint8_t flags = 0;
  size_t size = 0;
  std::string hpack_encoding;
  int weight = 0;
  size_t length_field = 0;
  // Computes flags, total size, the HPACK-encoded block, the weight to
  // serialize, and the first frame's length field.
  SerializeHeadersBuilderHelper(headers, &flags, &size, &hpack_encoding,
                                &weight, &length_field);
  bool ok = true;
  SpdyFrameBuilder builder(size, output);
  ok = ok && builder.BeginNewFrame(SpdyFrameType::HEADERS, flags,
                                   headers.stream_id(), length_field);
  QUICHE_DCHECK_EQ(kHeadersFrameMinimumSize, builder.length());
  int padding_payload_len = 0;
  if (headers.padded()) {
    ok = ok && builder.WriteUInt8(headers.padding_payload_len());
    padding_payload_len = headers.padding_payload_len();
  }
  if (headers.has_priority()) {
    // Exclusive bit packed into the 31-bit parent stream id, then the weight
    // encoded as (weight - 1) so 1..256 fits in one byte.
    ok = ok &&
         builder.WriteUInt32(PackStreamDependencyValues(
             headers.exclusive(), headers.parent_stream_id())) &&
         builder.WriteUInt8(weight - 1);
  }
  // Splits the HPACK payload across HEADERS + CONTINUATION frames as needed.
  ok = ok && WritePayloadWithContinuation(
                 &builder, hpack_encoding, headers.stream_id(),
                 SpdyFrameType::HEADERS, padding_payload_len);
  if (debug_visitor_) {
    // Reports uncompressed vs. serialized sizes for instrumentation.
    const size_t header_list_size =
        GetUncompressedSerializedLength(headers.header_block());
    debug_visitor_->OnSendCompressedFrame(headers.stream_id(),
                                          SpdyFrameType::HEADERS,
                                          header_list_size, builder.length());
  }
  return ok;
}
// Serializes a WINDOW_UPDATE frame into |output|: a fixed-size frame carrying
// the window-size delta for the given stream.
bool SpdyFramer::SerializeWindowUpdate(const SpdyWindowUpdateIR& window_update,
                                       ZeroCopyOutputBuffer* output) const {
  SpdyFrameBuilder builder(kWindowUpdateFrameSize, output);
  const bool ok = builder.BeginNewFrame(SpdyFrameType::WINDOW_UPDATE, kNoFlags,
                                        window_update.stream_id()) &&
                  builder.WriteUInt32(window_update.delta());
  QUICHE_DCHECK_EQ(kWindowUpdateFrameSize, builder.length());
  return ok;
}
// Serializes a PUSH_PROMISE frame (plus CONTINUATION frames if the HPACK
// payload does not fit in one control frame) into |output|.
bool SpdyFramer::SerializePushPromise(const SpdyPushPromiseIR& push_promise,
                                      ZeroCopyOutputBuffer* output) {
  uint8_t flags = 0;
  size_t size = 0;
  std::string hpack_encoding;
  SerializePushPromiseBuilderHelper(push_promise, &flags, &hpack_encoding,
                                    &size);
  bool ok = true;
  SpdyFrameBuilder builder(size, output);
  // The first frame's length field covers at most one maximum-size control
  // frame; any remainder goes into CONTINUATION frames.
  size_t length =
      std::min(size, kHttp2MaxControlFrameSendSize) - kFrameHeaderSize;
  ok = builder.BeginNewFrame(SpdyFrameType::PUSH_PROMISE, flags,
                             push_promise.stream_id(), length);
  int padding_payload_len = 0;
  if (push_promise.padded()) {
    // PAD_LENGTH byte precedes the promised stream id when PADDED is set.
    ok = ok && builder.WriteUInt8(push_promise.padding_payload_len()) &&
         builder.WriteUInt32(push_promise.promised_stream_id());
    QUICHE_DCHECK_EQ(kPushPromiseFrameMinimumSize + kPadLengthFieldSize,
                     builder.length());
    padding_payload_len = push_promise.padding_payload_len();
  } else {
    ok = ok && builder.WriteUInt32(push_promise.promised_stream_id());
    QUICHE_DCHECK_EQ(kPushPromiseFrameMinimumSize, builder.length());
  }
  ok = ok && WritePayloadWithContinuation(
                 &builder, hpack_encoding, push_promise.stream_id(),
                 SpdyFrameType::PUSH_PROMISE, padding_payload_len);
  if (debug_visitor_) {
    // Reports uncompressed vs. serialized sizes for instrumentation.
    const size_t header_list_size =
        GetUncompressedSerializedLength(push_promise.header_block());
    debug_visitor_->OnSendCompressedFrame(push_promise.stream_id(),
                                          SpdyFrameType::PUSH_PROMISE,
                                          header_list_size, builder.length());
  }
  return ok;
}
// Serializes a CONTINUATION frame into |output|, carrying a chunk of an
// already-HPACK-encoded header block.
bool SpdyFramer::SerializeContinuation(const SpdyContinuationIR& continuation,
                                       ZeroCopyOutputBuffer* output) const {
  const std::string& hpack_payload = continuation.encoding();
  const size_t frame_size = kContinuationFrameMinimumSize + hpack_payload.size();
  SpdyFrameBuilder builder(frame_size, output);
  const uint8_t flags =
      continuation.end_headers() ? HEADERS_FLAG_END_HEADERS : 0;
  bool ok = builder.BeginNewFrame(SpdyFrameType::CONTINUATION, flags,
                                  continuation.stream_id(),
                                  frame_size - kFrameHeaderSize);
  QUICHE_DCHECK_EQ(kFrameHeaderSize, builder.length());
  return ok && builder.WriteBytes(hpack_payload.data(), hpack_payload.size());
}
// Serializes an ALTSVC frame into |output|: a length-prefixed origin followed
// by the wire-format alternative-service value.
bool SpdyFramer::SerializeAltSvc(const SpdyAltSvcIR& altsvc_ir,
                                 ZeroCopyOutputBuffer* output) {
  std::string value;
  size_t size = 0;
  SerializeAltSvcBuilderHelper(altsvc_ir, &value, &size);
  SpdyFrameBuilder builder(size, output);
  bool ok = builder.BeginNewFrame(SpdyFrameType::ALTSVC, kNoFlags,
                                  altsvc_ir.stream_id());
  ok = ok && builder.WriteUInt16(altsvc_ir.origin().length());
  ok = ok && builder.WriteBytes(altsvc_ir.origin().data(),
                                altsvc_ir.origin().length());
  ok = ok && builder.WriteBytes(value.data(), value.length());
  QUICHE_DCHECK_LT(kGetAltSvcFrameMinimumSize, builder.length());
  return ok;
}
// Serializes a PRIORITY frame into |output|.
bool SpdyFramer::SerializePriority(const SpdyPriorityIR& priority,
                                   ZeroCopyOutputBuffer* output) const {
  SpdyFrameBuilder builder(kPriorityFrameSize, output);
  bool ok = builder.BeginNewFrame(SpdyFrameType::PRIORITY, kNoFlags,
                                  priority.stream_id());
  if (ok) {
    // Exclusive bit packed into the 31-bit parent stream id, then the weight
    // encoded as (weight - 1) so 1..256 fits in one byte.
    ok = builder.WriteUInt32(PackStreamDependencyValues(
             priority.exclusive(), priority.parent_stream_id())) &&
         builder.WriteUInt8(priority.weight() - 1);
  }
  QUICHE_DCHECK_EQ(kPriorityFrameSize, builder.length());
  return ok;
}
// Serializes a PRIORITY_UPDATE frame into |output|: the prioritized stream id
// followed by the ASCII priority field value.
bool SpdyFramer::SerializePriorityUpdate(
    const SpdyPriorityUpdateIR& priority_update,
    ZeroCopyOutputBuffer* output) const {
  const size_t total_size = kPriorityUpdateFrameMinimumSize +
                            priority_update.priority_field_value().size();
  SpdyFrameBuilder builder(total_size, output);
  bool ok =
      builder.BeginNewFrame(SpdyFrameType::PRIORITY_UPDATE, kNoFlags,
                            priority_update.stream_id()) &&
      builder.WriteUInt32(priority_update.prioritized_stream_id()) &&
      builder.WriteBytes(priority_update.priority_field_value().data(),
                         priority_update.priority_field_value().size());
  QUICHE_DCHECK_EQ(total_size, builder.length());
  return ok;
}
// Serializes an ACCEPT_CH frame into |output|.  Each entry is a
// length-prefixed origin followed by a length-prefixed Accept-CH value.
bool SpdyFramer::SerializeAcceptCh(const SpdyAcceptChIR& accept_ch,
                                   ZeroCopyOutputBuffer* output) const {
  const size_t total_size = accept_ch.size();
  SpdyFrameBuilder builder(total_size, output);
  bool ok = builder.BeginNewFrame(SpdyFrameType::ACCEPT_CH, kNoFlags,
                                  accept_ch.stream_id());
  for (const AcceptChOriginValuePair& entry : accept_ch.entries()) {
    ok = ok && builder.WriteUInt16(entry.origin.size()) &&
         builder.WriteBytes(entry.origin.data(), entry.origin.size()) &&
         builder.WriteUInt16(entry.value.size()) &&
         builder.WriteBytes(entry.value.data(), entry.value.size());
  }
  QUICHE_DCHECK_EQ(total_size, builder.length());
  return ok;
}
// Serializes a frame of unknown (extension) type into |output|.  "Unchecked"
// because the type need not be one the framer recognizes, and the IR's
// declared length is trusted as-is.
bool SpdyFramer::SerializeUnknown(const SpdyUnknownIR& unknown,
                                  ZeroCopyOutputBuffer* output) const {
  const size_t total_size = kFrameHeaderSize + unknown.payload().size();
  SpdyFrameBuilder builder(total_size, output);
  bool ok = builder.BeginNewUncheckedFrame(
      unknown.type(), unknown.flags(), unknown.stream_id(), unknown.length());
  if (ok) {
    ok = builder.WriteBytes(unknown.payload().data(), unknown.payload().size());
  }
  return ok;
}
namespace {

// Visitor that serializes each concrete SpdyFrameIR type directly into
// |output| via the corresponding SpdyFramer::Serialize*(ir, output) overload,
// recording whether the serialization succeeded.
class FrameSerializationVisitorWithOutput : public SpdyFrameVisitor {
 public:
  explicit FrameSerializationVisitorWithOutput(SpdyFramer* framer,
                                               ZeroCopyOutputBuffer* output)
      : framer_(framer), output_(output), result_(false) {}
  ~FrameSerializationVisitorWithOutput() override = default;

  // True iff the most recent Visit*() call serialized successfully.
  // (Previously declared as returning size_t even though result_ is a bool;
  // return bool directly to avoid the implicit widening conversion.)
  bool Result() const { return result_; }

  void VisitData(const SpdyDataIR& data) override {
    result_ = framer_->SerializeData(data, output_);
  }
  void VisitRstStream(const SpdyRstStreamIR& rst_stream) override {
    result_ = framer_->SerializeRstStream(rst_stream, output_);
  }
  void VisitSettings(const SpdySettingsIR& settings) override {
    result_ = framer_->SerializeSettings(settings, output_);
  }
  void VisitPing(const SpdyPingIR& ping) override {
    result_ = framer_->SerializePing(ping, output_);
  }
  void VisitGoAway(const SpdyGoAwayIR& goaway) override {
    result_ = framer_->SerializeGoAway(goaway, output_);
  }
  void VisitHeaders(const SpdyHeadersIR& headers) override {
    result_ = framer_->SerializeHeaders(headers, output_);
  }
  void VisitWindowUpdate(const SpdyWindowUpdateIR& window_update) override {
    result_ = framer_->SerializeWindowUpdate(window_update, output_);
  }
  void VisitPushPromise(const SpdyPushPromiseIR& push_promise) override {
    result_ = framer_->SerializePushPromise(push_promise, output_);
  }
  void VisitContinuation(const SpdyContinuationIR& continuation) override {
    result_ = framer_->SerializeContinuation(continuation, output_);
  }
  void VisitAltSvc(const SpdyAltSvcIR& altsvc) override {
    result_ = framer_->SerializeAltSvc(altsvc, output_);
  }
  void VisitPriority(const SpdyPriorityIR& priority) override {
    result_ = framer_->SerializePriority(priority, output_);
  }
  void VisitPriorityUpdate(
      const SpdyPriorityUpdateIR& priority_update) override {
    result_ = framer_->SerializePriorityUpdate(priority_update, output_);
  }
  void VisitAcceptCh(const SpdyAcceptChIR& accept_ch) override {
    result_ = framer_->SerializeAcceptCh(accept_ch, output_);
  }
  void VisitUnknown(const SpdyUnknownIR& unknown) override {
    result_ = framer_->SerializeUnknown(unknown, output_);
  }

 private:
  SpdyFramer* framer_;
  ZeroCopyOutputBuffer* output_;
  bool result_;
};

}  // namespace
// Serializes |frame| into |output| and returns the number of bytes written,
// or 0 on failure.
size_t SpdyFramer::SerializeFrame(const SpdyFrameIR& frame,
                                  ZeroCopyOutputBuffer* output) {
  FrameSerializationVisitorWithOutput visitor(this, output);
  const size_t free_bytes_before = output->BytesFree();
  frame.Visit(&visitor);
  if (!visitor.Result()) {
    return 0;
  }
  return free_bytes_before - output->BytesFree();
}
// Returns the HPACK encoder, lazily constructing it on first use so a framer
// that never serializes headers pays nothing for it.
HpackEncoder* SpdyFramer::GetHpackEncoder() {
  if (hpack_encoder_ != nullptr) {
    return hpack_encoder_.get();
  }
  hpack_encoder_ = std::make_unique<HpackEncoder>();
  if (!compression_enabled()) {
    hpack_encoder_->DisableCompression();
  }
  return hpack_encoder_.get();
}
// Applies a peer-advertised SETTINGS_HEADER_TABLE_SIZE to the HPACK encoder
// (constructing the encoder if it does not exist yet).
void SpdyFramer::UpdateHeaderEncoderTableSize(uint32_t value) {
  GetHpackEncoder()->ApplyHeaderTableSizeSetting(value);
}
// Returns the encoder's current header-table-size setting; before the encoder
// exists, reports the protocol default.
size_t SpdyFramer::header_encoder_table_size() const {
  return hpack_encoder_ == nullptr
             ? kDefaultHeaderTableSizeSetting
             : hpack_encoder_->CurrentHeaderTableSizeSetting();
}
} | #include "quiche/http2/core/spdy_framer.h"
#include <stdlib.h>
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <ios>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/macros.h"
#include "absl/strings/string_view.h"
#include "quiche/http2/core/array_output_buffer.h"
#include "quiche/http2/core/http2_frame_decoder_adapter.h"
#include "quiche/http2/core/recording_headers_handler.h"
#include "quiche/http2/core/spdy_alt_svc_wire_format.h"
#include "quiche/http2/core/spdy_bitmasks.h"
#include "quiche/http2/core/spdy_frame_builder.h"
#include "quiche/http2/core/spdy_headers_handler_interface.h"
#include "quiche/http2/core/spdy_protocol.h"
#include "quiche/http2/hpack/hpack_encoder.h"
#include "quiche/http2/test_tools/mock_spdy_framer_visitor.h"
#include "quiche/http2/test_tools/spdy_test_utils.h"
#include "quiche/common/http/http_header_block.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/quiche_text_utils.h"
using ::http2::Http2DecoderAdapter;
using ::testing::_;
namespace spdy {
namespace test {
namespace {
// Backing store for whole-frame serialization output (1 MiB).
const int64_t kSize = 1024 * 1024;
char output_buffer[kSize] = "";
// Backing store used to collect frames emitted by the incremental frame
// iterators (64 KiB).
const int64_t buffer_size = 64 * 1024;
char frame_list_char[buffer_size] = "";
}  // namespace
// gMock stub for SpdyFramerDebugVisitorInterface, used to verify the
// compressed-frame instrumentation callbacks.
class MockDebugVisitor : public SpdyFramerDebugVisitorInterface {
 public:
  MOCK_METHOD(void, OnSendCompressedFrame,
              (SpdyStreamId stream_id, SpdyFrameType type, size_t payload_len,
               size_t frame_len),
              (override));
  MOCK_METHOD(void, OnReceiveCompressedFrame,
              (SpdyStreamId stream_id, SpdyFrameType type, size_t frame_len),
              (override));
};
// gMock matcher: succeeds when |arg| is byte-for-byte the concatenation of
// the frames in |*frame_list|.  On mismatch, emits a hex diff of the first
// differing frame.
MATCHER_P(IsFrameUnionOf, frame_list, "") {
  size_t size_verified = 0;
  for (const auto& frame : *frame_list) {
    if (arg.size() < size_verified + frame.size()) {
      QUICHE_LOG(FATAL)
          << "Incremental header serialization should not lead to a "
          << "higher total frame length than non-incremental method.";
      return false;
    }
    if (memcmp(arg.data() + size_verified, frame.data(), frame.size())) {
      CompareCharArraysWithHexError(
          "Header serialization methods should be equivalent: ",
          reinterpret_cast<unsigned char*>(arg.data() + size_verified),
          frame.size(), reinterpret_cast<unsigned char*>(frame.data()),
          frame.size());
      return false;
    }
    size_verified += frame.size();
  }
  // All frames matched; |arg| must also contain no trailing bytes.
  return size_verified == arg.size();
}
// Test peer with access to SpdyFramer internals.  Each Serialize* helper
// serializes the same IR twice -- once through the all-at-once API and once
// through the incremental frame iterator -- and EXPECTs the two outputs to be
// byte-identical before returning the all-at-once result.
class SpdyFramerPeer {
 public:
  // Deep-copies |headers| (the iterators take ownership of their input IR).
  static std::unique_ptr<SpdyHeadersIR> CloneSpdyHeadersIR(
      const SpdyHeadersIR& headers) {
    auto new_headers = std::make_unique<SpdyHeadersIR>(
        headers.stream_id(), headers.header_block().Clone());
    new_headers->set_fin(headers.fin());
    new_headers->set_has_priority(headers.has_priority());
    new_headers->set_weight(headers.weight());
    new_headers->set_parent_stream_id(headers.parent_stream_id());
    new_headers->set_exclusive(headers.exclusive());
    if (headers.padded()) {
      // set_padding_len() expects the total padding including the PAD_LENGTH
      // byte itself, hence the +1.
      new_headers->set_padding_len(headers.padding_payload_len() + 1);
    }
    return new_headers;
  }

  static SpdySerializedFrame SerializeHeaders(SpdyFramer* framer,
                                              const SpdyHeadersIR& headers) {
    SpdySerializedFrame serialized_headers_old_version(
        framer->SerializeHeaders(headers));
    // Reset HPACK state so the iterator pass starts from the same encoder
    // state the call above started from.
    framer->hpack_encoder_.reset(nullptr);
    auto* saved_debug_visitor = framer->debug_visitor_;
    framer->debug_visitor_ = nullptr;
    std::vector<SpdySerializedFrame> frame_list;
    ArrayOutputBuffer frame_list_buffer(frame_list_char, buffer_size);
    SpdyFramer::SpdyHeaderFrameIterator it(framer, CloneSpdyHeadersIR(headers));
    while (it.HasNextFrame()) {
      size_t size_before = frame_list_buffer.Size();
      EXPECT_GT(it.NextFrame(&frame_list_buffer), 0u);
      frame_list.emplace_back(
          MakeSerializedFrame(frame_list_buffer.Begin() + size_before,
                              frame_list_buffer.Size() - size_before));
    }
    framer->debug_visitor_ = saved_debug_visitor;
    EXPECT_THAT(serialized_headers_old_version, IsFrameUnionOf(&frame_list));
    return serialized_headers_old_version;
  }

  // Overload that serializes through |output| when one is supplied; falls
  // back to the in-memory overload when |output| is null.
  static SpdySerializedFrame SerializeHeaders(SpdyFramer* framer,
                                              const SpdyHeadersIR& headers,
                                              ArrayOutputBuffer* output) {
    if (output == nullptr) {
      return SerializeHeaders(framer, headers);
    }
    output->Reset();
    EXPECT_TRUE(framer->SerializeHeaders(headers, output));
    SpdySerializedFrame serialized_headers_old_version =
        MakeSerializedFrame(output->Begin(), output->Size());
    framer->hpack_encoder_.reset(nullptr);
    auto* saved_debug_visitor = framer->debug_visitor_;
    framer->debug_visitor_ = nullptr;
    std::vector<SpdySerializedFrame> frame_list;
    ArrayOutputBuffer frame_list_buffer(frame_list_char, buffer_size);
    SpdyFramer::SpdyHeaderFrameIterator it(framer, CloneSpdyHeadersIR(headers));
    while (it.HasNextFrame()) {
      size_t size_before = frame_list_buffer.Size();
      EXPECT_GT(it.NextFrame(&frame_list_buffer), 0u);
      frame_list.emplace_back(
          MakeSerializedFrame(frame_list_buffer.Begin() + size_before,
                              frame_list_buffer.Size() - size_before));
    }
    framer->debug_visitor_ = saved_debug_visitor;
    EXPECT_THAT(serialized_headers_old_version, IsFrameUnionOf(&frame_list));
    return serialized_headers_old_version;
  }

  // Deep-copies |push_promise| (the iterators take ownership of their input).
  static std::unique_ptr<SpdyPushPromiseIR> CloneSpdyPushPromiseIR(
      const SpdyPushPromiseIR& push_promise) {
    auto new_push_promise = std::make_unique<SpdyPushPromiseIR>(
        push_promise.stream_id(), push_promise.promised_stream_id(),
        push_promise.header_block().Clone());
    new_push_promise->set_fin(push_promise.fin());
    if (push_promise.padded()) {
      // +1 for the PAD_LENGTH byte, as above.
      new_push_promise->set_padding_len(push_promise.padding_payload_len() + 1);
    }
    return new_push_promise;
  }

  static SpdySerializedFrame SerializePushPromise(
      SpdyFramer* framer, const SpdyPushPromiseIR& push_promise) {
    SpdySerializedFrame serialized_headers_old_version =
        framer->SerializePushPromise(push_promise);
    framer->hpack_encoder_.reset(nullptr);
    auto* saved_debug_visitor = framer->debug_visitor_;
    framer->debug_visitor_ = nullptr;
    std::vector<SpdySerializedFrame> frame_list;
    ArrayOutputBuffer frame_list_buffer(frame_list_char, buffer_size);
    frame_list_buffer.Reset();
    SpdyFramer::SpdyPushPromiseFrameIterator it(
        framer, CloneSpdyPushPromiseIR(push_promise));
    while (it.HasNextFrame()) {
      size_t size_before = frame_list_buffer.Size();
      EXPECT_GT(it.NextFrame(&frame_list_buffer), 0u);
      frame_list.emplace_back(
          MakeSerializedFrame(frame_list_buffer.Begin() + size_before,
                              frame_list_buffer.Size() - size_before));
    }
    framer->debug_visitor_ = saved_debug_visitor;
    EXPECT_THAT(serialized_headers_old_version, IsFrameUnionOf(&frame_list));
    return serialized_headers_old_version;
  }

  // Overload that serializes through |output| when one is supplied.
  static SpdySerializedFrame SerializePushPromise(
      SpdyFramer* framer, const SpdyPushPromiseIR& push_promise,
      ArrayOutputBuffer* output) {
    if (output == nullptr) {
      return SerializePushPromise(framer, push_promise);
    }
    output->Reset();
    EXPECT_TRUE(framer->SerializePushPromise(push_promise, output));
    SpdySerializedFrame serialized_headers_old_version =
        MakeSerializedFrame(output->Begin(), output->Size());
    framer->hpack_encoder_.reset(nullptr);
    auto* saved_debug_visitor = framer->debug_visitor_;
    framer->debug_visitor_ = nullptr;
    std::vector<SpdySerializedFrame> frame_list;
    ArrayOutputBuffer frame_list_buffer(frame_list_char, buffer_size);
    frame_list_buffer.Reset();
    SpdyFramer::SpdyPushPromiseFrameIterator it(
        framer, CloneSpdyPushPromiseIR(push_promise));
    while (it.HasNextFrame()) {
      size_t size_before = frame_list_buffer.Size();
      EXPECT_GT(it.NextFrame(&frame_list_buffer), 0u);
      frame_list.emplace_back(
          MakeSerializedFrame(frame_list_buffer.Begin() + size_before,
                              frame_list_buffer.Size() - size_before));
    }
    framer->debug_visitor_ = saved_debug_visitor;
    EXPECT_THAT(serialized_headers_old_version, IsFrameUnionOf(&frame_list));
    return serialized_headers_old_version;
  }
};
// SpdyFramerVisitorInterface implementation that records every decoder
// callback into counters and fields the tests inspect.  Also implements the
// debug-visitor interface to capture compressed-frame sizes.
class TestSpdyVisitor : public SpdyFramerVisitorInterface,
                        public SpdyFramerDebugVisitorInterface {
 public:
  // Large enough to hold any header block the tests produce.
  static constexpr size_t kDefaultHeaderBufferSize = 16 * 1024 * 1024;

  explicit TestSpdyVisitor(SpdyFramer::CompressionOption option)
      : framer_(option),
        error_count_(0),
        headers_frame_count_(0),
        push_promise_frame_count_(0),
        goaway_count_(0),
        setting_count_(0),
        settings_ack_sent_(0),
        settings_ack_received_(0),
        continuation_count_(0),
        altsvc_count_(0),
        priority_count_(0),
        unknown_frame_count_(0),
        on_unknown_frame_result_(false),
        last_window_update_stream_(0),
        last_window_update_delta_(0),
        last_push_promise_stream_(0),
        last_push_promise_promised_stream_(0),
        data_bytes_(0),
        fin_frame_count_(0),
        fin_flag_count_(0),
        end_of_stream_count_(0),
        control_frame_header_data_count_(0),
        zero_length_control_frame_header_data_count_(0),
        data_frame_count_(0),
        last_payload_len_(0),
        last_frame_len_(0),
        unknown_payload_len_(0),
        header_buffer_(new char[kDefaultHeaderBufferSize]),
        header_buffer_length_(0),
        header_buffer_size_(kDefaultHeaderBufferSize),
        header_stream_id_(static_cast<SpdyStreamId>(-1)),
        header_control_type_(SpdyFrameType::DATA),
        header_buffer_valid_(false) {}

  void OnError(Http2DecoderAdapter::SpdyFramerError error,
               std::string /*detailed_error*/) override {
    QUICHE_VLOG(1) << "SpdyFramer Error: "
                   << Http2DecoderAdapter::SpdyFramerErrorToString(error);
    ++error_count_;
  }

  void OnDataFrameHeader(SpdyStreamId stream_id, size_t length,
                         bool fin) override {
    QUICHE_VLOG(1) << "OnDataFrameHeader(" << stream_id << ", " << length
                   << ", " << fin << ")";
    ++data_frame_count_;
    header_stream_id_ = stream_id;
  }

  void OnStreamFrameData(SpdyStreamId stream_id, const char* data,
                         size_t len) override {
    QUICHE_VLOG(1) << "OnStreamFrameData(" << stream_id << ", data, " << len
                   << ", "
                   << ") data:\n"
                   << quiche::QuicheTextUtils::HexDump(
                          absl::string_view(data, len));
    EXPECT_EQ(header_stream_id_, stream_id);
    data_bytes_ += len;
  }

  void OnStreamEnd(SpdyStreamId stream_id) override {
    QUICHE_VLOG(1) << "OnStreamEnd(" << stream_id << ")";
    EXPECT_EQ(header_stream_id_, stream_id);
    ++end_of_stream_count_;
  }

  void OnStreamPadLength(SpdyStreamId stream_id, size_t value) override {
    QUICHE_VLOG(1) << "OnStreamPadding(" << stream_id << ", " << value << ")\n";
    EXPECT_EQ(header_stream_id_, stream_id);
    // The PAD_LENGTH field itself counts as one data byte.
    data_bytes_ += 1;
  }

  void OnStreamPadding(SpdyStreamId stream_id, size_t len) override {
    QUICHE_VLOG(1) << "OnStreamPadding(" << stream_id << ", " << len << ")\n";
    EXPECT_EQ(header_stream_id_, stream_id);
    data_bytes_ += len;
  }

  SpdyHeadersHandlerInterface* OnHeaderFrameStart(
      SpdyStreamId /*stream_id*/) override {
    if (headers_handler_ == nullptr) {
      headers_handler_ = std::make_unique<RecordingHeadersHandler>();
    }
    return headers_handler_.get();
  }

  void OnHeaderFrameEnd(SpdyStreamId /*stream_id*/) override {
    QUICHE_CHECK(headers_handler_ != nullptr);
    // Snapshot the decoded block and byte count, then drop the handler so the
    // next header frame starts fresh.
    headers_ = headers_handler_->decoded_block().Clone();
    header_bytes_received_ = headers_handler_->uncompressed_header_bytes();
    headers_handler_.reset();
  }

  void OnRstStream(SpdyStreamId stream_id, SpdyErrorCode error_code) override {
    QUICHE_VLOG(1) << "OnRstStream(" << stream_id << ", " << error_code << ")";
    ++fin_frame_count_;
  }

  void OnSetting(SpdySettingsId id, uint32_t value) override {
    QUICHE_VLOG(1) << "OnSetting(" << id << ", " << std::hex << value << ")";
    ++setting_count_;
  }

  void OnSettingsAck() override {
    QUICHE_VLOG(1) << "OnSettingsAck";
    ++settings_ack_received_;
  }

  void OnSettingsEnd() override {
    QUICHE_VLOG(1) << "OnSettingsEnd";
    ++settings_ack_sent_;
  }

  // Tests that exercise PING install their own expectations; an unexpected
  // PING is a test failure.
  void OnPing(SpdyPingId unique_id, bool is_ack) override {
    QUICHE_LOG(DFATAL) << "OnPing(" << unique_id << ", " << (is_ack ? 1 : 0)
                       << ")";
  }

  void OnGoAway(SpdyStreamId last_accepted_stream_id,
                SpdyErrorCode error_code) override {
    QUICHE_VLOG(1) << "OnGoAway(" << last_accepted_stream_id << ", "
                   << error_code << ")";
    ++goaway_count_;
  }

  void OnHeaders(SpdyStreamId stream_id, size_t payload_length,
                 bool has_priority, int weight, SpdyStreamId parent_stream_id,
                 bool exclusive, bool fin, bool end) override {
    QUICHE_VLOG(1) << "OnHeaders(" << stream_id << ", " << payload_length
                   << ", " << has_priority << ", " << weight << ", "
                   << parent_stream_id << ", " << exclusive << ", " << fin
                   << ", " << end << ")";
    ++headers_frame_count_;
    InitHeaderStreaming(SpdyFrameType::HEADERS, stream_id);
    if (fin) {
      ++fin_flag_count_;
    }
    header_has_priority_ = has_priority;
    header_parent_stream_id_ = parent_stream_id;
    header_exclusive_ = exclusive;
  }

  void OnWindowUpdate(SpdyStreamId stream_id, int delta_window_size) override {
    QUICHE_VLOG(1) << "OnWindowUpdate(" << stream_id << ", "
                   << delta_window_size << ")";
    last_window_update_stream_ = stream_id;
    last_window_update_delta_ = delta_window_size;
  }

  void OnPushPromise(SpdyStreamId stream_id, SpdyStreamId promised_stream_id,
                     bool end) override {
    QUICHE_VLOG(1) << "OnPushPromise(" << stream_id << ", "
                   << promised_stream_id << ", " << end << ")";
    ++push_promise_frame_count_;
    InitHeaderStreaming(SpdyFrameType::PUSH_PROMISE, stream_id);
    last_push_promise_stream_ = stream_id;
    last_push_promise_promised_stream_ = promised_stream_id;
  }

  void OnContinuation(SpdyStreamId stream_id, size_t payload_size,
                      bool end) override {
    QUICHE_VLOG(1) << "OnContinuation(" << stream_id << ", " << payload_size
                   << ", " << end << ")";
    ++continuation_count_;
  }

  void OnAltSvc(SpdyStreamId stream_id, absl::string_view origin,
                const SpdyAltSvcWireFormat::AlternativeServiceVector&
                    altsvc_vector) override {
    QUICHE_VLOG(1) << "OnAltSvc(" << stream_id << ", \"" << origin
                   << "\", altsvc_vector)";
    // Rebuild an IR from the callback so tests can compare it to the input.
    test_altsvc_ir_ = std::make_unique<SpdyAltSvcIR>(stream_id);
    if (origin.length() > 0) {
      test_altsvc_ir_->set_origin(std::string(origin));
    }
    for (const auto& altsvc : altsvc_vector) {
      test_altsvc_ir_->add_altsvc(altsvc);
    }
    ++altsvc_count_;
  }

  void OnPriority(SpdyStreamId stream_id, SpdyStreamId parent_stream_id,
                  int weight, bool exclusive) override {
    QUICHE_VLOG(1) << "OnPriority(" << stream_id << ", " << parent_stream_id
                   << ", " << weight << ", " << (exclusive ? 1 : 0) << ")";
    ++priority_count_;
  }

  void OnPriorityUpdate(SpdyStreamId prioritized_stream_id,
                        absl::string_view priority_field_value) override {
    QUICHE_VLOG(1) << "OnPriorityUpdate(" << prioritized_stream_id << ", "
                   << priority_field_value << ")";
  }

  // Return value controls whether the deframer keeps processing the unknown
  // frame; tests flip on_unknown_frame_result_ to exercise both paths.
  bool OnUnknownFrame(SpdyStreamId stream_id, uint8_t frame_type) override {
    QUICHE_VLOG(1) << "OnUnknownFrame(" << stream_id << ", " << frame_type
                   << ")";
    return on_unknown_frame_result_;
  }

  void OnUnknownFrameStart(SpdyStreamId stream_id, size_t length, uint8_t type,
                           uint8_t flags) override {
    QUICHE_VLOG(1) << "OnUnknownFrameStart(" << stream_id << ", " << length
                   << ", " << static_cast<int>(type) << ", "
                   << static_cast<int>(flags) << ")";
    ++unknown_frame_count_;
  }

  void OnUnknownFramePayload(SpdyStreamId stream_id,
                             absl::string_view payload) override {
    QUICHE_VLOG(1) << "OnUnknownFramePayload(" << stream_id << ", " << payload
                   << ")";
    unknown_payload_len_ += payload.length();
  }

  void OnSendCompressedFrame(SpdyStreamId stream_id, SpdyFrameType type,
                             size_t payload_len, size_t frame_len) override {
    QUICHE_VLOG(1) << "OnSendCompressedFrame(" << stream_id << ", " << type
                   << ", " << payload_len << ", " << frame_len << ")";
    last_payload_len_ = payload_len;
    last_frame_len_ = frame_len;
  }

  void OnReceiveCompressedFrame(SpdyStreamId stream_id, SpdyFrameType type,
                                size_t frame_len) override {
    QUICHE_VLOG(1) << "OnReceiveCompressedFrame(" << stream_id << ", " << type
                   << ", " << frame_len << ")";
    last_frame_len_ = frame_len;
  }

  // Feeds |input| to the deframer in randomly sized chunks (1..32 bytes) to
  // exercise incremental parsing, stopping on the first framer error.
  void SimulateInFramer(const unsigned char* input, size_t size) {
    deframer_.set_visitor(this);
    size_t input_remaining = size;
    const char* input_ptr = reinterpret_cast<const char*>(input);
    while (input_remaining > 0 && deframer_.spdy_framer_error() ==
                                      Http2DecoderAdapter::SPDY_NO_ERROR) {
      const size_t kMaxReadSize = 32;
      size_t bytes_read =
          (rand() % std::min(input_remaining, kMaxReadSize)) + 1;
      size_t bytes_processed = deframer_.ProcessInput(input_ptr, bytes_read);
      input_remaining -= bytes_processed;
      input_ptr += bytes_processed;
    }
  }

  // Resets header-collection state at the start of a HEADERS/PUSH_PROMISE.
  void InitHeaderStreaming(SpdyFrameType header_control_type,
                           SpdyStreamId stream_id) {
    if (!IsDefinedFrameType(SerializeFrameType(header_control_type))) {
      QUICHE_DLOG(FATAL) << "Attempted to init header streaming with "
                         << "invalid control frame type: "
                         << header_control_type;
    }
    memset(header_buffer_.get(), 0, header_buffer_size_);
    header_buffer_length_ = 0;
    header_stream_id_ = stream_id;
    header_control_type_ = header_control_type;
    header_buffer_valid_ = true;
  }

  void set_extension_visitor(ExtensionVisitorInterface* extension) {
    deframer_.set_extension_visitor(extension);
  }

  // Shrinks (or grows) the header buffer, e.g. to test overflow handling.
  void set_header_buffer_size(size_t header_buffer_size) {
    header_buffer_size_ = header_buffer_size;
    header_buffer_.reset(new char[header_buffer_size]);
  }

  SpdyFramer framer_;
  Http2DecoderAdapter deframer_;

  // Per-callback counters and last-seen values, inspected by the tests.
  int error_count_;
  int headers_frame_count_;
  int push_promise_frame_count_;
  int goaway_count_;
  int setting_count_;
  int settings_ack_sent_;
  int settings_ack_received_;
  int continuation_count_;
  int altsvc_count_;
  int priority_count_;
  std::unique_ptr<SpdyAltSvcIR> test_altsvc_ir_;
  int unknown_frame_count_;
  bool on_unknown_frame_result_;
  SpdyStreamId last_window_update_stream_;
  int last_window_update_delta_;
  SpdyStreamId last_push_promise_stream_;
  SpdyStreamId last_push_promise_promised_stream_;
  int data_bytes_;
  int fin_frame_count_;
  int fin_flag_count_;
  int end_of_stream_count_;
  int control_frame_header_data_count_;
  int zero_length_control_frame_header_data_count_;
  int data_frame_count_;
  size_t last_payload_len_;
  size_t last_frame_len_;
  size_t unknown_payload_len_;

  // Header-collection state driven by InitHeaderStreaming().
  std::unique_ptr<char[]> header_buffer_;
  size_t header_buffer_length_;
  size_t header_buffer_size_;
  size_t header_bytes_received_;
  SpdyStreamId header_stream_id_;
  SpdyFrameType header_control_type_;
  bool header_buffer_valid_;
  std::unique_ptr<RecordingHeadersHandler> headers_handler_;
  quiche::HttpHeaderBlock headers_;
  bool header_has_priority_;
  SpdyStreamId header_parent_stream_id_;
  bool header_exclusive_;
};
class TestExtension : public ExtensionVisitorInterface {
public:
void OnSetting(SpdySettingsId id, uint32_t value) override {
settings_received_.push_back({id, value});
}
bool OnFrameHeader(SpdyStreamId stream_id, size_t length, uint8_t type,
uint8_t flags) override {
stream_id_ = stream_id;
length_ = length;
type_ = type;
flags_ = flags;
return true;
}
void OnFramePayload(const char* data, size_t len) override {
payload_.append(data, len);
}
std::vector<std::pair<SpdySettingsId, uint32_t>> settings_received_;
SpdyStreamId stream_id_ = 0;
size_t length_ = 0;
uint8_t type_ = 0;
uint8_t flags_ = 0;
std::string payload_;
};
// Exposes SpdyUnknownIR's protected length setter so tests can fabricate
// unknown frames whose declared length differs from the payload.
class TestSpdyUnknownIR : public SpdyUnknownIR {
 public:
  using SpdyUnknownIR::set_length;
  using SpdyUnknownIR::SpdyUnknownIR;
};
enum Output { USE, NOT_USE };
// Fixture shared by all framer tests; parameterized on whether frames are
// serialized into an external buffer (see enum Output).
class SpdyFramerTest : public quiche::test::QuicheTestWithParam<Output> {
 public:
  SpdyFramerTest()
      : output_(output_buffer, kSize),
        framer_(SpdyFramer::ENABLE_COMPRESSION),
        deframer_(std::make_unique<Http2DecoderAdapter>()) {}

 protected:
  // Derives the serialization mode from the test parameter.
  void SetUp() override { use_output_ = (GetParam() == USE); }

  // Compares |actual_frame| byte-for-byte against |expected|, reporting any
  // mismatch as a hex dump labeled with |description|.
  void CompareFrame(const std::string& description,
                    const SpdySerializedFrame& actual_frame,
                    const unsigned char* expected, const int expected_len) {
    const unsigned char* actual_bytes =
        reinterpret_cast<const unsigned char*>(actual_frame.data());
    CompareCharArraysWithHexError(description, actual_bytes,
                                  actual_frame.size(), expected, expected_len);
  }

  bool use_output_ = false;
  ArrayOutputBuffer output_;
  SpdyFramer framer_;
  std::unique_ptr<Http2DecoderAdapter> deframer_;
};
// Run every SpdyFramerTest twice: once serializing into the shared output
// buffer (USE) and once letting the framer allocate frames (NOT_USE).
INSTANTIATE_TEST_SUITE_P(SpdyFramerTests, SpdyFramerTest,
                         ::testing::Values(USE, NOT_USE));
// A header block serialized without compression must round-trip through the
// deframer unchanged.
TEST_P(SpdyFramerTest, HeaderBlockInBuffer) {
  SpdyFramer framer(SpdyFramer::DISABLE_COMPRESSION);
  // Encode an uncompressed HEADERS frame for stream 1.
  SpdyHeadersIR headers(/* stream_id = */ 1);
  headers.SetHeader("alpha", "beta");
  headers.SetHeader("gamma", "charlie");
  headers.SetHeader("cookie", "key1=value1; key2=value2");
  SpdySerializedFrame serialized(
      SpdyFramerPeer::SerializeHeaders(&framer, headers, &output_));
  // Decode it and verify the header block survives intact.
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(reinterpret_cast<unsigned char*>(serialized.data()),
                           serialized.size());
  EXPECT_EQ(0, visitor.zero_length_control_frame_header_data_count_);
  EXPECT_EQ(headers.header_block(), visitor.headers_);
}
// Feeding a truncated HEADERS frame must not yield any decoded headers.
TEST_P(SpdyFramerTest, UndersizedHeaderBlockInBuffer) {
  SpdyFramer framer(SpdyFramer::DISABLE_COMPRESSION);
  // Encode an uncompressed HEADERS frame for stream 1.
  SpdyHeadersIR headers(/* stream_id = */ 1);
  headers.SetHeader("alpha", "beta");
  headers.SetHeader("gamma", "charlie");
  SpdySerializedFrame serialized(
      SpdyFramerPeer::SerializeHeaders(&framer, headers, &output_));
  // Withhold the final two bytes of the frame.
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(reinterpret_cast<unsigned char*>(serialized.data()),
                           serialized.size() - 2);
  EXPECT_EQ(0, visitor.zero_length_control_frame_header_data_count_);
  EXPECT_THAT(visitor.headers_, testing::IsEmpty());
}
// Priority information on HEADERS frames (parent stream id and exclusivity)
// must round-trip for every combination of values.
TEST_P(SpdyFramerTest, HeaderStreamDependencyValues) {
  SpdyFramer framer(SpdyFramer::DISABLE_COMPRESSION);
  for (SpdyStreamId parent_stream_id : {0u, 3u}) {
    for (bool exclusive : {true, false}) {
      SpdyHeadersIR headers(1);
      headers.set_has_priority(true);
      headers.set_parent_stream_id(parent_stream_id);
      headers.set_exclusive(exclusive);
      SpdySerializedFrame frame(
          SpdyFramerPeer::SerializeHeaders(&framer, headers, &output_));

      TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
      visitor.SimulateInFramer(reinterpret_cast<unsigned char*>(frame.data()),
                               frame.size());

      EXPECT_TRUE(visitor.header_has_priority_);
      EXPECT_EQ(parent_stream_id, visitor.header_parent_stream_id_);
      EXPECT_EQ(exclusive, visitor.header_exclusive_);
    }
  }
}
// A DATA frame whose declared length is exactly the default maximum frame
// size (2^14) must be accepted. Only the first 4 payload bytes are supplied;
// the deframer processes what it is given without error.
TEST_P(SpdyFramerTest, AcceptMaxFrameSizeSetting) {
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  unsigned char kH2FrameData[] = {
      0x00, 0x40, 0x00,        // Length: 2^14 = 16384
      0x00,                    //   Type: DATA
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0x00, 0x00, 0x00, 0x00,  // Payload (first 4 bytes only)
  };
  SpdySerializedFrame frame = MakeSerializedFrame(
      reinterpret_cast<char*>(kH2FrameData), sizeof(kH2FrameData));
  EXPECT_CALL(visitor, OnCommonHeader(1, 16384, 0x0, 0x0));
  EXPECT_CALL(visitor, OnDataFrameHeader(1, 1 << 14, false));
  EXPECT_CALL(visitor, OnStreamFrameData(1, _, 4));
  deframer_->ProcessInput(frame.data(), frame.size());
  EXPECT_FALSE(deframer_->HasError());
}
// A DATA frame one byte longer than the default maximum frame size (2^14)
// must be rejected with SPDY_OVERSIZED_PAYLOAD.
TEST_P(SpdyFramerTest, ExceedMaxFrameSizeSetting) {
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  unsigned char kH2FrameData[] = {
      0x00, 0x40, 0x01,        // Length: 2^14 + 1 = 16385 (too large)
      0x00,                    //   Type: DATA
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0x00, 0x00, 0x00, 0x00,  // Payload (truncated)
  };
  SpdySerializedFrame frame = MakeSerializedFrame(
      reinterpret_cast<char*>(kH2FrameData), sizeof(kH2FrameData));
  EXPECT_CALL(visitor, OnCommonHeader(1, 16385, 0x0, 0x0));
  EXPECT_CALL(visitor, OnError(Http2DecoderAdapter::SPDY_OVERSIZED_PAYLOAD, _));
  deframer_->ProcessInput(frame.data(), frame.size());
  EXPECT_TRUE(deframer_->HasError());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_OVERSIZED_PAYLOAD,
            deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
}
// After raising the deframer's maximum frame size, a frame of 2^14 + 1 bytes
// (rejected in ExceedMaxFrameSizeSetting) must be accepted.
TEST_P(SpdyFramerTest, AcceptLargerMaxFrameSizeSetting) {
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  const size_t big_frame_size = (1 << 14) + 1;
  deframer_->SetMaxFrameSize(big_frame_size);
  unsigned char kH2FrameData[] = {
      0x00, 0x40, 0x01,        // Length: 2^14 + 1 = 16385
      0x00,                    //   Type: DATA
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0x00, 0x00, 0x00, 0x00,  // Payload (first 4 bytes only)
  };
  SpdySerializedFrame frame = MakeSerializedFrame(
      reinterpret_cast<char*>(kH2FrameData), sizeof(kH2FrameData));
  EXPECT_CALL(visitor, OnCommonHeader(1, big_frame_size, 0x0, 0x0));
  EXPECT_CALL(visitor, OnDataFrameHeader(1, big_frame_size, false));
  EXPECT_CALL(visitor, OnStreamFrameData(1, _, 4));
  deframer_->ProcessInput(frame.data(), frame.size());
  EXPECT_FALSE(deframer_->HasError());
}
// A DATA frame whose declared pad length (0xff) exceeds the remaining
// payload must fail with SPDY_INVALID_PADDING.
TEST_P(SpdyFramerTest, OversizedDataPaddingError) {
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  unsigned char kH2FrameData[] = {
      0x00, 0x00, 0x05,        // Length: 5
      0x00,                    //   Type: DATA
      0x09,                    //  Flags: END_STREAM | PADDED
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0xff,                    // PadLen: 255 trailing bytes (too large)
      0x00, 0x00, 0x00, 0x00,  // Padding
  };
  SpdySerializedFrame frame = MakeSerializedFrame(
      reinterpret_cast<char*>(kH2FrameData), sizeof(kH2FrameData));
  {
    testing::InSequence seq;
    EXPECT_CALL(visitor, OnCommonHeader(1, 5, 0x0, 0x9));
    // `true` (was the int literal 1) for the bool fin parameter, matching
    // the style of the sibling tests; END_STREAM is set in the flags above.
    EXPECT_CALL(visitor, OnDataFrameHeader(1, 5, true));
    EXPECT_CALL(visitor, OnStreamPadding(1, 1));
    EXPECT_CALL(visitor, OnError(Http2DecoderAdapter::SPDY_INVALID_PADDING, _));
  }
  EXPECT_GT(frame.size(), deframer_->ProcessInput(frame.data(), frame.size()));
  EXPECT_TRUE(deframer_->HasError());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_PADDING,
            deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
}
// A padded DATA frame whose pad length fits within the payload must be
// accepted without error.
TEST_P(SpdyFramerTest, CorrectlySizedDataPaddingNoError) {
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  char kH2FrameData[] = {
      0x00, 0x00, 0x05,        // Length: 5
      0x00,                    //   Type: DATA
      0x08,                    //  Flags: PADDED
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0x04,                    // PadLen: 4 trailing bytes
      0x00, 0x00, 0x00, 0x00,  // Padding
  };
  SpdySerializedFrame frame =
      MakeSerializedFrame(kH2FrameData, sizeof(kH2FrameData));
  {
    testing::InSequence seq;
    EXPECT_CALL(visitor, OnCommonHeader(1, 5, 0x0, 0x8));
    EXPECT_CALL(visitor, OnDataFrameHeader(1, 5, false));
    EXPECT_CALL(visitor, OnStreamPadLength(1, 4));
    EXPECT_CALL(visitor, OnError(_, _)).Times(0);
    EXPECT_CALL(visitor, OnStreamPadding(1, 4));
  }
  EXPECT_EQ(frame.size(), deframer_->ProcessInput(frame.data(), frame.size()));
  EXPECT_FALSE(deframer_->HasError());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_NO_ERROR, deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
}
// A HEADERS frame whose declared pad length (0xff) exceeds the remaining
// payload must fail with SPDY_INVALID_PADDING.
TEST_P(SpdyFramerTest, OversizedHeadersPaddingError) {
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  unsigned char kH2FrameData[] = {
      0x00, 0x00, 0x05,        // Length: 5
      0x01,                    //   Type: HEADERS
      0x08,                    //  Flags: PADDED
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0xff,                    // PadLen: 255 trailing bytes (too large)
      0x00, 0x00, 0x00, 0x00,  // Padding
  };
  SpdySerializedFrame frame = MakeSerializedFrame(
      reinterpret_cast<char*>(kH2FrameData), sizeof(kH2FrameData));
  EXPECT_CALL(visitor, OnCommonHeader(1, 5, 0x1, 0x8));
  EXPECT_CALL(visitor, OnHeaders(1, 5, false, 0, 0, false, false, false));
  EXPECT_CALL(visitor, OnHeaderFrameStart(1)).Times(1);
  EXPECT_CALL(visitor, OnError(Http2DecoderAdapter::SPDY_INVALID_PADDING, _));
  EXPECT_EQ(frame.size(), deframer_->ProcessInput(frame.data(), frame.size()));
  EXPECT_TRUE(deframer_->HasError());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_PADDING,
            deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
}
// A padded HEADERS frame whose pad length fits within the payload must be
// accepted without error.
TEST_P(SpdyFramerTest, CorrectlySizedHeadersPaddingNoError) {
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  char kH2FrameData[] = {
      0x00, 0x00, 0x05,        // Length: 5
      0x01,                    //   Type: HEADERS
      0x08,                    //  Flags: PADDED
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0x04,                    // PadLen: 4 trailing bytes
      0x00, 0x00, 0x00, 0x00,  // Padding
  };
  SpdySerializedFrame frame =
      MakeSerializedFrame(kH2FrameData, sizeof(kH2FrameData));
  EXPECT_CALL(visitor, OnCommonHeader(1, 5, 0x1, 0x8));
  EXPECT_CALL(visitor, OnHeaders(1, 5, false, 0, 0, false, false, false));
  EXPECT_CALL(visitor, OnHeaderFrameStart(1)).Times(1);
  EXPECT_EQ(frame.size(), deframer_->ProcessInput(frame.data(), frame.size()));
  EXPECT_FALSE(deframer_->HasError());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_NO_ERROR, deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
}
// DATA frames on stream 0 (the connection control stream) are invalid and
// must produce SPDY_INVALID_STREAM_ID.
TEST_P(SpdyFramerTest, DataWithStreamIdZero) {
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  const char bytes[] = "hello";
  SpdyDataIR data_ir(/* stream_id = */ 0, bytes);
  SpdySerializedFrame frame(framer_.SerializeData(data_ir));
  EXPECT_CALL(visitor, OnCommonHeader(0, _, 0x0, _));
  EXPECT_CALL(visitor, OnError(Http2DecoderAdapter::SPDY_INVALID_STREAM_ID, _));
  EXPECT_GT(frame.size(), deframer_->ProcessInput(frame.data(), frame.size()));
  EXPECT_TRUE(deframer_->HasError());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_STREAM_ID,
            deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
}
// HEADERS frames on stream 0 are invalid and must produce
// SPDY_INVALID_STREAM_ID.
TEST_P(SpdyFramerTest, HeadersWithStreamIdZero) {
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  SpdyHeadersIR headers(/* stream_id = */ 0);
  headers.SetHeader("alpha", "beta");
  SpdySerializedFrame frame(
      SpdyFramerPeer::SerializeHeaders(&framer_, headers, &output_));
  EXPECT_CALL(visitor, OnCommonHeader(0, _, 0x1, _));
  EXPECT_CALL(visitor, OnError(Http2DecoderAdapter::SPDY_INVALID_STREAM_ID, _));
  EXPECT_GT(frame.size(), deframer_->ProcessInput(frame.data(), frame.size()));
  EXPECT_TRUE(deframer_->HasError());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_STREAM_ID,
            deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
}
// PRIORITY frames on stream 0 are invalid and must produce
// SPDY_INVALID_STREAM_ID.
TEST_P(SpdyFramerTest, PriorityWithStreamIdZero) {
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  SpdyPriorityIR priority_ir(/* stream_id = */ 0,
                             /* parent_stream_id = */ 1,
                             /* weight = */ 16,
                             /* exclusive = */ true);
  SpdySerializedFrame frame(framer_.SerializeFrame(priority_ir));
  if (use_output_) {
    EXPECT_EQ(framer_.SerializeFrame(priority_ir, &output_), frame.size());
    frame = MakeSerializedFrame(output_.Begin(), output_.Size());
  }
  EXPECT_CALL(visitor, OnCommonHeader(0, _, 0x2, _));
  EXPECT_CALL(visitor, OnError(Http2DecoderAdapter::SPDY_INVALID_STREAM_ID, _));
  EXPECT_GT(frame.size(), deframer_->ProcessInput(frame.data(), frame.size()));
  EXPECT_TRUE(deframer_->HasError());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_STREAM_ID,
            deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
}
// RST_STREAM frames on stream 0 are invalid and must produce
// SPDY_INVALID_STREAM_ID.
TEST_P(SpdyFramerTest, RstStreamWithStreamIdZero) {
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  SpdyRstStreamIR rst_stream_ir(/* stream_id = */ 0, ERROR_CODE_PROTOCOL_ERROR);
  SpdySerializedFrame frame(framer_.SerializeRstStream(rst_stream_ir));
  if (use_output_) {
    EXPECT_TRUE(framer_.SerializeRstStream(rst_stream_ir, &output_));
    frame = MakeSerializedFrame(output_.Begin(), output_.Size());
  }
  EXPECT_CALL(visitor, OnCommonHeader(0, _, 0x3, _));
  EXPECT_CALL(visitor, OnError(Http2DecoderAdapter::SPDY_INVALID_STREAM_ID, _));
  EXPECT_GT(frame.size(), deframer_->ProcessInput(frame.data(), frame.size()));
  EXPECT_TRUE(deframer_->HasError());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_STREAM_ID,
            deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
}
// SETTINGS frames must be sent on stream 0; a non-zero stream id must
// produce SPDY_INVALID_STREAM_ID.
TEST_P(SpdyFramerTest, SettingsWithStreamIdNotZero) {
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  char kH2FrameData[] = {
      0x00, 0x00, 0x06,        // Length: 6
      0x04,                    //   Type: SETTINGS
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x01,  // Stream: 1 (invalid)
      0x00, 0x04,              //  Param: INITIAL_WINDOW_SIZE
      0x0a, 0x0b, 0x0c, 0x0d,  //  Value: 0x0a0b0c0d
  };
  SpdySerializedFrame frame =
      MakeSerializedFrame(kH2FrameData, sizeof(kH2FrameData));
  EXPECT_CALL(visitor, OnCommonHeader(1, 6, 0x4, 0x0));
  EXPECT_CALL(visitor, OnError(Http2DecoderAdapter::SPDY_INVALID_STREAM_ID, _));
  EXPECT_GT(frame.size(), deframer_->ProcessInput(frame.data(), frame.size()));
  EXPECT_TRUE(deframer_->HasError());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_STREAM_ID,
            deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
}
// GOAWAY frames must be sent on stream 0; a non-zero stream id must produce
// SPDY_INVALID_STREAM_ID.
TEST_P(SpdyFramerTest, GoawayWithStreamIdNotZero) {
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  char kH2FrameData[] = {
      0x00, 0x00, 0x0a,        // Length: 10
      0x07,                    //   Type: GOAWAY
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x01,  // Stream: 1 (invalid)
      0x00, 0x00, 0x00, 0x00,  //   Last: 0
      0x00, 0x00, 0x00, 0x00,  //  Error: NO_ERROR
      0x47, 0x41,              // Description: "GA"
  };
  SpdySerializedFrame frame =
      MakeSerializedFrame(kH2FrameData, sizeof(kH2FrameData));
  EXPECT_CALL(visitor, OnCommonHeader(1, 10, 0x7, 0x0));
  EXPECT_CALL(visitor, OnError(Http2DecoderAdapter::SPDY_INVALID_STREAM_ID, _));
  EXPECT_GT(frame.size(), deframer_->ProcessInput(frame.data(), frame.size()));
  EXPECT_TRUE(deframer_->HasError());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_STREAM_ID,
            deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
}
// CONTINUATION frames on stream 0 are invalid and must produce
// SPDY_INVALID_STREAM_ID.
TEST_P(SpdyFramerTest, ContinuationWithStreamIdZero) {
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  SpdyContinuationIR continuation(/* stream_id = */ 0);
  std::string some_nonsense_encoding = "some nonsense encoding";
  continuation.take_encoding(std::move(some_nonsense_encoding));
  continuation.set_end_headers(true);
  SpdySerializedFrame frame(framer_.SerializeContinuation(continuation));
  if (use_output_) {
    ASSERT_TRUE(framer_.SerializeContinuation(continuation, &output_));
    frame = MakeSerializedFrame(output_.Begin(), output_.Size());
  }
  EXPECT_CALL(visitor, OnCommonHeader(0, _, 0x9, _));
  EXPECT_CALL(visitor, OnError(Http2DecoderAdapter::SPDY_INVALID_STREAM_ID, _));
  EXPECT_GT(frame.size(), deframer_->ProcessInput(frame.data(), frame.size()));
  EXPECT_TRUE(deframer_->HasError());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_STREAM_ID,
            deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
}
// PUSH_PROMISE frames on stream 0 are invalid and must produce
// SPDY_INVALID_STREAM_ID.
TEST_P(SpdyFramerTest, PushPromiseWithStreamIdZero) {
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  SpdyPushPromiseIR push_promise(/* stream_id = */ 0,
                                 /* promised_stream_id = */ 4);
  push_promise.SetHeader("alpha", "beta");
  SpdySerializedFrame frame(SpdyFramerPeer::SerializePushPromise(
      &framer_, push_promise, use_output_ ? &output_ : nullptr));
  EXPECT_CALL(visitor, OnCommonHeader(0, _, 0x5, _));
  EXPECT_CALL(visitor, OnError(Http2DecoderAdapter::SPDY_INVALID_STREAM_ID, _));
  EXPECT_GT(frame.size(), deframer_->ProcessInput(frame.data(), frame.size()));
  EXPECT_TRUE(deframer_->HasError());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_STREAM_ID,
            deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
}
// A PUSH_PROMISE promising stream 0 is malformed and must produce
// SPDY_INVALID_CONTROL_FRAME.
TEST_P(SpdyFramerTest, PushPromiseWithPromisedStreamIdZero) {
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  SpdyPushPromiseIR push_promise(/* stream_id = */ 3,
                                 /* promised_stream_id = */ 0);
  push_promise.SetHeader("alpha", "beta");
  SpdySerializedFrame frame(SpdyFramerPeer::SerializePushPromise(
      &framer_, push_promise, use_output_ ? &output_ : nullptr));
  EXPECT_CALL(visitor, OnCommonHeader(3, _, 0x5, _));
  EXPECT_CALL(visitor,
              OnError(Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME, _));
  deframer_->ProcessInput(frame.data(), frame.size());
  EXPECT_TRUE(deframer_->HasError());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME,
            deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
}
// A header value containing an embedded NUL (two values joined per the
// HTTP/2 convention) must survive a serialize/deserialize round trip.
TEST_P(SpdyFramerTest, MultiValueHeader) {
  SpdyFramer framer(SpdyFramer::DISABLE_COMPRESSION);
  // "value1\0value2" — the explicit length keeps the NUL in the string.
  std::string value("value1\0value2", 13);
  quiche::HttpHeaderBlock header_set;
  header_set["name"] = value;
  // Encode the block uncompressed and hand-build a HEADERS frame around it.
  HpackEncoder encoder;
  encoder.DisableCompression();
  std::string buffer = encoder.EncodeHeaderBlock(header_set);
  SpdyFrameBuilder frame(1024);
  // The extra 5 payload bytes are the priority fields implied by
  // HEADERS_FLAG_PRIORITY: 4-byte stream dependency + 1-byte weight.
  frame.BeginNewFrame(SpdyFrameType::HEADERS,
                      HEADERS_FLAG_PRIORITY | HEADERS_FLAG_END_HEADERS, 3,
                      buffer.size() + 5 /* priority fields */);
  frame.WriteUInt32(0);   // Stream dependency.
  frame.WriteUInt8(255);  // Weight.
  frame.WriteBytes(&buffer[0], buffer.size());
  SpdySerializedFrame control_frame(frame.take());
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(
      reinterpret_cast<unsigned char*>(control_frame.data()),
      control_frame.size());
  // The NUL-joined value must come back unmodified.
  EXPECT_THAT(visitor.headers_, testing::ElementsAre(testing::Pair(
                                    "name", absl::string_view(value))));
}
// Serializing a header block that includes an empty-valued header with
// compression enabled must not crash.
TEST_P(SpdyFramerTest, CompressEmptyHeaders) {
  // A typical response header block plus one empty-valued header.
  SpdyHeadersIR headers(1);
  headers.SetHeader("server", "SpdyServer 1.0");
  headers.SetHeader("date", "Mon 12 Jan 2009 12:12:12 PST");
  headers.SetHeader("status", "200");
  headers.SetHeader("version", "HTTP/1.1");
  headers.SetHeader("content-type", "text/html");
  headers.SetHeader("content-length", "12");
  headers.SetHeader("x-empty-header", "");

  SpdyFramer framer(SpdyFramer::ENABLE_COMPRESSION);
  SpdySerializedFrame serialized(
      SpdyFramerPeer::SerializeHeaders(&framer, headers, &output_));
}
// A mixed sequence of frames on streams 1 and 3. The assertions below match
// the input: 3 HEADERS, 4 DATA frames with 24 payload bytes total, and
// 2 RST_STREAM frames (counted via fin_frame_count_).
TEST_P(SpdyFramerTest, Basic) {
  const unsigned char kH2Input[] = {
      0x00, 0x00, 0x05,        // Length: 5
      0x01,                    //   Type: HEADERS
      0x24,                    //  Flags: END_HEADERS|PRIORITY
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0x00, 0x00, 0x00, 0x00,  // Parent: 0
      0x82,                    // Weight: 131
      0x00, 0x00, 0x01,        // Length: 1
      0x01,                    //   Type: HEADERS
      0x04,                    //  Flags: END_HEADERS
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0x8c,                    // Header block fragment (HPACK)
      0x00, 0x00, 0x0c,        // Length: 12
      0x00,                    //   Type: DATA
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0xde, 0xad, 0xbe, 0xef,  // Payload
      0xde, 0xad, 0xbe, 0xef,  //
      0xde, 0xad, 0xbe, 0xef,  //
      0x00, 0x00, 0x05,        // Length: 5
      0x01,                    //   Type: HEADERS
      0x24,                    //  Flags: END_HEADERS|PRIORITY
      0x00, 0x00, 0x00, 0x03,  // Stream: 3
      0x00, 0x00, 0x00, 0x00,  // Parent: 0
      0x82,                    // Weight: 131
      0x00, 0x00, 0x08,        // Length: 8
      0x00,                    //   Type: DATA
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x03,  // Stream: 3
      0xde, 0xad, 0xbe, 0xef,  // Payload
      0xde, 0xad, 0xbe, 0xef,  //
      0x00, 0x00, 0x04,        // Length: 4
      0x00,                    //   Type: DATA
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0xde, 0xad, 0xbe, 0xef,  // Payload
      0x00, 0x00, 0x04,        // Length: 4
      0x03,                    //   Type: RST_STREAM
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0x00, 0x00, 0x00, 0x08,  //  Error: CANCEL (8)
      0x00, 0x00, 0x00,        // Length: 0
      0x00,                    //   Type: DATA
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x03,  // Stream: 3
      0x00, 0x00, 0x04,        // Length: 4
      0x03,                    //   Type: RST_STREAM
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x03,  // Stream: 3
      0x00, 0x00, 0x00, 0x08,  //  Error: CANCEL (8)
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(kH2Input, sizeof(kH2Input));
  EXPECT_EQ(24, visitor.data_bytes_);
  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(2, visitor.fin_frame_count_);  // The two RST_STREAM frames.
  EXPECT_EQ(3, visitor.headers_frame_count_);
  EXPECT_EQ(0, visitor.fin_flag_count_);
  EXPECT_EQ(0, visitor.end_of_stream_count_);
  EXPECT_EQ(4, visitor.data_frame_count_);
}
// Same flavor of input as Basic, but the visitor calls StopProcessing()
// while handling the fourth frame's header; the deframer must report
// SPDY_STOP_PROCESSING and consume less than the full input.
TEST_P(SpdyFramerTest, BasicWithError) {
  const unsigned char kH2Input[] = {
      0x00, 0x00, 0x01,        // Length: 1
      0x01,                    //   Type: HEADERS
      0x04,                    //  Flags: END_HEADERS
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0x8c,                    // Header block fragment (HPACK)
      0x00, 0x00, 0x0c,        // Length: 12
      0x00,                    //   Type: DATA
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0xde, 0xad, 0xbe, 0xef,  // Payload
      0xde, 0xad, 0xbe, 0xef,  //
      0xde, 0xad, 0xbe, 0xef,  //
      0x00, 0x00, 0x06,        // Length: 6
      0x01,                    //   Type: HEADERS
      0x24,                    //  Flags: END_HEADERS|PRIORITY
      0x00, 0x00, 0x00, 0x03,  // Stream: 3
      0x00, 0x00, 0x00, 0x00,  // Parent: 0
      0x82,                    // Weight: 131
      0x8c,                    // Header block fragment (HPACK)
      0x00, 0x00, 0x08,        // Length: 8 — processing stops on this frame
      0x00,                    //   Type: DATA
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x03,  // Stream: 3
      0xde, 0xad, 0xbe, 0xef,  // Payload
      0xde, 0xad, 0xbe, 0xef,  //
      0x00, 0x00, 0x04,        // Length: 4 (never processed)
      0x00,                    //   Type: DATA
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0xde, 0xad, 0xbe, 0xef,  // Payload
      0x00, 0x00, 0x04,        // Length: 4 (never processed)
      0x03,                    //   Type: RST_STREAM
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0x00, 0x00, 0x00, 0x08,  //  Error: CANCEL (8)
      0x00, 0x00, 0x00,        // Length: 0 (never processed)
      0x00,                    //   Type: DATA
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x03,  // Stream: 3
      0x00, 0x00, 0x04,        // Length: 4 (never processed)
      0x03,                    //   Type: RST_STREAM
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x03,  // Stream: 3
      0x00, 0x00, 0x00, 0x08,  //  Error: CANCEL (8)
  };
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  testing::InSequence s;
  EXPECT_CALL(visitor, OnCommonHeader(1, 1, 0x1, 0x4));
  EXPECT_CALL(visitor, OnHeaders(1, 1, false, 0, 0, false, false, true));
  EXPECT_CALL(visitor, OnHeaderFrameStart(1));
  EXPECT_CALL(visitor, OnHeaderFrameEnd(1));
  EXPECT_CALL(visitor, OnCommonHeader(1, 12, 0x0, 0x0));
  EXPECT_CALL(visitor, OnDataFrameHeader(1, 12, false));
  EXPECT_CALL(visitor, OnStreamFrameData(1, _, 12));
  EXPECT_CALL(visitor, OnCommonHeader(3, 6, 0x1, 0x24));
  EXPECT_CALL(visitor, OnHeaders(3, 6, true, 131, 0, false, false, true));
  EXPECT_CALL(visitor, OnHeaderFrameStart(3));
  EXPECT_CALL(visitor, OnHeaderFrameEnd(3));
  EXPECT_CALL(visitor, OnCommonHeader(3, 8, 0x0, 0x0));
  // Abort during the fourth frame; no further callbacks are expected.
  EXPECT_CALL(visitor, OnDataFrameHeader(3, 8, false))
      .WillOnce(testing::InvokeWithoutArgs(
          [this]() { deframer_->StopProcessing(); }));
  EXPECT_CALL(
      visitor,
      OnError(http2::Http2DecoderAdapter::SpdyFramerError::SPDY_STOP_PROCESSING,
              "Ignoring further events on this connection."));
  size_t processed = deframer_->ProcessInput(
      reinterpret_cast<const char*>(kH2Input), sizeof(kH2Input));
  EXPECT_LT(processed, sizeof(kH2Input));
}
// A DATA frame carrying END_STREAM is reported via end_of_stream_count_
// (not fin_flag_count_, which tracks HEADERS frames).
TEST_P(SpdyFramerTest, FinOnDataFrame) {
  const unsigned char kH2Input[] = {
      0x00, 0x00, 0x05,        // Length: 5
      0x01,                    //   Type: HEADERS
      0x24,                    //  Flags: END_HEADERS|PRIORITY
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0x00, 0x00, 0x00, 0x00,  // Parent: 0
      0x82,                    // Weight: 131
      0x00, 0x00, 0x01,        // Length: 1
      0x01,                    //   Type: HEADERS
      0x04,                    //  Flags: END_HEADERS
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0x8c,                    // Header block fragment (HPACK)
      0x00, 0x00, 0x0c,        // Length: 12
      0x00,                    //   Type: DATA
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0xde, 0xad, 0xbe, 0xef,  // Payload
      0xde, 0xad, 0xbe, 0xef,  //
      0xde, 0xad, 0xbe, 0xef,  //
      0x00, 0x00, 0x04,        // Length: 4
      0x00,                    //   Type: DATA
      0x01,                    //  Flags: END_STREAM
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0xde, 0xad, 0xbe, 0xef,  // Payload
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(kH2Input, sizeof(kH2Input));
  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(2, visitor.headers_frame_count_);
  EXPECT_EQ(16, visitor.data_bytes_);
  EXPECT_EQ(0, visitor.fin_frame_count_);
  EXPECT_EQ(0, visitor.fin_flag_count_);
  EXPECT_EQ(1, visitor.end_of_stream_count_);
  EXPECT_EQ(2, visitor.data_frame_count_);
}
// A HEADERS frame carrying END_STREAM increments both fin_flag_count_ and
// end_of_stream_count_.
TEST_P(SpdyFramerTest, FinOnHeadersFrame) {
  const unsigned char kH2Input[] = {
      0x00, 0x00, 0x05,        // Length: 5
      0x01,                    //   Type: HEADERS
      0x24,                    //  Flags: END_HEADERS|PRIORITY
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0x00, 0x00, 0x00, 0x00,  // Parent: 0
      0x82,                    // Weight: 131
      0x00, 0x00, 0x01,        // Length: 1
      0x01,                    //   Type: HEADERS
      0x05,                    //  Flags: END_STREAM|END_HEADERS
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0x8c,                    // Header block fragment (HPACK)
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(kH2Input, sizeof(kH2Input));
  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(2, visitor.headers_frame_count_);
  EXPECT_EQ(0, visitor.data_bytes_);
  EXPECT_EQ(0, visitor.fin_frame_count_);
  EXPECT_EQ(1, visitor.fin_flag_count_);
  EXPECT_EQ(1, visitor.end_of_stream_count_);
  EXPECT_EQ(0, visitor.data_frame_count_);
}
// Serializes a compressed HEADERS frame followed by a FIN DATA frame and
// feeds both to the deframer one byte at a time, verifying incremental
// decoding never errors and all counts come out right.
TEST_P(SpdyFramerTest, UnclosedStreamDataCompressorsOneByteAtATime) {
  const char kHeader1[] = "header1";
  const char kHeader2[] = "header2";
  const char kValue1[] = "value1";
  const char kValue2[] = "value2";

  SpdyHeadersIR headers(/* stream_id = */ 1);
  headers.SetHeader(kHeader1, kValue1);
  headers.SetHeader(kHeader2, kValue2);
  SpdySerializedFrame headers_frame(SpdyFramerPeer::SerializeHeaders(
      &framer_, headers, use_output_ ? &output_ : nullptr));

  const char bytes[] = "this is a test test test test test!";
  SpdyDataIR data_ir(/* stream_id = */ 1,
                     absl::string_view(bytes, ABSL_ARRAYSIZE(bytes)));
  data_ir.set_fin(true);
  SpdySerializedFrame send_frame(framer_.SerializeData(data_ir));

  // Feed both frames byte-by-byte; any error aborts the test immediately.
  TestSpdyVisitor visitor(SpdyFramer::ENABLE_COMPRESSION);
  for (SpdySerializedFrame* current : {&headers_frame, &send_frame}) {
    const unsigned char* data =
        reinterpret_cast<const unsigned char*>(current->data());
    for (size_t idx = 0; idx < current->size(); ++idx) {
      visitor.SimulateInFramer(data + idx, 1);
      ASSERT_EQ(0, visitor.error_count_);
    }
  }

  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(1, visitor.headers_frame_count_);
  EXPECT_EQ(ABSL_ARRAYSIZE(bytes), static_cast<unsigned>(visitor.data_bytes_));
  EXPECT_EQ(0, visitor.fin_frame_count_);
  EXPECT_EQ(0, visitor.fin_flag_count_);
  EXPECT_EQ(1, visitor.end_of_stream_count_);
  EXPECT_EQ(1, visitor.data_frame_count_);
}
// A serialized WINDOW_UPDATE frame must match the expected wire bytes.
TEST_P(SpdyFramerTest, WindowUpdateFrame) {
  SpdyWindowUpdateIR window_update(/* stream_id = */ 1,
                                   /* delta = */ 0x12345678);
  SpdySerializedFrame frame(framer_.SerializeWindowUpdate(window_update));
  if (use_output_) {
    ASSERT_TRUE(framer_.SerializeWindowUpdate(window_update, &output_));
    frame = MakeSerializedFrame(output_.Begin(), output_.Size());
  }
  const char kDescription[] = "WINDOW_UPDATE frame, stream 1, delta 0x12345678";
  const unsigned char kH2FrameData[] = {
      0x00, 0x00, 0x04,        //    Length: 4
      0x08,                    //      Type: WINDOW_UPDATE
      0x00,                    //     Flags: none
      0x00, 0x00, 0x00, 0x01,  //    Stream: 1
      0x12, 0x34, 0x56, 0x78,  // Increment: 0x12345678
  };
  CompareFrame(kDescription, frame, kH2FrameData, ABSL_ARRAYSIZE(kH2FrameData));
}
// Golden-byte tests for DATA frame serialization: plain, padded (large,
// small, and one-byte pad), FIN, empty, and max-stream-id variants.
TEST_P(SpdyFramerTest, CreateDataFrame) {
  {
    const char kDescription[] = "'hello' data frame, no FIN";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x05,        // Length: 5
        0x00,                    //   Type: DATA
        0x00,                    //  Flags: none
        0x00, 0x00, 0x00, 0x01,  // Stream: 1
        'h', 'e', 'l', 'l',      // Payload
        'o',                     //
    };
    const char bytes[] = "hello";
    SpdyDataIR data_ir(/* stream_id = */ 1, bytes);
    SpdySerializedFrame frame(framer_.SerializeData(data_ir));
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
    // The header-only serialization path must produce the same 9-byte header.
    SpdyDataIR data_header_ir(/* stream_id = */ 1);
    data_header_ir.SetDataShallow(bytes);
    frame =
        framer_.SerializeDataFrameHeaderWithPaddingLengthField(data_header_ir);
    CompareCharArraysWithHexError(
        kDescription, reinterpret_cast<const unsigned char*>(frame.data()),
        kDataFrameMinimumSize, kH2FrameData, kDataFrameMinimumSize);
  }
  {
    const char kDescription[] = "'hello' data frame with more padding, no FIN";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0xfd,        // Length: 253 = 1 (PadLen) + 5 + 247
        0x00,                    //   Type: DATA
        0x08,                    //  Flags: PADDED
        0x00, 0x00, 0x00, 0x01,  // Stream: 1
        0xf7,                    // PadLen: 247 trailing bytes
        'h', 'e', 'l', 'l',      // Payload
        'o',                     //
        // 247 bytes of padding:
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    };
    const char bytes[] = "hello";
    SpdyDataIR data_ir(/* stream_id = */ 1, bytes);
    data_ir.set_padding_len(248);
    SpdySerializedFrame frame(framer_.SerializeData(data_ir));
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
    frame = framer_.SerializeDataFrameHeaderWithPaddingLengthField(data_ir);
    CompareCharArraysWithHexError(
        kDescription, reinterpret_cast<const unsigned char*>(frame.data()),
        kDataFrameMinimumSize, kH2FrameData, kDataFrameMinimumSize);
  }
  {
    const char kDescription[] = "'hello' data frame with few padding, no FIN";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x0d,        // Length: 13 = 1 (PadLen) + 5 + 7
        0x00,                    //   Type: DATA
        0x08,                    //  Flags: PADDED
        0x00, 0x00, 0x00, 0x01,  // Stream: 1
        0x07,                    // PadLen: 7 trailing bytes
        'h', 'e', 'l', 'l',      // Payload
        'o',                     //
        0x00, 0x00, 0x00, 0x00,  // Padding
        0x00, 0x00, 0x00,        // Padding
    };
    const char bytes[] = "hello";
    SpdyDataIR data_ir(/* stream_id = */ 1, bytes);
    data_ir.set_padding_len(8);
    SpdySerializedFrame frame(framer_.SerializeData(data_ir));
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
    frame = framer_.SerializeDataFrameHeaderWithPaddingLengthField(data_ir);
    CompareCharArraysWithHexError(
        kDescription, reinterpret_cast<const unsigned char*>(frame.data()),
        kDataFrameMinimumSize, kH2FrameData, kDataFrameMinimumSize);
  }
  {
    const char kDescription[] =
        "'hello' data frame with 1 byte padding, no FIN";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x06,        // Length: 6 = 1 (PadLen) + 5
        0x00,                    //   Type: DATA
        0x08,                    //  Flags: PADDED
        0x00, 0x00, 0x00, 0x01,  // Stream: 1
        0x00,                    // PadLen: 0 trailing bytes
        'h', 'e', 'l', 'l',      // Payload
        'o',                     //
    };
    const char bytes[] = "hello";
    SpdyDataIR data_ir(/* stream_id = */ 1, bytes);
    data_ir.set_padding_len(1);
    SpdySerializedFrame frame(framer_.SerializeData(data_ir));
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
    frame = framer_.SerializeDataFrameHeaderWithPaddingLengthField(data_ir);
    CompareCharArraysWithHexError(
        kDescription, reinterpret_cast<const unsigned char*>(frame.data()),
        kDataFrameMinimumSize, kH2FrameData, kDataFrameMinimumSize);
  }
  {
    const char kDescription[] = "Data frame with negative data byte, no FIN";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x01,        // Length: 1
        0x00,                    //   Type: DATA
        0x00,                    //  Flags: none
        0x00, 0x00, 0x00, 0x01,  // Stream: 1
        0xff,                    // Payload
    };
    SpdyDataIR data_ir(/* stream_id = */ 1, "\xff");
    SpdySerializedFrame frame(framer_.SerializeData(data_ir));
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
  }
  {
    const char kDescription[] = "'hello' data frame, with FIN";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x05,        // Length: 5
        0x00,                    //   Type: DATA
        0x01,                    //  Flags: END_STREAM
        0x00, 0x00, 0x00, 0x01,  // Stream: 1
        0x68, 0x65, 0x6c, 0x6c,  // Payload: "hell"
        0x6f,                    //          "o"
    };
    SpdyDataIR data_ir(/* stream_id = */ 1, "hello");
    data_ir.set_fin(true);
    SpdySerializedFrame frame(framer_.SerializeData(data_ir));
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
  }
  {
    const char kDescription[] = "Empty data frame";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x00,        // Length: 0
        0x00,                    //   Type: DATA
        0x00,                    //  Flags: none
        0x00, 0x00, 0x00, 0x01,  // Stream: 1
    };
    SpdyDataIR data_ir(/* stream_id = */ 1, "");
    SpdySerializedFrame frame(framer_.SerializeData(data_ir));
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
    frame = framer_.SerializeDataFrameHeaderWithPaddingLengthField(data_ir);
    CompareCharArraysWithHexError(
        kDescription, reinterpret_cast<const unsigned char*>(frame.data()),
        kDataFrameMinimumSize, kH2FrameData, kDataFrameMinimumSize);
  }
  {
    const char kDescription[] = "Data frame with max stream ID";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x05,        // Length: 5
        0x00,                    //   Type: DATA
        0x01,                    //  Flags: END_STREAM
        0x7f, 0xff, 0xff, 0xff,  // Stream: 0x7fffffff (max)
        0x68, 0x65, 0x6c, 0x6c,  // Payload: "hell"
        0x6f,                    //          "o"
    };
    SpdyDataIR data_ir(/* stream_id = */ 0x7fffffff, "hello");
    data_ir.set_fin(true);
    SpdySerializedFrame frame(framer_.SerializeData(data_ir));
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
  }
}
// Golden-byte tests for RST_STREAM serialization: ordinary stream, max
// stream id, and a second error code.
TEST_P(SpdyFramerTest, CreateRstStream) {
  {
    const char kDescription[] = "RST_STREAM frame";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x04,        // Length: 4
        0x03,                    //   Type: RST_STREAM
        0x00,                    //  Flags: none
        0x00, 0x00, 0x00, 0x01,  // Stream: 1
        0x00, 0x00, 0x00, 0x01,  //  Error: PROTOCOL_ERROR (1)
    };
    SpdyRstStreamIR rst_stream(/* stream_id = */ 1, ERROR_CODE_PROTOCOL_ERROR);
    SpdySerializedFrame frame(framer_.SerializeRstStream(rst_stream));
    if (use_output_) {
      ASSERT_TRUE(framer_.SerializeRstStream(rst_stream, &output_));
      frame = MakeSerializedFrame(output_.Begin(), output_.Size());
    }
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
  }
  {
    const char kDescription[] = "RST_STREAM frame with max stream ID";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x04,        // Length: 4
        0x03,                    //   Type: RST_STREAM
        0x00,                    //  Flags: none
        0x7f, 0xff, 0xff, 0xff,  // Stream: 0x7fffffff (max)
        0x00, 0x00, 0x00, 0x01,  //  Error: PROTOCOL_ERROR (1)
    };
    SpdyRstStreamIR rst_stream(/* stream_id = */ 0x7FFFFFFF,
                               ERROR_CODE_PROTOCOL_ERROR);
    SpdySerializedFrame frame(framer_.SerializeRstStream(rst_stream));
    if (use_output_) {
      output_.Reset();
      ASSERT_TRUE(framer_.SerializeRstStream(rst_stream, &output_));
      frame = MakeSerializedFrame(output_.Begin(), output_.Size());
    }
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
  }
  {
    const char kDescription[] = "RST_STREAM frame with max status code";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x04,        // Length: 4
        0x03,                    //   Type: RST_STREAM
        0x00,                    //  Flags: none
        0x7f, 0xff, 0xff, 0xff,  // Stream: 0x7fffffff (max)
        0x00, 0x00, 0x00, 0x02,  //  Error: INTERNAL_ERROR (2)
    };
    SpdyRstStreamIR rst_stream(/* stream_id = */ 0x7FFFFFFF,
                               ERROR_CODE_INTERNAL_ERROR);
    SpdySerializedFrame frame(framer_.SerializeRstStream(rst_stream));
    if (use_output_) {
      output_.Reset();
      ASSERT_TRUE(framer_.SerializeRstStream(rst_stream, &output_));
      frame = MakeSerializedFrame(output_.Begin(), output_.Size());
    }
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
  }
}
// Verifies serialization of SETTINGS frames: a single setting (network byte
// order of the value), several settings in one frame, and an empty frame.
TEST_P(SpdyFramerTest, CreateSettings) {
  {
    const char kDescription[] = "Network byte order SETTINGS frame";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x06,        // Length: 6 (one 2-byte ID + 4-byte value)
        0x04,                    //   Type: SETTINGS
        0x00,                    //  Flags: none
        0x00, 0x00, 0x00, 0x00,  // Stream: 0 (connection-level)
        0x00, 0x04,              //  Param: INITIAL_WINDOW_SIZE
        0x0a, 0x0b, 0x0c, 0x0d,  //  Value: 0x0a0b0c0d (big-endian on the wire)
    };
    uint32_t kValue = 0x0a0b0c0d;
    SpdySettingsIR settings_ir;
    SpdyKnownSettingsId kId = SETTINGS_INITIAL_WINDOW_SIZE;
    settings_ir.AddSetting(kId, kValue);
    SpdySerializedFrame frame(framer_.SerializeSettings(settings_ir));
    if (use_output_) {
      ASSERT_TRUE(framer_.SerializeSettings(settings_ir, &output_));
      frame = MakeSerializedFrame(output_.Begin(), output_.Size());
    }
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
  }

  {
    const char kDescription[] = "Basic SETTINGS frame";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x18,        // Length: 24 (4 settings x 6 bytes)
        0x04,                    //   Type: SETTINGS
        0x00,                    //  Flags: none
        0x00, 0x00, 0x00, 0x00,  // Stream: 0
        0x00, 0x01,              //  Param: HEADER_TABLE_SIZE
        0x00, 0x00, 0x00, 0x05,  //  Value: 5
        0x00, 0x02,              //  Param: ENABLE_PUSH
        0x00, 0x00, 0x00, 0x06,  //  Value: 6
        0x00, 0x03,              //  Param: MAX_CONCURRENT_STREAMS
        0x00, 0x00, 0x00, 0x07,  //  Value: 7
        0x00, 0x04,              //  Param: INITIAL_WINDOW_SIZE
        0x00, 0x00, 0x00, 0x08,  //  Value: 8
    };
    SpdySettingsIR settings_ir;
    settings_ir.AddSetting(SETTINGS_HEADER_TABLE_SIZE, 5);
    settings_ir.AddSetting(SETTINGS_ENABLE_PUSH, 6);
    settings_ir.AddSetting(SETTINGS_MAX_CONCURRENT_STREAMS, 7);
    settings_ir.AddSetting(SETTINGS_INITIAL_WINDOW_SIZE, 8);
    SpdySerializedFrame frame(framer_.SerializeSettings(settings_ir));
    if (use_output_) {
      output_.Reset();
      ASSERT_TRUE(framer_.SerializeSettings(settings_ir, &output_));
      frame = MakeSerializedFrame(output_.Begin(), output_.Size());
    }
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
  }

  {
    const char kDescription[] = "Empty SETTINGS frame";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x00,        // Length: 0
        0x04,                    //   Type: SETTINGS
        0x00,                    //  Flags: none
        0x00, 0x00, 0x00, 0x00,  // Stream: 0
    };
    SpdySettingsIR settings_ir;
    SpdySerializedFrame frame(framer_.SerializeSettings(settings_ir));
    if (use_output_) {
      output_.Reset();
      ASSERT_TRUE(framer_.SerializeSettings(settings_ir, &output_));
      frame = MakeSerializedFrame(output_.Begin(), output_.Size());
    }
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
  }
}
// Verifies serialization of PING frames with and without the ACK flag; the
// 8-byte opaque payload must round-trip unchanged.
TEST_P(SpdyFramerTest, CreatePingFrame) {
  {
    const char kDescription[] = "PING frame";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x08,        // Length: 8
        0x06,                    //   Type: PING
        0x00,                    //  Flags: none
        0x00, 0x00, 0x00, 0x00,  // Stream: 0 (connection-level)
        0x12, 0x34, 0x56, 0x78,  // Opaque
        0x9a, 0xbc, 0xde, 0xff,  //   Data
    };
    const unsigned char kH2FrameDataWithAck[] = {
        0x00, 0x00, 0x08,        // Length: 8
        0x06,                    //   Type: PING
        0x01,                    //  Flags: ACK
        0x00, 0x00, 0x00, 0x00,  // Stream: 0
        0x12, 0x34, 0x56, 0x78,  // Opaque
        0x9a, 0xbc, 0xde, 0xff,  //   Data
    };
    const SpdyPingId kPingId = 0x123456789abcdeffULL;
    SpdyPingIR ping_ir(kPingId);
    // Tests SpdyPingIR when the ping is not an ack.
    ASSERT_FALSE(ping_ir.is_ack());
    SpdySerializedFrame frame(framer_.SerializePing(ping_ir));
    if (use_output_) {
      ASSERT_TRUE(framer_.SerializePing(ping_ir, &output_));
      frame = MakeSerializedFrame(output_.Begin(), output_.Size());
    }
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));

    // Tests SpdyPingIR when the ping is an ack: only the flags byte changes.
    ping_ir.set_is_ack(true);
    frame = framer_.SerializePing(ping_ir);
    if (use_output_) {
      output_.Reset();
      ASSERT_TRUE(framer_.SerializePing(ping_ir, &output_));
      frame = MakeSerializedFrame(output_.Begin(), output_.Size());
    }
    CompareFrame(kDescription, frame, kH2FrameDataWithAck,
                 ABSL_ARRAYSIZE(kH2FrameDataWithAck));
  }
}
// Verifies serialization of GOAWAY frames, including the max last-stream-ID
// and a non-zero error code, with opaque debug data appended.
TEST_P(SpdyFramerTest, CreateGoAway) {
  {
    const char kDescription[] = "GOAWAY frame";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x0a,        // Length: 10 (4 + 4 + 2-byte debug data)
        0x07,                    //   Type: GOAWAY
        0x00,                    //  Flags: none
        0x00, 0x00, 0x00, 0x00,  // Stream: 0 (connection-level)
        0x00, 0x00, 0x00, 0x00,  //   Last: 0
        0x00, 0x00, 0x00, 0x00,  //  Error: NO_ERROR
        0x47, 0x41,              // Description: "GA"
    };
    SpdyGoAwayIR goaway_ir(/* last_good_stream_id = */ 0, ERROR_CODE_NO_ERROR,
                           "GA");
    SpdySerializedFrame frame(framer_.SerializeGoAway(goaway_ir));
    if (use_output_) {
      ASSERT_TRUE(framer_.SerializeGoAway(goaway_ir, &output_));
      frame = MakeSerializedFrame(output_.Begin(), output_.Size());
    }
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
  }

  {
    const char kDescription[] = "GOAWAY frame with max stream ID, status";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x0a,        // Length: 10
        0x07,                    //   Type: GOAWAY
        0x00,                    //  Flags: none
        0x00, 0x00, 0x00, 0x00,  // Stream: 0
        0x7f, 0xff, 0xff, 0xff,  //   Last: 0x7fffffff (max 31-bit stream ID)
        0x00, 0x00, 0x00, 0x02,  //  Error: INTERNAL_ERROR
        0x47, 0x41,              // Description: "GA"
    };
    SpdyGoAwayIR goaway_ir(/* last_good_stream_id = */ 0x7FFFFFFF,
                           ERROR_CODE_INTERNAL_ERROR, "GA");
    SpdySerializedFrame frame(framer_.SerializeGoAway(goaway_ir));
    if (use_output_) {
      output_.Reset();
      ASSERT_TRUE(framer_.SerializeGoAway(goaway_ir, &output_));
      frame = MakeSerializedFrame(output_.Begin(), output_.Size());
    }
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
  }
}
// Verifies serialization of HEADERS frames with HPACK compression disabled:
// empty names/values, FIN, max stream ID, priority (with and without the
// exclusive bit), and padding. Header payloads use HPACK "literal header
// field without indexing -- new name" encoding (0x00, name-len, name,
// value-len, value).
TEST_P(SpdyFramerTest, CreateHeadersUncompressed) {
  SpdyFramer framer(SpdyFramer::DISABLE_COMPRESSION);
  {
    const char kDescription[] = "HEADERS frame, no FIN";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x12,        // Length: 18
        0x01,                    //   Type: HEADERS
        0x04,                    //  Flags: END_HEADERS
        0x00, 0x00, 0x00, 0x01,  // Stream: 1
        0x00,                    // Unindexed Entry
        0x03,                    // Name Len: 3
        0x62, 0x61, 0x72,        // Name: "bar"
        0x03,                    // Value Len: 3
        0x66, 0x6f, 0x6f,        // Value: "foo"
        0x00,                    // Unindexed Entry
        0x03,                    // Name Len: 3
        0x66, 0x6f, 0x6f,        // Name: "foo"
        0x03,                    // Value Len: 3
        0x62, 0x61, 0x72,        // Value: "bar"
    };
    SpdyHeadersIR headers(/* stream_id = */ 1);
    headers.SetHeader("bar", "foo");
    headers.SetHeader("foo", "bar");
    SpdySerializedFrame frame(SpdyFramerPeer::SerializeHeaders(
        &framer, headers, use_output_ ? &output_ : nullptr));
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
  }

  {
    const char kDescription[] =
        "HEADERS frame with a 0-length header name, FIN, max stream ID";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x0f,        // Length: 15
        0x01,                    //   Type: HEADERS
        0x05,                    //  Flags: END_STREAM|END_HEADERS
        0x7f, 0xff, 0xff, 0xff,  // Stream: 0x7fffffff
        0x00,                    // Unindexed Entry
        0x00,                    // Name Len: 0 (empty header name)
        0x03,                    // Value Len: 3
        0x66, 0x6f, 0x6f,        // Value: "foo"
        0x00,                    // Unindexed Entry
        0x03,                    // Name Len: 3
        0x66, 0x6f, 0x6f,        // Name: "foo"
        0x03,                    // Value Len: 3
        0x62, 0x61, 0x72,        // Value: "bar"
    };
    SpdyHeadersIR headers(/* stream_id = */ 0x7fffffff);
    headers.set_fin(true);
    headers.SetHeader("", "foo");
    headers.SetHeader("foo", "bar");
    SpdySerializedFrame frame(SpdyFramerPeer::SerializeHeaders(
        &framer, headers, use_output_ ? &output_ : nullptr));
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
  }

  {
    const char kDescription[] =
        "HEADERS frame with a 0-length header val, FIN, max stream ID";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x0f,        // Length: 15
        0x01,                    //   Type: HEADERS
        0x05,                    //  Flags: END_STREAM|END_HEADERS
        0x7f, 0xff, 0xff, 0xff,  // Stream: 0x7fffffff
        0x00,                    // Unindexed Entry
        0x03,                    // Name Len: 3
        0x62, 0x61, 0x72,        // Name: "bar"
        0x03,                    // Value Len: 3
        0x66, 0x6f, 0x6f,        // Value: "foo"
        0x00,                    // Unindexed Entry
        0x03,                    // Name Len: 3
        0x66, 0x6f, 0x6f,        // Name: "foo"
        0x00,                    // Value Len: 0 (empty header value)
    };
    SpdyHeadersIR headers_ir(/* stream_id = */ 0x7fffffff);
    headers_ir.set_fin(true);
    headers_ir.SetHeader("bar", "foo");
    headers_ir.SetHeader("foo", "");
    SpdySerializedFrame frame(SpdyFramerPeer::SerializeHeaders(
        &framer, headers_ir, use_output_ ? &output_ : nullptr));
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
  }

  {
    const char kDescription[] =
        "HEADERS frame with a 0-length header val, FIN, max stream ID, pri";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x14,        // Length: 20 (5-byte priority block added)
        0x01,                    //   Type: HEADERS
        0x25,                    //  Flags: END_STREAM|END_HEADERS|PRIORITY
        0x7f, 0xff, 0xff, 0xff,  // Stream: 0x7fffffff
        0x00, 0x00, 0x00, 0x00,  // Parent: 0 (exclusive bit clear)
        0xdb,                    // Weight: 219 on the wire (weight 220)
        0x00,                    // Unindexed Entry
        0x03,                    // Name Len: 3
        0x62, 0x61, 0x72,        // Name: "bar"
        0x03,                    // Value Len: 3
        0x66, 0x6f, 0x6f,        // Value: "foo"
        0x00,                    // Unindexed Entry
        0x03,                    // Name Len: 3
        0x66, 0x6f, 0x6f,        // Name: "foo"
        0x00,                    // Value Len: 0
    };
    SpdyHeadersIR headers_ir(/* stream_id = */ 0x7fffffff);
    headers_ir.set_fin(true);
    headers_ir.set_has_priority(true);
    headers_ir.set_weight(220);
    headers_ir.SetHeader("bar", "foo");
    headers_ir.SetHeader("foo", "");
    SpdySerializedFrame frame(SpdyFramerPeer::SerializeHeaders(
        &framer, headers_ir, use_output_ ? &output_ : nullptr));
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
  }

  {
    const char kDescription[] =
        "HEADERS frame with a 0-length header val, FIN, max stream ID, pri, "
        "exclusive=true, parent_stream=0";
    const unsigned char kV4FrameData[] = {
        0x00, 0x00, 0x14,        // Length: 20
        0x01,                    //   Type: HEADERS
        0x25,                    //  Flags: END_STREAM|END_HEADERS|PRIORITY
        0x7f, 0xff, 0xff, 0xff,  // Stream: 0x7fffffff
        0x80, 0x00, 0x00, 0x00,  // Parent: 0 with exclusive bit (MSB) set
        0xdb,                    // Weight: 219 on the wire (weight 220)
        0x00,                    // Unindexed Entry
        0x03,                    // Name Len: 3
        0x62, 0x61, 0x72,        // Name: "bar"
        0x03,                    // Value Len: 3
        0x66, 0x6f, 0x6f,        // Value: "foo"
        0x00,                    // Unindexed Entry
        0x03,                    // Name Len: 3
        0x66, 0x6f, 0x6f,        // Name: "foo"
        0x00,                    // Value Len: 0
    };
    SpdyHeadersIR headers_ir(/* stream_id = */ 0x7fffffff);
    headers_ir.set_fin(true);
    headers_ir.set_has_priority(true);
    headers_ir.set_weight(220);
    headers_ir.set_exclusive(true);
    headers_ir.set_parent_stream_id(0);
    headers_ir.SetHeader("bar", "foo");
    headers_ir.SetHeader("foo", "");
    SpdySerializedFrame frame(SpdyFramerPeer::SerializeHeaders(
        &framer, headers_ir, use_output_ ? &output_ : nullptr));
    CompareFrame(kDescription, frame, kV4FrameData,
                 ABSL_ARRAYSIZE(kV4FrameData));
  }

  {
    const char kDescription[] =
        "HEADERS frame with a 0-length header val, FIN, max stream ID, pri, "
        "exclusive=false, parent_stream=max stream ID";
    const unsigned char kV4FrameData[] = {
        0x00, 0x00, 0x14,        // Length: 20
        0x01,                    //   Type: HEADERS
        0x25,                    //  Flags: END_STREAM|END_HEADERS|PRIORITY
        0x7f, 0xff, 0xff, 0xff,  // Stream: 0x7fffffff
        0x7f, 0xff, 0xff, 0xff,  // Parent: 0x7fffffff (exclusive bit clear)
        0xdb,                    // Weight: 219 on the wire (weight 220)
        0x00,                    // Unindexed Entry
        0x03,                    // Name Len: 3
        0x62, 0x61, 0x72,        // Name: "bar"
        0x03,                    // Value Len: 3
        0x66, 0x6f, 0x6f,        // Value: "foo"
        0x00,                    // Unindexed Entry
        0x03,                    // Name Len: 3
        0x66, 0x6f, 0x6f,        // Name: "foo"
        0x00,                    // Value Len: 0
    };
    SpdyHeadersIR headers_ir(/* stream_id = */ 0x7fffffff);
    headers_ir.set_fin(true);
    headers_ir.set_has_priority(true);
    headers_ir.set_weight(220);
    headers_ir.set_exclusive(false);
    headers_ir.set_parent_stream_id(0x7fffffff);
    headers_ir.SetHeader("bar", "foo");
    headers_ir.SetHeader("foo", "");
    SpdySerializedFrame frame(SpdyFramerPeer::SerializeHeaders(
        &framer, headers_ir, use_output_ ? &output_ : nullptr));
    CompareFrame(kDescription, frame, kV4FrameData,
                 ABSL_ARRAYSIZE(kV4FrameData));
  }

  {
    const char kDescription[] =
        "HEADERS frame with a 0-length header name, FIN, max stream ID, padded";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x15,        // Length: 21 (15 payload + 1 pad-len + 5 pad)
        0x01,                    //   Type: HEADERS
        0x0d,                    //  Flags: END_STREAM|END_HEADERS|PADDED
        0x7f, 0xff, 0xff, 0xff,  // Stream: 0x7fffffff
        0x05,                    // PadLen: 5 trailing bytes
        0x00,                    // Unindexed Entry
        0x00,                    // Name Len: 0 (empty header name)
        0x03,                    // Value Len: 3
        0x66, 0x6f, 0x6f,        // Value: "foo"
        0x00,                    // Unindexed Entry
        0x03,                    // Name Len: 3
        0x66, 0x6f, 0x6f,        // Name: "foo"
        0x03,                    // Value Len: 3
        0x62, 0x61, 0x72,        // Value: "bar"
        0x00, 0x00, 0x00, 0x00,  // Padding
        0x00,                    // Padding
    };
    SpdyHeadersIR headers_ir(/* stream_id = */ 0x7fffffff);
    headers_ir.set_fin(true);
    headers_ir.SetHeader("", "foo");
    headers_ir.SetHeader("foo", "bar");
    // padding_len includes the 1-byte Pad Length field itself.
    headers_ir.set_padding_len(6);
    SpdySerializedFrame frame(SpdyFramerPeer::SerializeHeaders(
        &framer, headers_ir, use_output_ ? &output_ : nullptr));
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
  }
}
// Verifies serialization of WINDOW_UPDATE frames, including the max stream
// ID and the max (31-bit) window-size delta.
TEST_P(SpdyFramerTest, CreateWindowUpdate) {
  {
    const char kDescription[] = "WINDOW_UPDATE frame";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x04,        //  Length: 4
        0x08,                    //    Type: WINDOW_UPDATE
        0x00,                    //   Flags: none
        0x00, 0x00, 0x00, 0x01,  //  Stream: 1
        0x00, 0x00, 0x00, 0x01,  // Increment: 1
    };
    SpdySerializedFrame frame(framer_.SerializeWindowUpdate(
        SpdyWindowUpdateIR(/* stream_id = */ 1, /* delta = */ 1)));
    if (use_output_) {
      output_.Reset();
      ASSERT_TRUE(framer_.SerializeWindowUpdate(
          SpdyWindowUpdateIR(/* stream_id = */ 1, /* delta = */ 1), &output_));
      frame = MakeSerializedFrame(output_.Begin(), output_.Size());
    }
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
  }

  {
    const char kDescription[] = "WINDOW_UPDATE frame with max stream ID";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x04,        //  Length: 4
        0x08,                    //    Type: WINDOW_UPDATE
        0x00,                    //   Flags: none
        0x7f, 0xff, 0xff, 0xff,  //  Stream: 0x7fffffff
        0x00, 0x00, 0x00, 0x01,  // Increment: 1
    };
    SpdySerializedFrame frame(framer_.SerializeWindowUpdate(
        SpdyWindowUpdateIR(/* stream_id = */ 0x7FFFFFFF, /* delta = */ 1)));
    if (use_output_) {
      output_.Reset();
      ASSERT_TRUE(framer_.SerializeWindowUpdate(
          SpdyWindowUpdateIR(/* stream_id = */ 0x7FFFFFFF, /* delta = */ 1),
          &output_));
      frame = MakeSerializedFrame(output_.Begin(), output_.Size());
    }
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
  }

  {
    const char kDescription[] = "WINDOW_UPDATE frame with max window delta";
    const unsigned char kH2FrameData[] = {
        0x00, 0x00, 0x04,        //  Length: 4
        0x08,                    //    Type: WINDOW_UPDATE
        0x00,                    //   Flags: none
        0x00, 0x00, 0x00, 0x01,  //  Stream: 1
        0x7f, 0xff, 0xff, 0xff,  // Increment: 0x7fffffff (max 31-bit delta)
    };
    SpdySerializedFrame frame(framer_.SerializeWindowUpdate(
        SpdyWindowUpdateIR(/* stream_id = */ 1, /* delta = */ 0x7FFFFFFF)));
    if (use_output_) {
      output_.Reset();
      ASSERT_TRUE(framer_.SerializeWindowUpdate(
          SpdyWindowUpdateIR(/* stream_id = */ 1, /* delta = */ 0x7FFFFFFF),
          &output_));
      frame = MakeSerializedFrame(output_.Begin(), output_.Size());
    }
    CompareFrame(kDescription, frame, kH2FrameData,
                 ABSL_ARRAYSIZE(kH2FrameData));
  }
}
// Verifies serialization of PUSH_PROMISE frames with HPACK compression
// disabled: no padding, minimal padding (pad-length field only), and a large
// padding block.
TEST_P(SpdyFramerTest, CreatePushPromiseUncompressed) {
  {
    // Test framing PUSH_PROMISE without padding.
    SpdyFramer framer(SpdyFramer::DISABLE_COMPRESSION);
    const char kDescription[] = "PUSH_PROMISE frame without padding";
    const unsigned char kFrameData[] = {
        0x00, 0x00, 0x16,        //   Length: 22
        0x05,                    //     Type: PUSH_PROMISE
        0x04,                    //    Flags: END_HEADERS
        0x00, 0x00, 0x00, 0x29,  //   Stream: 41
        0x00, 0x00, 0x00, 0x3a,  // Promised: 58
        0x00,                    // Unindexed Entry
        0x03,                    // Name Len: 3
        0x62, 0x61, 0x72,        // Name: "bar"
        0x03,                    // Value Len: 3
        0x66, 0x6f, 0x6f,        // Value: "foo"
        0x00,                    // Unindexed Entry
        0x03,                    // Name Len: 3
        0x66, 0x6f, 0x6f,        // Name: "foo"
        0x03,                    // Value Len: 3
        0x62, 0x61, 0x72,        // Value: "bar"
    };
    SpdyPushPromiseIR push_promise(/* stream_id = */ 41,
                                   /* promised_stream_id = */ 58);
    push_promise.SetHeader("bar", "foo");
    push_promise.SetHeader("foo", "bar");
    SpdySerializedFrame frame(SpdyFramerPeer::SerializePushPromise(
        &framer, push_promise, use_output_ ? &output_ : nullptr));
    CompareFrame(kDescription, frame, kFrameData, ABSL_ARRAYSIZE(kFrameData));
  }

  {
    // Test framing PUSH_PROMISE with one byte of padding: the padding
    // consists solely of the Pad Length field (value 0).
    SpdyFramer framer(SpdyFramer::DISABLE_COMPRESSION);
    const char kDescription[] = "PUSH_PROMISE frame with one byte of padding";
    const unsigned char kFrameData[] = {
        0x00, 0x00, 0x17,        //   Length: 23
        0x05,                    //     Type: PUSH_PROMISE
        0x0c,                    //    Flags: END_HEADERS|PADDED
        0x00, 0x00, 0x00, 0x29,  //   Stream: 41
        0x00,                    //   PadLen: 0 trailing bytes
        0x00, 0x00, 0x00, 0x3a,  // Promised: 58
        0x00,                    // Unindexed Entry
        0x03,                    // Name Len: 3
        0x62, 0x61, 0x72,        // Name: "bar"
        0x03,                    // Value Len: 3
        0x66, 0x6f, 0x6f,        // Value: "foo"
        0x00,                    // Unindexed Entry
        0x03,                    // Name Len: 3
        0x66, 0x6f, 0x6f,        // Name: "foo"
        0x03,                    // Value Len: 3
        0x62, 0x61, 0x72,        // Value: "bar"
    };
    SpdyPushPromiseIR push_promise(/* stream_id = */ 41,
                                   /* promised_stream_id = */ 58);
    push_promise.set_padding_len(1);
    push_promise.SetHeader("bar", "foo");
    push_promise.SetHeader("foo", "bar");
    output_.Reset();
    SpdySerializedFrame frame(SpdyFramerPeer::SerializePushPromise(
        &framer, push_promise, use_output_ ? &output_ : nullptr));
    CompareFrame(kDescription, frame, kFrameData, ABSL_ARRAYSIZE(kFrameData));
  }

  {
    // Test framing PUSH_PROMISE with 177 bytes of padding (1-byte Pad Length
    // field + 176 zero bytes).
    SpdyFramer framer(SpdyFramer::DISABLE_COMPRESSION);
    const char kDescription[] = "PUSH_PROMISE frame with 177 bytes of padding";
    const unsigned char kFrameData[] = {
        0x00, 0x00, 0xc7,        //   Length: 199
        0x05,                    //     Type: PUSH_PROMISE
        0x0c,                    //    Flags: END_HEADERS|PADDED
        0x00, 0x00, 0x00, 0x2a,  //   Stream: 42
        0xb0,                    //   PadLen: 176 trailing bytes
        0x00, 0x00, 0x00, 0x39,  // Promised: 57
        0x00,                    // Unindexed Entry
        0x03,                    // Name Len: 3
        0x62, 0x61, 0x72,        // Name: "bar"
        0x03,                    // Value Len: 3
        0x66, 0x6f, 0x6f,        // Value: "foo"
        0x00,                    // Unindexed Entry
        0x03,                    // Name Len: 3
        0x66, 0x6f, 0x6f,        // Name: "foo"
        0x03,                    // Value Len: 3
        0x62, 0x61, 0x72,        // Value: "bar"
        // 176 zero padding bytes follow.
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    };
    SpdyPushPromiseIR push_promise(/* stream_id = */ 42,
                                   /* promised_stream_id = */ 57);
    push_promise.set_padding_len(177);
    push_promise.SetHeader("bar", "foo");
    push_promise.SetHeader("foo", "bar");
    output_.Reset();
    SpdySerializedFrame frame(SpdyFramerPeer::SerializePushPromise(
        &framer, push_promise, use_output_ ? &output_ : nullptr));
    CompareFrame(kDescription, frame, kFrameData, ABSL_ARRAYSIZE(kFrameData));
  }
}
// Verifies the CONTINUATION-frame count computation at the exact boundaries
// where one more frame becomes necessary.
// NOTE(review): the constants appear to be the header-block capacity of the
// first frame (16383) and of each CONTINUATION frame (16374 = 16383 minus the
// 9-byte frame header) — confirm against kHttp2MaxControlFrameSendSize.
TEST_P(SpdyFramerTest, GetNumberRequiredContinuationFrames) {
  EXPECT_EQ(1u, GetNumberRequiredContinuationFrames(16383 + 16374));
  EXPECT_EQ(2u, GetNumberRequiredContinuationFrames(16383 + 16374 + 1));
  EXPECT_EQ(2u, GetNumberRequiredContinuationFrames(16383 + 2 * 16374));
  EXPECT_EQ(3u, GetNumberRequiredContinuationFrames(16383 + 2 * 16374 + 1));
}
// Verifies serialization of a CONTINUATION frame carrying an uncompressed
// (no-indexing) HPACK header-block fragment with END_HEADERS set.
TEST_P(SpdyFramerTest, CreateContinuationUncompressed) {
  SpdyFramer framer(SpdyFramer::DISABLE_COMPRESSION);
  const char kDescription[] = "CONTINUATION frame";
  const unsigned char kFrameData[] = {
      0x00, 0x00, 0x12,        // Length: 18
      0x09,                    //   Type: CONTINUATION
      0x04,                    //  Flags: END_HEADERS
      0x00, 0x00, 0x00, 0x2a,  // Stream: 42
      0x00,                    // Unindexed Entry
      0x03,                    // Name Len: 3
      0x62, 0x61, 0x72,        // Name: "bar"
      0x03,                    // Value Len: 3
      0x66, 0x6f, 0x6f,        // Value: "foo"
      0x00,                    // Unindexed Entry
      0x03,                    // Name Len: 3
      0x66, 0x6f, 0x6f,        // Name: "foo"
      0x03,                    // Value Len: 3
      0x62, 0x61, 0x72,        // Value: "bar"
  };

  // Encode the header block without compression so the payload bytes are
  // predictable, then hand the encoding to the CONTINUATION IR.
  quiche::HttpHeaderBlock header_block;
  header_block["bar"] = "foo";
  header_block["foo"] = "bar";
  HpackEncoder encoder;
  encoder.DisableCompression();
  std::string buffer = encoder.EncodeHeaderBlock(header_block);

  SpdyContinuationIR continuation(/* stream_id = */ 42);
  continuation.take_encoding(std::move(buffer));
  continuation.set_end_headers(true);

  SpdySerializedFrame frame(framer.SerializeContinuation(continuation));
  if (use_output_) {
    ASSERT_TRUE(framer.SerializeContinuation(continuation, &output_));
    frame = MakeSerializedFrame(output_.Begin(), output_.Size());
  }
  CompareFrame(kDescription, frame, kFrameData, ABSL_ARRAYSIZE(kFrameData));
}
// A CONTINUATION frame that is not preceded by a HEADERS/PUSH_PROMISE frame
// must be rejected by the deframer with SPDY_UNEXPECTED_FRAME.
TEST_P(SpdyFramerTest, SendUnexpectedContinuation) {
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);

  // kH2FrameData is a well-formed CONTINUATION frame on stream 42; only its
  // position in the stream (no preceding header block) makes it invalid.
  char kH2FrameData[] = {
      0x00, 0x00, 0x12,        // Length: 18
      0x09,                    //   Type: CONTINUATION
      0x04,                    //  Flags: END_HEADERS
      0x00, 0x00, 0x00, 0x2a,  // Stream: 42
      0x00,                    // Unindexed Entry
      0x03,                    // Name Len: 3
      0x62, 0x61, 0x72,        // Name: "bar"
      0x03,                    // Value Len: 3
      0x66, 0x6f, 0x6f,        // Value: "foo"
      0x00,                    // Unindexed Entry
      0x03,                    // Name Len: 3
      0x66, 0x6f, 0x6f,        // Name: "foo"
      0x03,                    // Value Len: 3
      0x62, 0x61, 0x72,        // Value: "bar"
  };

  SpdySerializedFrame frame =
      MakeSerializedFrame(kH2FrameData, sizeof(kH2FrameData));

  // The deframer reports the frame header, then errors out before consuming
  // the whole frame.
  EXPECT_CALL(visitor, OnCommonHeader(42, 18, 0x9, 0x4));
  EXPECT_CALL(visitor, OnError(Http2DecoderAdapter::SPDY_UNEXPECTED_FRAME, _));
  EXPECT_GT(frame.size(), deframer_->ProcessInput(frame.data(), frame.size()));
  EXPECT_TRUE(deframer_->HasError());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_UNEXPECTED_FRAME,
            deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
}
// A header block too large for one control frame must be split into a
// PUSH_PROMISE frame (END_HEADERS clear) followed by a CONTINUATION frame.
// This test checks the leading bytes of the first frame and the entire
// trailing CONTINUATION frame.
TEST_P(SpdyFramerTest, CreatePushPromiseThenContinuationUncompressed) {
  {
    // Test framing in a case such that a PUSH_PROMISE frame, with one byte of
    // padding, cannot hold all the data payload, which is overflowed to the
    // consecutive CONTINUATION frame.
    SpdyFramer framer(SpdyFramer::DISABLE_COMPRESSION);
    const char kDescription[] =
        "PUSH_PROMISE and CONTINUATION frames with one byte of padding";

    // Prefix of the first (maximum-size) frame; the rest of its payload is a
    // long run of 'x' bytes from the oversized header value.
    const unsigned char kPartialPushPromiseFrameData[] = {
        0x00, 0x3f, 0xf6,        //   Length: 16374
        0x05,                    //     Type: PUSH_PROMISE
        0x08,                    //    Flags: PADDED (END_HEADERS deliberately
                                 //           clear: CONTINUATION follows)
        0x00, 0x00, 0x00, 0x2a,  //   Stream: 42
        0x00,                    //   PadLen: 0 trailing bytes
        0x00, 0x00, 0x00, 0x39,  // Promised: 57
        0x00,                    // Unindexed Entry
        0x03,                    // Name Len: 3
        0x78, 0x78, 0x78,        // Name: "xxx"
        0x7f, 0x80, 0x7f,        // Value Len: HPACK varint (16383 —
                                 //   kHttp2MaxControlFrameSendSize)
        0x78, 0x78, 0x78, 0x78,  // Value: "xxxx..."
        0x78, 0x78, 0x78, 0x78,  //
        0x78, 0x78, 0x78, 0x78,  //
        0x78, 0x78, 0x78, 0x78,  //
        0x78, 0x78, 0x78, 0x78,  //
        0x78, 0x78, 0x78, 0x78,  //
        0x78, 0x78, 0x78, 0x78,  //
        0x78, 0x78, 0x78, 0x78,  //
        0x78, 0x78, 0x78, 0x78,  //
        0x78, 0x78, 0x78, 0x78,  //
        0x78, 0x78, 0x78, 0x78,  //
        0x78, 0x78, 0x78, 0x78,  //
        0x78, 0x78, 0x78, 0x78,  //
        0x78, 0x78, 0x78, 0x78,  //
        0x78, 0x78, 0x78, 0x78,  //
        0x78, 0x78, 0x78, 0x78,  //
        0x78, 0x78, 0x78, 0x78,  //
        0x78, 0x78, 0x78, 0x78,  //
        0x78, 0x78, 0x78, 0x78,  //
        0x78, 0x78, 0x78, 0x78,  //
        0x78, 0x78, 0x78, 0x78,  //
    };

    // The CONTINUATION frame carrying the remainder of the header value.
    const unsigned char kContinuationFrameData[] = {
        0x00, 0x00, 0x16,        // Length: 22
        0x09,                    //   Type: CONTINUATION
        0x04,                    //  Flags: END_HEADERS
        0x00, 0x00, 0x00, 0x2a,  // Stream: 42
        0x78, 0x78, 0x78, 0x78,  // Remaining value bytes: "xxxx..."
        0x78, 0x78, 0x78, 0x78,  //
        0x78, 0x78, 0x78, 0x78,  //
        0x78, 0x78, 0x78, 0x78,  //
        0x78, 0x78, 0x78, 0x78,  //
        0x78,                    //
    };

    SpdyPushPromiseIR push_promise(/* stream_id = */ 42,
                                   /* promised_stream_id = */ 57);
    push_promise.set_padding_len(1);
    std::string big_value(kHttp2MaxControlFrameSendSize, 'x');
    push_promise.SetHeader("xxx", big_value);
    SpdySerializedFrame frame(SpdyFramerPeer::SerializePushPromise(
        &framer, push_promise, use_output_ ? &output_ : nullptr));

    // The serialized result is the concatenation of both frames; everything
    // beyond the header value itself accounts for 31 bytes of framing.
    int len_non_data_payload = 31;
    EXPECT_EQ(kHttp2MaxControlFrameSendSize + len_non_data_payload,
              frame.size());

    // Verify the PUSH_PROMISE frame up to the end of its known prefix.
    const unsigned char* frame_data =
        reinterpret_cast<const unsigned char*>(frame.data());
    CompareCharArraysWithHexError(kDescription, frame_data,
                                  ABSL_ARRAYSIZE(kPartialPushPromiseFrameData),
                                  kPartialPushPromiseFrameData,
                                  ABSL_ARRAYSIZE(kPartialPushPromiseFrameData));

    // Verify the CONTINUATION frame that starts after the first frame.
    frame_data += kHttp2MaxControlFrameSendSize;
    CompareCharArraysWithHexError(
        kDescription, frame_data, ABSL_ARRAYSIZE(kContinuationFrameData),
        kContinuationFrameData, ABSL_ARRAYSIZE(kContinuationFrameData));
  }
}
// Verifies serialization of an ALTSVC frame with an origin and two
// alternative services, one of which needs percent-encoding of its
// protocol ID and quoting/escaping of its host.
TEST_P(SpdyFramerTest, CreateAltSvc) {
  const char kDescription[] = "ALTSVC frame";
  const unsigned char kType = SerializeFrameType(SpdyFrameType::ALTSVC);
  const unsigned char kFrameData[] = {
      // Frame header: Length 0x49=73, Type ALTSVC, Flags 0, Stream 3;
      // then Origin-Len 6, "origin", then the Alt-Svc field value.
      0x00, 0x00, 0x49, kType, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x06, 'o',
      'r',  'i',  'g',  'i',   'n',  'p',  'i',  'd',  '1',  '=',  '"',  'h',
      'o',  's',  't',  ':',   '4',  '4',  '3',  '"',  ';',  ' ',  'm',  'a',
      '=',  '5',  ',',  'p',   '%',  '2',  '2',  '%',  '3',  'D',  'i',  '%',
      '3',  'A',  'd',  '=',   '"',  'h',  '_',  '\\', '\\', 'o',  '\\', '"',
      's',  't',  ':',  '1',   '2',  '3',  '"',  ';',  ' ',  'm',  'a',  '=',
      '4',  '2',  ';',  ' ',   'v',  '=',  '"',  '2',  '4',  '"'};
  SpdyAltSvcIR altsvc_ir(/* stream_id = */ 3);
  altsvc_ir.set_origin("origin");
  altsvc_ir.add_altsvc(SpdyAltSvcWireFormat::AlternativeService(
      "pid1", "host", 443, 5, SpdyAltSvcWireFormat::VersionVector()));
  // Protocol ID contains '"', '=', ':' which must be percent-encoded on the
  // wire; the host's backslashes/quote must be escaped inside the quoted
  // string.
  altsvc_ir.add_altsvc(SpdyAltSvcWireFormat::AlternativeService(
      "p\"=i:d", "h_\\o\"st", 123, 42,
      SpdyAltSvcWireFormat::VersionVector{24}));
  SpdySerializedFrame frame(framer_.SerializeFrame(altsvc_ir));
  if (use_output_) {
    EXPECT_EQ(framer_.SerializeFrame(altsvc_ir, &output_), frame.size());
    frame = MakeSerializedFrame(output_.Begin(), output_.Size());
  }
  CompareFrame(kDescription, frame, kFrameData, ABSL_ARRAYSIZE(kFrameData));
}
// Verifies serialization of a PRIORITY frame with an exclusive dependency.
TEST_P(SpdyFramerTest, CreatePriority) {
  const char kDescription[] = "PRIORITY frame";
  const unsigned char kFrameData[] = {
      0x00, 0x00, 0x05,        // Length: 5
      0x02,                    //   Type: PRIORITY
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x02,  // Stream: 2
      0x80, 0x00, 0x00, 0x01,  // Parent: 1 with exclusive bit (MSB) set
      0x10,                    // Weight: 16 on the wire (weight 17)
  };
  SpdyPriorityIR priority_ir(/* stream_id = */ 2,
                             /* parent_stream_id = */ 1,
                             /* weight = */ 17,
                             /* exclusive = */ true);
  SpdySerializedFrame frame(framer_.SerializeFrame(priority_ir));
  if (use_output_) {
    EXPECT_EQ(framer_.SerializeFrame(priority_ir, &output_), frame.size());
    frame = MakeSerializedFrame(output_.Begin(), output_.Size());
  }
  CompareFrame(kDescription, frame, kFrameData, ABSL_ARRAYSIZE(kFrameData));
}
// Verifies serialization of a PRIORITY_UPDATE frame (RFC 9218) carrying the
// Priority Field Value "u=0" for stream 3.
TEST_P(SpdyFramerTest, CreatePriorityUpdate) {
  const char kDescription[] = "PRIORITY_UPDATE frame";
  const unsigned char kType =
      SerializeFrameType(SpdyFrameType::PRIORITY_UPDATE);
  const unsigned char kFrameData[] = {
      0x00, 0x00, 0x07,        //  Length: 7
      kType,                   //    Type: PRIORITY_UPDATE
      0x00,                    //   Flags: none
      0x00, 0x00, 0x00, 0x00,  //  Stream: 0 (frame is sent on the connection)
      0x00, 0x00, 0x00, 0x03,  // Prioritized Stream ID: 3
      'u',  '=',  '0'};        // Priority Field Value
  SpdyPriorityUpdateIR priority_update_ir(/* stream_id = */ 0,
                                          /* prioritized_stream_id = */ 3,
                                          "u=0");
  SpdySerializedFrame frame(framer_.SerializeFrame(priority_update_ir));
  if (use_output_) {
    EXPECT_EQ(framer_.SerializeFrame(priority_update_ir, &output_),
              frame.size());
    frame = MakeSerializedFrame(output_.Begin(), output_.Size());
  }
  CompareFrame(kDescription, frame, kFrameData, ABSL_ARRAYSIZE(kFrameData));
}
// Verifies serialization of an ACCEPT_CH frame with two origin/value entries;
// each entry is a 2-byte length-prefixed origin followed by a 2-byte
// length-prefixed Accept-CH value.
TEST_P(SpdyFramerTest, CreateAcceptCh) {
  const char kDescription[] = "ACCEPT_CH frame";
  const unsigned char kType = SerializeFrameType(SpdyFrameType::ACCEPT_CH);
  const unsigned char kFrameData[] = {
      0x00, 0x00, 0x2d,                    // Length: 45
      kType,                               //   Type: ACCEPT_CH
      0x00,                                //  Flags: none
      0x00, 0x00, 0x00, 0x00,              // Stream: 0 (connection-level)
      0x00, 0x0f,                          // Origin-Len: 15
      'w',  'w',  'w',  '.',  'e',  'x',   // Origin:
      'a',  'm',  'p',  'l',  'e',  '.',   //   "www.example.com"
      'c',  'o',  'm',                     //
      0x00, 0x03,                          // Value-Len: 3
      'f',  'o',  'o',                     // Value: "foo"
      0x00, 0x10,                          // Origin-Len: 16
      'm',  'a',  'i',  'l',  '.',  'e',   // Origin:
      'x',  'a',  'm',  'p',  'l',  'e',   //   "mail.example.com"
      '.',  'c',  'o',  'm',               //
      0x00, 0x03,                          // Value-Len: 3
      'b',  'a',  'r'};                    // Value: "bar"
  SpdyAcceptChIR accept_ch_ir(
      {{"www.example.com", "foo"}, {"mail.example.com", "bar"}});
  SpdySerializedFrame frame(framer_.SerializeFrame(accept_ch_ir));
  if (use_output_) {
    EXPECT_EQ(framer_.SerializeFrame(accept_ch_ir, &output_), frame.size());
    frame = MakeSerializedFrame(output_.Begin(), output_.Size());
  }
  CompareFrame(kDescription, frame, kFrameData, ABSL_ARRAYSIZE(kFrameData));
}
// Verifies serialization of a frame with an unrecognized type: the type and
// flags bytes are emitted verbatim and the payload is opaque.
TEST_P(SpdyFramerTest, CreateUnknown) {
  const char kDescription[] = "Unknown frame";
  const uint8_t kType = 0xaf;   // Not a registered HTTP/2 frame type.
  const uint8_t kFlags = 0x11;  // Arbitrary flag bits, passed through as-is.
  const uint8_t kLength = strlen(kDescription);
  const unsigned char kFrameData[] = {
      0x00,  0x00, kLength,    // Length: 13 (strlen of the payload)
      kType,                   //   Type: 0xaf (unknown)
      kFlags,                  //  Flags: 0x11
      0x00,  0x00, 0x00, 0x02, // Stream: 2
      0x55,  0x6e, 0x6b, 0x6e, // Payload: "Unkn"
      0x6f,  0x77, 0x6e, 0x20, //          "own "
      0x66,  0x72, 0x61, 0x6d, //          "fram"
      0x65,                    //          "e"
  };
  SpdyUnknownIR unknown_ir(/* stream_id = */ 2,
                           /* type = */ kType,
                           /* flags = */ kFlags,
                           /* payload = */ kDescription);
  SpdySerializedFrame frame(framer_.SerializeFrame(unknown_ir));
  if (use_output_) {
    EXPECT_EQ(framer_.SerializeFrame(unknown_ir, &output_), frame.size());
    frame = MakeSerializedFrame(output_.Begin(), output_.Size());
  }
  CompareFrame(kDescription, frame, kFrameData, ABSL_ARRAYSIZE(kFrameData));
}
// Verifies that TestSpdyUnknownIR can emit deliberately invalid header
// fields: a length field that does not match the payload, and a stream ID
// with the reserved high bit set.
TEST_P(SpdyFramerTest, CreateUnknownUnchecked) {
  const char kDescription[] = "Unknown frame";
  const uint8_t kType = 0x00;
  const uint8_t kFlags = 0x11;
  // Advertised length (255) intentionally disagrees with the 13-byte payload.
  const uint8_t kLength = std::numeric_limits<uint8_t>::max();
  // kStreamIdMask + 42 overflows the 31-bit stream-ID space, setting the
  // reserved high bit: 0x7fffffff + 42 == 0x80000029.
  const unsigned int kStreamId = kStreamIdMask + 42;
  const unsigned char kFrameData[] = {
      0x00,  0x00, kLength,    // Length: 255 (does NOT match the payload)
      kType,                   //   Type: DATA (0x00)
      kFlags,                  //  Flags: 0x11
      0x80,  0x00, 0x00, 0x29, // Stream: 0x80000029 (reserved bit set)
      0x55,  0x6e, 0x6b, 0x6e, // Payload: "Unkn"
      0x6f,  0x77, 0x6e, 0x20, //          "own "
      0x66,  0x72, 0x61, 0x6d, //          "fram"
      0x65,                    //          "e"
  };
  TestSpdyUnknownIR unknown_ir(/* stream_id = */ kStreamId,
                               /* type = */ kType,
                               /* flags = */ kFlags,
                               /* payload = */ kDescription);
  unknown_ir.set_length(kLength);
  SpdySerializedFrame frame(framer_.SerializeFrame(unknown_ir));
  if (use_output_) {
    EXPECT_EQ(framer_.SerializeFrame(unknown_ir, &output_), frame.size());
    frame = MakeSerializedFrame(output_.Begin(), output_.Size());
  }
  CompareFrame(kDescription, frame, kFrameData, ABSL_ARRAYSIZE(kFrameData));
}
// Round-trips a compressed HEADERS frame through the deframer and checks
// that the decoded header block matches the original (no FIN).
TEST_P(SpdyFramerTest, ReadCompressedHeadersHeaderBlock) {
  SpdyHeadersIR headers_ir(/* stream_id = */ 1);
  headers_ir.SetHeader("alpha", "beta");
  headers_ir.SetHeader("gamma", "delta");
  SpdySerializedFrame control_frame(SpdyFramerPeer::SerializeHeaders(
      &framer_, headers_ir, use_output_ ? &output_ : nullptr));
  TestSpdyVisitor visitor(SpdyFramer::ENABLE_COMPRESSION);
  visitor.SimulateInFramer(
      reinterpret_cast<unsigned char*>(control_frame.data()),
      control_frame.size());
  EXPECT_EQ(1, visitor.headers_frame_count_);
  EXPECT_EQ(0, visitor.control_frame_header_data_count_);
  EXPECT_EQ(0, visitor.zero_length_control_frame_header_data_count_);
  // No FIN was set, so no end-of-stream notification is expected.
  EXPECT_EQ(0, visitor.end_of_stream_count_);
  EXPECT_EQ(headers_ir.header_block(), visitor.headers_);
}
// Same round trip as ReadCompressedHeadersHeaderBlock, but with FIN set: the
// visitor must additionally observe exactly one end-of-stream event.
TEST_P(SpdyFramerTest, ReadCompressedHeadersHeaderBlockWithHalfClose) {
  SpdyHeadersIR headers_ir(/* stream_id = */ 1);
  headers_ir.set_fin(true);
  headers_ir.SetHeader("alpha", "beta");
  headers_ir.SetHeader("gamma", "delta");
  SpdySerializedFrame control_frame(SpdyFramerPeer::SerializeHeaders(
      &framer_, headers_ir, use_output_ ? &output_ : nullptr));
  TestSpdyVisitor visitor(SpdyFramer::ENABLE_COMPRESSION);
  visitor.SimulateInFramer(
      reinterpret_cast<unsigned char*>(control_frame.data()),
      control_frame.size());
  EXPECT_EQ(1, visitor.headers_frame_count_);
  EXPECT_EQ(0, visitor.control_frame_header_data_count_);
  EXPECT_EQ(0, visitor.zero_length_control_frame_header_data_count_);
  // FIN on the HEADERS frame half-closes the stream.
  EXPECT_EQ(1, visitor.end_of_stream_count_);
  EXPECT_EQ(headers_ir.header_block(), visitor.headers_);
}
// A HEADERS frame whose payload exceeds the maximum control-frame size must
// be serialized as HEADERS + one CONTINUATION, and the deframer must
// reassemble the pair without error.
TEST_P(SpdyFramerTest, TooLargeHeadersFrameUsesContinuation) {
  SpdyFramer framer(SpdyFramer::DISABLE_COMPRESSION);
  SpdyHeadersIR headers(/* stream_id = */ 1);
  headers.set_padding_len(256);

  // Exact payload length will change later, but this is close enough to
  // overflow a single frame while fitting in one CONTINUATION.
  const size_t kBigValueSize = kHttp2MaxControlFrameSendSize;
  std::string big_value(kBigValueSize, 'x');
  headers.SetHeader("aa", big_value);
  SpdySerializedFrame control_frame(SpdyFramerPeer::SerializeHeaders(
      &framer, headers, use_output_ ? &output_ : nullptr));
  EXPECT_GT(control_frame.size(), kHttp2MaxControlFrameSendSize);

  // Feed the serialized control frame to the framer.
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(
      reinterpret_cast<unsigned char*>(control_frame.data()),
      control_frame.size());
  EXPECT_TRUE(visitor.header_buffer_valid_);
  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(1, visitor.headers_frame_count_);
  EXPECT_EQ(1, visitor.continuation_count_);
  EXPECT_EQ(0, visitor.zero_length_control_frame_header_data_count_);
}
// Drives SpdyHeaderFrameIterator over a header block large enough to need a
// HEADERS frame plus two CONTINUATION frames, feeding each emitted frame to
// a deframer and checking the running frame counts.
TEST_P(SpdyFramerTest, MultipleContinuationFramesWithIterator) {
  SpdyFramer framer(SpdyFramer::DISABLE_COMPRESSION);
  auto headers = std::make_unique<SpdyHeadersIR>(/* stream_id = */ 1);
  headers->set_padding_len(256);

  // Two max-size header values guarantee three frames in total.
  const size_t kBigValueSize = kHttp2MaxControlFrameSendSize;
  std::string big_valuex(kBigValueSize, 'x');
  headers->SetHeader("aa", big_valuex);
  std::string big_valuez(kBigValueSize, 'z');
  headers->SetHeader("bb", big_valuez);

  SpdyFramer::SpdyHeaderFrameIterator frame_it(&framer, std::move(headers));

  // First frame: HEADERS, exactly at the control-frame size limit.
  EXPECT_TRUE(frame_it.HasNextFrame());
  EXPECT_GT(frame_it.NextFrame(&output_), 0u);
  SpdySerializedFrame headers_frame =
      MakeSerializedFrame(output_.Begin(), output_.Size());
  EXPECT_EQ(headers_frame.size(), kHttp2MaxControlFrameSendSize);

  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(
      reinterpret_cast<unsigned char*>(headers_frame.data()),
      headers_frame.size());
  EXPECT_TRUE(visitor.header_buffer_valid_);
  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(1, visitor.headers_frame_count_);
  EXPECT_EQ(0, visitor.continuation_count_);
  EXPECT_EQ(0, visitor.zero_length_control_frame_header_data_count_);

  // Second frame: first CONTINUATION, also at the size limit.
  output_.Reset();
  EXPECT_TRUE(frame_it.HasNextFrame());
  EXPECT_GT(frame_it.NextFrame(&output_), 0u);
  SpdySerializedFrame first_cont_frame =
      MakeSerializedFrame(output_.Begin(), output_.Size());
  EXPECT_EQ(first_cont_frame.size(), kHttp2MaxControlFrameSendSize);

  visitor.SimulateInFramer(
      reinterpret_cast<unsigned char*>(first_cont_frame.data()),
      first_cont_frame.size());
  EXPECT_TRUE(visitor.header_buffer_valid_);
  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(1, visitor.headers_frame_count_);
  EXPECT_EQ(1, visitor.continuation_count_);
  EXPECT_EQ(0, visitor.zero_length_control_frame_header_data_count_);

  // Third frame: final CONTINUATION carrying the remainder (smaller than the
  // limit); afterwards the iterator must be exhausted.
  output_.Reset();
  EXPECT_TRUE(frame_it.HasNextFrame());
  EXPECT_GT(frame_it.NextFrame(&output_), 0u);
  SpdySerializedFrame second_cont_frame =
      MakeSerializedFrame(output_.Begin(), output_.Size());
  EXPECT_LT(second_cont_frame.size(), kHttp2MaxControlFrameSendSize);

  visitor.SimulateInFramer(
      reinterpret_cast<unsigned char*>(second_cont_frame.data()),
      second_cont_frame.size());
  EXPECT_TRUE(visitor.header_buffer_valid_);
  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(1, visitor.headers_frame_count_);
  EXPECT_EQ(2, visitor.continuation_count_);
  EXPECT_EQ(0, visitor.zero_length_control_frame_header_data_count_);

  EXPECT_FALSE(frame_it.HasNextFrame());
}
// Mirror of MultipleContinuationFramesWithIterator for PUSH_PROMISE: the
// iterator must emit PUSH_PROMISE plus two CONTINUATION frames for an
// oversized header block.
TEST_P(SpdyFramerTest, PushPromiseFramesWithIterator) {
  SpdyFramer framer(SpdyFramer::DISABLE_COMPRESSION);
  auto push_promise =
      std::make_unique<SpdyPushPromiseIR>(/* stream_id = */ 1,
                                          /* promised_stream_id = */ 2);
  push_promise->set_padding_len(256);

  // Two max-size header values guarantee three frames in total.
  const size_t kBigValueSize = kHttp2MaxControlFrameSendSize;
  std::string big_valuex(kBigValueSize, 'x');
  push_promise->SetHeader("aa", big_valuex);
  std::string big_valuez(kBigValueSize, 'z');
  push_promise->SetHeader("bb", big_valuez);

  SpdyFramer::SpdyPushPromiseFrameIterator frame_it(&framer,
                                                    std::move(push_promise));

  // First frame: PUSH_PROMISE, exactly at the control-frame size limit.
  EXPECT_TRUE(frame_it.HasNextFrame());
  EXPECT_GT(frame_it.NextFrame(&output_), 0u);
  SpdySerializedFrame push_promise_frame =
      MakeSerializedFrame(output_.Begin(), output_.Size());
  EXPECT_EQ(push_promise_frame.size(), kHttp2MaxControlFrameSendSize);

  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(
      reinterpret_cast<unsigned char*>(push_promise_frame.data()),
      push_promise_frame.size());
  EXPECT_TRUE(visitor.header_buffer_valid_);
  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(1, visitor.push_promise_frame_count_);
  EXPECT_EQ(0, visitor.continuation_count_);
  EXPECT_EQ(0, visitor.zero_length_control_frame_header_data_count_);

  // Second frame: first CONTINUATION, also at the size limit.
  EXPECT_TRUE(frame_it.HasNextFrame());
  output_.Reset();
  EXPECT_GT(frame_it.NextFrame(&output_), 0u);
  SpdySerializedFrame first_cont_frame =
      MakeSerializedFrame(output_.Begin(), output_.Size());
  EXPECT_EQ(first_cont_frame.size(), kHttp2MaxControlFrameSendSize);
  visitor.SimulateInFramer(
      reinterpret_cast<unsigned char*>(first_cont_frame.data()),
      first_cont_frame.size());
  EXPECT_TRUE(visitor.header_buffer_valid_);
  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(1, visitor.push_promise_frame_count_);
  EXPECT_EQ(1, visitor.continuation_count_);
  EXPECT_EQ(0, visitor.zero_length_control_frame_header_data_count_);

  // Third frame: final CONTINUATION carrying the remainder; afterwards the
  // iterator must be exhausted.
  EXPECT_TRUE(frame_it.HasNextFrame());
  output_.Reset();
  EXPECT_GT(frame_it.NextFrame(&output_), 0u);
  SpdySerializedFrame second_cont_frame =
      MakeSerializedFrame(output_.Begin(), output_.Size());
  EXPECT_LT(second_cont_frame.size(), kHttp2MaxControlFrameSendSize);
  visitor.SimulateInFramer(
      reinterpret_cast<unsigned char*>(second_cont_frame.data()),
      second_cont_frame.size());
  EXPECT_TRUE(visitor.header_buffer_valid_);
  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(1, visitor.push_promise_frame_count_);
  EXPECT_EQ(2, visitor.continuation_count_);
  EXPECT_EQ(0, visitor.zero_length_control_frame_header_data_count_);

  EXPECT_FALSE(frame_it.HasNextFrame());
}
// Fixture for testing SpdyFramer::CreateIterator with single-frame control
// frame IRs: the iterator must yield exactly one frame whose size equals the
// size of serializing the same IR directly.
class SpdyControlFrameIteratorTest : public quiche::test::QuicheTest {
 public:
  // NOTE(review): output_buffer and kSize are file-level test helpers defined
  // elsewhere in this file — confirm their declarations when moving this
  // fixture.
  SpdyControlFrameIteratorTest() : output_(output_buffer, kSize) {}

  // Serializes |ir| directly, then verifies the iterator produces exactly one
  // frame of the same size and then reports exhaustion.
  void RunTest(std::unique_ptr<SpdyFrameIR> ir) {
    SpdyFramer framer(SpdyFramer::DISABLE_COMPRESSION);
    SpdySerializedFrame frame(framer.SerializeFrame(*ir));
    std::unique_ptr<SpdyFrameSequence> it =
        SpdyFramer::CreateIterator(&framer, std::move(ir));
    EXPECT_TRUE(it->HasNextFrame());
    EXPECT_EQ(it->NextFrame(&output_), frame.size());
    EXPECT_FALSE(it->HasNextFrame());
  }

 private:
  ArrayOutputBuffer output_;
};
// Single-frame iterator round trip for RST_STREAM.
TEST_F(SpdyControlFrameIteratorTest, RstStreamFrameWithIterator) {
  RunTest(std::make_unique<SpdyRstStreamIR>(0, ERROR_CODE_PROTOCOL_ERROR));
}
// A SETTINGS frame with a single entry fits in a single serialized frame.
TEST_F(SpdyControlFrameIteratorTest, SettingsFrameWithIterator) {
  auto settings_ir = std::make_unique<SpdySettingsIR>();
  settings_ir->AddSetting(SETTINGS_INITIAL_WINDOW_SIZE, 0x0a0b0c0d);
  RunTest(std::move(settings_ir));
}
// A PING frame always fits in a single serialized frame.
TEST_F(SpdyControlFrameIteratorTest, PingFrameWithIterator) {
  RunTest(std::make_unique<SpdyPingIR>(0x123456789abcdeffULL));
}
// A GOAWAY frame with a short debug payload fits in one serialized frame.
TEST_F(SpdyControlFrameIteratorTest, GoAwayFrameWithIterator) {
  RunTest(std::make_unique<SpdyGoAwayIR>(0, ERROR_CODE_NO_ERROR, "GA"));
}
// A WINDOW_UPDATE frame always fits in a single serialized frame.
TEST_F(SpdyControlFrameIteratorTest, WindowUpdateFrameWithIterator) {
  RunTest(std::make_unique<SpdyWindowUpdateIR>(1, 1));
}
// An ALTSVC frame with two alternative services fits in a single serialized
// frame.  NOTE: the test name contains a historical typo ("AtlSvc"); it is
// kept to preserve the registered gtest name.
TEST_F(SpdyControlFrameIteratorTest, AtlSvcFrameWithIterator) {
  auto altsvc_ir = std::make_unique<SpdyAltSvcIR>(3);
  altsvc_ir->set_origin("origin");
  altsvc_ir->add_altsvc(SpdyAltSvcWireFormat::AlternativeService(
      "pid1", "host", 443, 5, SpdyAltSvcWireFormat::VersionVector()));
  // Second service exercises characters that require percent-encoding.
  altsvc_ir->add_altsvc(SpdyAltSvcWireFormat::AlternativeService(
      "p\"=i:d", "h_\\o\"st", 123, 42,
      SpdyAltSvcWireFormat::VersionVector{24}));
  RunTest(std::move(altsvc_ir));
}
// A PRIORITY frame always fits in a single serialized frame.
TEST_F(SpdyControlFrameIteratorTest, PriorityFrameWithIterator) {
  RunTest(std::make_unique<SpdyPriorityIR>(2, 1, 17, true));
}
// Verifies that a PUSH_PROMISE whose header block exceeds the maximum
// control-frame send size is split into a PUSH_PROMISE frame followed by a
// CONTINUATION frame, and that the deframer reassembles them into a single
// logical push promise without errors.
TEST_P(SpdyFramerTest, TooLargePushPromiseFrameUsesContinuation) {
  SpdyFramer framer(SpdyFramer::DISABLE_COMPRESSION);
  SpdyPushPromiseIR push_promise(/* stream_id = */ 1,
                                 /* promised_stream_id = */ 2);
  push_promise.set_padding_len(256);
  // One byte short of the max frame size forces spill-over into CONTINUATION.
  const size_t kBigValueSize = kHttp2MaxControlFrameSendSize;
  std::string big_value(kBigValueSize, 'x');
  push_promise.SetHeader("aa", big_value);
  SpdySerializedFrame control_frame(SpdyFramerPeer::SerializePushPromise(
      &framer, push_promise, use_output_ ? &output_ : nullptr));
  EXPECT_GT(control_frame.size(), kHttp2MaxControlFrameSendSize);
  // Feed the concatenated frames back through a decompressing visitor.
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(
      reinterpret_cast<unsigned char*>(control_frame.data()),
      control_frame.size());
  EXPECT_TRUE(visitor.header_buffer_valid_);
  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(1, visitor.push_promise_frame_count_);
  EXPECT_EQ(1, visitor.continuation_count_);
  EXPECT_EQ(0, visitor.zero_length_control_frame_header_data_count_);
}
// Verifies behavior when the visitor's header buffer is far too small for an
// incoming HEADERS frame: the header data overflows the buffer (and is
// dropped), but frame processing still completes and the FIN is delivered.
TEST_P(SpdyFramerTest, ControlFrameMuchTooLarge) {
  const size_t kHeaderBufferChunks = 4;
  // Buffer is one quarter of the default payload limit.
  const size_t kHeaderBufferSize =
      kHttp2DefaultFramePayloadLimit / kHeaderBufferChunks;
  // Header value is twice the buffer size, guaranteeing overflow.
  const size_t kBigValueSize = kHeaderBufferSize * 2;
  std::string big_value(kBigValueSize, 'x');
  SpdyHeadersIR headers(/* stream_id = */ 1);
  headers.set_fin(true);
  headers.SetHeader("aa", big_value);
  SpdySerializedFrame control_frame(SpdyFramerPeer::SerializeHeaders(
      &framer_, headers, use_output_ ? &output_ : nullptr));
  TestSpdyVisitor visitor(SpdyFramer::ENABLE_COMPRESSION);
  visitor.set_header_buffer_size(kHeaderBufferSize);
  visitor.SimulateInFramer(
      reinterpret_cast<unsigned char*>(control_frame.data()),
      control_frame.size());
  // More header bytes arrived than the buffer could hold...
  EXPECT_GT(visitor.header_bytes_received_, visitor.header_buffer_size_);
  // ...yet end-of-stream (FIN) is still reported exactly once.
  EXPECT_EQ(1, visitor.end_of_stream_count_);
}
// Verifies that a GOAWAY frame whose declared payload length is one byte
// below the GOAWAY minimum is rejected with SPDY_INVALID_CONTROL_FRAME and
// that the visitor never receives a GoAway callback.
TEST_P(SpdyFramerTest, ControlFrameSizesAreValidated) {
  const size_t length = 20;
  // GOAWAY frames carry opaque trailing data, so only a minimum payload
  // length can be enforced; verify the constants make that check meaningful.
  ASSERT_GT(kGoawayFrameMinimumSize, kFrameHeaderSize);
  const size_t less_than_min_length =
      kGoawayFrameMinimumSize - kFrameHeaderSize - 1;
  ASSERT_LE(less_than_min_length, std::numeric_limits<unsigned char>::max());
  const unsigned char kH2Len = static_cast<unsigned char>(less_than_min_length);
  const unsigned char kH2FrameData[] = {
      0x00, 0x00, kH2Len,      // Length: one below the GOAWAY minimum
      0x07,                    //   Type: GOAWAY
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x00,  // Stream: 0
      0x00, 0x00, 0x00, 0x00,  // Last-Stream-ID: 0
      0x00, 0x00, 0x00,        // Truncated status field
  };
  // Pad the input out to |length| bytes so the deframer has enough data to
  // read the whole (invalid) payload.
  const size_t pad_length = length + kFrameHeaderSize - sizeof(kH2FrameData);
  std::string pad(pad_length, 'A');
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(kH2FrameData, sizeof(kH2FrameData));
  visitor.SimulateInFramer(reinterpret_cast<const unsigned char*>(pad.c_str()),
                           pad.length());
  EXPECT_EQ(1, visitor.error_count_);  // Exactly one error reported.
  EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME,
            visitor.deframer_.spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             visitor.deframer_.spdy_framer_error());
  EXPECT_EQ(0, visitor.goaway_count_);  // The frame was never parsed.
}
// A SETTINGS frame with a zero-length payload is valid (it simply carries no
// settings) and must not produce an error.
TEST_P(SpdyFramerTest, ReadZeroLenSettingsFrame) {
  SpdySettingsIR settings_ir;
  SpdySerializedFrame control_frame(framer_.SerializeSettings(settings_ir));
  if (use_output_) {
    ASSERT_TRUE(framer_.SerializeSettings(settings_ir, &output_));
    control_frame = MakeSerializedFrame(output_.Begin(), output_.Size());
  }
  // Rewrite the length field to zero, then feed only the 9-byte header.
  SetFrameLength(&control_frame, 0);
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(
      reinterpret_cast<unsigned char*>(control_frame.data()), kFrameHeaderSize);
  EXPECT_EQ(0, visitor.error_count_);
}
// A SETTINGS payload length that is not a multiple of the 6-byte setting
// entry size must be rejected with SPDY_INVALID_CONTROL_FRAME_SIZE.
TEST_P(SpdyFramerTest, ReadBogusLenSettingsFrame) {
  SpdySettingsIR settings_ir;
  settings_ir.AddSetting(SETTINGS_INITIAL_WINDOW_SIZE, 0x00000002);
  settings_ir.AddSetting(SETTINGS_MAX_CONCURRENT_STREAMS, 0x00000002);
  SpdySerializedFrame control_frame(framer_.SerializeSettings(settings_ir));
  if (use_output_) {
    ASSERT_TRUE(framer_.SerializeSettings(settings_ir, &output_));
    control_frame = MakeSerializedFrame(output_.Begin(), output_.Size());
  }
  // 8 is deliberately not divisible by 6 (one setting entry plus 2 bytes).
  const size_t kNewLength = 8;
  SetFrameLength(&control_frame, kNewLength);
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(
      reinterpret_cast<unsigned char*>(control_frame.data()),
      kFrameHeaderSize + kNewLength);
  EXPECT_EQ(1, visitor.error_count_);
  EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME_SIZE,
            visitor.deframer_.spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             visitor.deframer_.spdy_framer_error());
}
// Parses a multi-entry SETTINGS frame twice: first in one shot, then again
// in 5-byte chunks, verifying that incremental parsing yields the same
// settings and that an ACK is sent for each complete frame.
TEST_P(SpdyFramerTest, ReadLargeSettingsFrame) {
  SpdySettingsIR settings_ir;
  settings_ir.AddSetting(SETTINGS_HEADER_TABLE_SIZE, 5);
  settings_ir.AddSetting(SETTINGS_ENABLE_PUSH, 6);
  settings_ir.AddSetting(SETTINGS_MAX_CONCURRENT_STREAMS, 7);
  SpdySerializedFrame control_frame(framer_.SerializeSettings(settings_ir));
  if (use_output_) {
    ASSERT_TRUE(framer_.SerializeSettings(settings_ir, &output_));
    control_frame = MakeSerializedFrame(output_.Begin(), output_.Size());
  }
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  // Pass 1: the whole frame at once.
  visitor.SimulateInFramer(
      reinterpret_cast<unsigned char*>(control_frame.data()),
      control_frame.size());
  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(3, visitor.setting_count_);
  EXPECT_EQ(1, visitor.settings_ack_sent_);
  // Pass 2: the same frame in 5-byte chunks; counters accumulate.
  size_t framed_data = 0;
  size_t unframed_data = control_frame.size();
  size_t kReadChunkSize = 5;  // Read five bytes at a time.
  while (unframed_data > 0) {
    size_t to_read = std::min(kReadChunkSize, unframed_data);
    visitor.SimulateInFramer(
        reinterpret_cast<unsigned char*>(control_frame.data() + framed_data),
        to_read);
    unframed_data -= to_read;
    framed_data += to_read;
  }
  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(3 * 2, visitor.setting_count_);  // Both passes counted.
  EXPECT_EQ(2, visitor.settings_ack_sent_);
}
// HTTP/2 permits duplicate settings IDs within one SETTINGS frame (the last
// value wins); all three entries must be delivered with no error.
TEST_P(SpdyFramerTest, ReadDuplicateSettings) {
  const unsigned char kH2FrameData[] = {
      0x00, 0x00, 0x12,        // Length: 18 (three 6-byte entries)
      0x04,                    //   Type: SETTINGS
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x00,  // Stream: 0
      0x00, 0x01,              //  Param: HEADER_TABLE_SIZE
      0x00, 0x00, 0x00, 0x02,  //  Value: 2
      0x00, 0x01,              //  Param: HEADER_TABLE_SIZE (duplicate)
      0x00, 0x00, 0x00, 0x03,  //  Value: 3
      0x00, 0x03,              //  Param: MAX_CONCURRENT_STREAMS
      0x00, 0x00, 0x00, 0x03,  //  Value: 3
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(kH2FrameData, sizeof(kH2FrameData));
  EXPECT_EQ(3, visitor.setting_count_);
  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(1, visitor.settings_ack_sent_);
}
// An unrecognized settings ID must be ignored gracefully: no error, and the
// entry is still counted by the visitor.
TEST_P(SpdyFramerTest, ReadUnknownSettingsId) {
  const unsigned char kH2FrameData[] = {
      0x00, 0x00, 0x06,        // Length: 6 (one entry)
      0x04,                    //   Type: SETTINGS
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x00,  // Stream: 0
      0x00, 0x10,              //  Param: 16 (unknown ID)
      0x00, 0x00, 0x00, 0x02,  //  Value: 2
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(kH2FrameData, sizeof(kH2FrameData));
  EXPECT_EQ(1, visitor.setting_count_);
  EXPECT_EQ(0, visitor.error_count_);
}
// With an extension visitor installed, unknown settings IDs are forwarded to
// the extension while known IDs are handled normally; all four entries are
// counted and no error occurs.
TEST_P(SpdyFramerTest, ReadKnownAndUnknownSettingsWithExtension) {
  const unsigned char kH2FrameData[] = {
      0x00, 0x00, 0x18,        // Length: 24 (four entries)
      0x04,                    //   Type: SETTINGS
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x00,  // Stream: 0
      0x00, 0x10,              //  Param: 16 (unknown -> extension)
      0x00, 0x00, 0x00, 0x02,  //  Value: 2
      0x00, 0x5f,              //  Param: 95 (unknown -> extension)
      0x00, 0x01, 0x00, 0x02,  //  Value: 65538
      0x00, 0x02,              //  Param: ENABLE_PUSH
      0x00, 0x00, 0x00, 0x01,  //  Value: 1
      0x00, 0x08,              //  Param: 8 (unknown -> extension)
      0x00, 0x00, 0x00, 0x01,  //  Value: 1
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  TestExtension extension;
  visitor.set_extension_visitor(&extension);
  visitor.SimulateInFramer(kH2FrameData, sizeof(kH2FrameData));
  EXPECT_EQ(4, visitor.setting_count_);
  EXPECT_EQ(0, visitor.error_count_);
  // NOTE(review): only (16, 2) and (95, 65538) are expected here, so the
  // (8, 1) entry is presumably consumed as a known setting — confirm against
  // the extension's accept_setting policy.
  EXPECT_THAT(
      extension.settings_received_,
      testing::ElementsAre(testing::Pair(16, 2), testing::Pair(95, 65538)));
}
// HTTP/2 imposes no ordering on settings IDs, so entries that are not in
// ascending ID order must still parse without error.
TEST_P(SpdyFramerTest, ReadOutOfOrderSettings) {
  const unsigned char kH2FrameData[] = {
      0x00, 0x00, 0x12,        // Length: 18 (three entries)
      0x04,                    //   Type: SETTINGS
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x00,  // Stream: 0
      0x00, 0x02,              //  Param: ENABLE_PUSH
      0x00, 0x00, 0x00, 0x02,  //  Value: 2
      0x00, 0x01,              //  Param: HEADER_TABLE_SIZE (lower ID after)
      0x00, 0x00, 0x00, 0x03,  //  Value: 3
      0x00, 0x03,              //  Param: MAX_CONCURRENT_STREAMS
      0x00, 0x00, 0x00, 0x03,  //  Value: 3
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(kH2FrameData, sizeof(kH2FrameData));
  EXPECT_EQ(3, visitor.setting_count_);
  EXPECT_EQ(0, visitor.error_count_);
}
// A SETTINGS frame with the ACK flag and an empty payload is a settings
// acknowledgement: no individual settings are reported, only the ack.
TEST_P(SpdyFramerTest, ProcessSettingsAckFrame) {
  const unsigned char kFrameData[] = {
      0x00, 0x00, 0x00,        // Length: 0
      0x04,                    //   Type: SETTINGS
      0x01,                    //  Flags: ACK
      0x00, 0x00, 0x00, 0x00,  // Stream: 0
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(kFrameData, sizeof(kFrameData));
  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(0, visitor.setting_count_);
  EXPECT_EQ(1, visitor.settings_ack_received_);
}
// Feeds a padded DATA frame to the deframer one slice at a time and checks
// the decoder's state machine after every slice: frame header, pad-length
// byte, payload chunks, and padding consumption.
TEST_P(SpdyFramerTest, ProcessDataFrameWithPadding) {
  const int kPaddingLen = 119;  // pad-length byte + 118 bytes of padding
  const char data_payload[] = "hello";
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  SpdyDataIR data_ir(/* stream_id = */ 1, data_payload);
  data_ir.set_padding_len(kPaddingLen);
  SpdySerializedFrame frame(framer_.SerializeData(data_ir));
  int bytes_consumed = 0;
  // Slice 1: the 9-byte frame header (flags 0x8 == PADDED).
  EXPECT_CALL(visitor,
              OnCommonHeader(1, kPaddingLen + strlen(data_payload), 0x0, 0x8));
  EXPECT_CALL(visitor,
              OnDataFrameHeader(1, kPaddingLen + strlen(data_payload), false));
  QUICHE_CHECK_EQ(kDataFrameMinimumSize,
                  deframer_->ProcessInput(frame.data(), kDataFrameMinimumSize));
  QUICHE_CHECK_EQ(deframer_->state(),
                  Http2DecoderAdapter::SPDY_READ_DATA_FRAME_PADDING_LENGTH);
  QUICHE_CHECK_EQ(deframer_->spdy_framer_error(),
                  Http2DecoderAdapter::SPDY_NO_ERROR);
  bytes_consumed += kDataFrameMinimumSize;
  // Slice 2: the single pad-length byte (value 118).
  EXPECT_CALL(visitor, OnStreamPadLength(1, kPaddingLen - 1));
  QUICHE_CHECK_EQ(1u,
                  deframer_->ProcessInput(frame.data() + bytes_consumed, 1));
  QUICHE_CHECK_EQ(deframer_->state(),
                  Http2DecoderAdapter::SPDY_FORWARD_STREAM_FRAME);
  QUICHE_CHECK_EQ(deframer_->spdy_framer_error(),
                  Http2DecoderAdapter::SPDY_NO_ERROR);
  bytes_consumed += 1;
  // Slice 3: first 2 bytes of the 5-byte payload.
  EXPECT_CALL(visitor, OnStreamFrameData(1, _, 2));
  QUICHE_CHECK_EQ(2u,
                  deframer_->ProcessInput(frame.data() + bytes_consumed, 2));
  QUICHE_CHECK_EQ(deframer_->state(),
                  Http2DecoderAdapter::SPDY_FORWARD_STREAM_FRAME);
  QUICHE_CHECK_EQ(deframer_->spdy_framer_error(),
                  Http2DecoderAdapter::SPDY_NO_ERROR);
  bytes_consumed += 2;
  // Slice 4: remaining 3 payload bytes; decoder moves on to padding.
  EXPECT_CALL(visitor, OnStreamFrameData(1, _, 3));
  QUICHE_CHECK_EQ(3u,
                  deframer_->ProcessInput(frame.data() + bytes_consumed, 3));
  QUICHE_CHECK_EQ(deframer_->state(),
                  Http2DecoderAdapter::SPDY_CONSUME_PADDING);
  QUICHE_CHECK_EQ(deframer_->spdy_framer_error(),
                  Http2DecoderAdapter::SPDY_NO_ERROR);
  bytes_consumed += 3;
  // Slice 5: 100 of the 118 padding bytes.
  EXPECT_CALL(visitor, OnStreamPadding(1, 100));
  QUICHE_CHECK_EQ(100u,
                  deframer_->ProcessInput(frame.data() + bytes_consumed, 100));
  QUICHE_CHECK_EQ(deframer_->state(),
                  Http2DecoderAdapter::SPDY_CONSUME_PADDING);
  QUICHE_CHECK_EQ(deframer_->spdy_framer_error(),
                  Http2DecoderAdapter::SPDY_NO_ERROR);
  bytes_consumed += 100;
  // Slice 6: final 18 padding bytes; frame complete.
  EXPECT_CALL(visitor, OnStreamPadding(1, 18));
  QUICHE_CHECK_EQ(18u,
                  deframer_->ProcessInput(frame.data() + bytes_consumed, 18));
  QUICHE_CHECK_EQ(deframer_->state(),
                  Http2DecoderAdapter::SPDY_READY_FOR_FRAME);
  QUICHE_CHECK_EQ(deframer_->spdy_framer_error(),
                  Http2DecoderAdapter::SPDY_NO_ERROR);
}
// Serializes a WINDOW_UPDATE for stream 1 with a delta of 2 and verifies
// that the visitor observes the same stream id and delta after deframing.
TEST_P(SpdyFramerTest, ReadWindowUpdate) {
  const SpdyWindowUpdateIR window_update(/* stream_id = */ 1,
                                         /* delta = */ 2);
  SpdySerializedFrame control_frame(
      framer_.SerializeWindowUpdate(window_update));
  if (use_output_) {
    ASSERT_TRUE(framer_.SerializeWindowUpdate(window_update, &output_));
    control_frame = MakeSerializedFrame(output_.Begin(), output_.Size());
  }
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(
      reinterpret_cast<unsigned char*>(control_frame.data()),
      control_frame.size());
  EXPECT_EQ(1u, visitor.last_window_update_stream_);
  EXPECT_EQ(2, visitor.last_window_update_delta_);
}
// Round-trips a PUSH_PROMISE through HPACK compression and verifies that the
// stream ids and the decompressed header block survive intact.
TEST_P(SpdyFramerTest, ReadCompressedPushPromise) {
  SpdyPushPromiseIR push_promise(/* stream_id = */ 42,
                                 /* promised_stream_id = */ 57);
  push_promise.SetHeader("foo", "bar");
  push_promise.SetHeader("bar", "foofoo");
  SpdySerializedFrame frame(SpdyFramerPeer::SerializePushPromise(
      &framer_, push_promise, use_output_ ? &output_ : nullptr));
  TestSpdyVisitor visitor(SpdyFramer::ENABLE_COMPRESSION);
  visitor.SimulateInFramer(reinterpret_cast<unsigned char*>(frame.data()),
                           frame.size());
  EXPECT_EQ(42u, visitor.last_push_promise_stream_);
  EXPECT_EQ(57u, visitor.last_push_promise_promised_stream_);
  EXPECT_EQ(push_promise.header_block(), visitor.headers_);
}
// Reassembles a header block split across a padded HEADERS frame and two
// CONTINUATION frames, with one HPACK literal split mid-name across frames,
// and verifies the joined cookie crumbs and trailing header.
TEST_P(SpdyFramerTest, ReadHeadersWithContinuation) {
  const unsigned char kInput[] = {
      0x00, 0x00, 0x14,                        // Length: 20
      0x01,                                    //   Type: HEADERS
      0x08,                                    //  Flags: PADDED
      0x00, 0x00, 0x00, 0x01,                  // Stream: 1
      0x03,                                    // PadLen: 3 trailing bytes
      0x00,                                    // Unindexed literal entry
      0x06,                                    // Name length: 6
      'c', 'o', 'o', 'k', 'i', 'e',            // Name
      0x07,                                    // Value length: 7
      'f', 'o', 'o', '=', 'b', 'a', 'r',       // Value
      0x00, 0x00, 0x00,                        // Padding
      0x00, 0x00, 0x14,                        // Length: 20
      0x09,                                    //   Type: CONTINUATION
      0x00,                                    //  Flags: none
      0x00, 0x00, 0x00, 0x01,                  // Stream: 1
      0x00,                                    // Unindexed literal entry
      0x06,                                    // Name length: 6
      'c', 'o', 'o', 'k', 'i', 'e',            // Name
      0x08,                                    // Value length: 8
      'b', 'a', 'z', '=', 'b', 'i', 'n', 'g',  // Value
      0x00,                                    // Unindexed literal entry
      0x06,                                    // Name length: 6
      'c',                                     // Name (split across frames)
      0x00, 0x00, 0x12,                        // Length: 18
      0x09,                                    //   Type: CONTINUATION
      0x04,                                    //  Flags: END_HEADERS
      0x00, 0x00, 0x00, 0x01,                  // Stream: 1
      'o', 'o', 'k', 'i', 'e',                 // Name (continued)
      0x00,                                    // Value length: 0 (empty crumb)
      0x00,                                    // Unindexed literal entry
      0x04,                                    // Name length: 4
      'n', 'a', 'm', 'e',                      // Name
      0x05,                                    // Value length: 5
      'v', 'a', 'l', 'u', 'e',                 // Value
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(kInput, sizeof(kInput));
  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(1, visitor.headers_frame_count_);
  EXPECT_EQ(2, visitor.continuation_count_);
  EXPECT_EQ(0, visitor.zero_length_control_frame_header_data_count_);
  EXPECT_EQ(0, visitor.end_of_stream_count_);
  // The empty third cookie crumb yields the trailing "; ".
  EXPECT_THAT(
      visitor.headers_,
      testing::ElementsAre(testing::Pair("cookie", "foo=bar; baz=bing; "),
                           testing::Pair("name", "value")));
}
// Same reassembly as ReadHeadersWithContinuation, but the initial HEADERS
// carries END_STREAM; verifies the FIN and end-of-stream are each reported
// exactly once after the final CONTINUATION.
TEST_P(SpdyFramerTest, ReadHeadersWithContinuationAndFin) {
  const unsigned char kInput[] = {
      0x00, 0x00, 0x10,                        // Length: 16
      0x01,                                    //   Type: HEADERS
      0x01,                                    //  Flags: END_STREAM
      0x00, 0x00, 0x00, 0x01,                  // Stream: 1
      0x00,                                    // Unindexed literal entry
      0x06,                                    // Name length: 6
      'c', 'o', 'o', 'k', 'i', 'e',            // Name
      0x07,                                    // Value length: 7
      'f', 'o', 'o', '=', 'b', 'a', 'r',       // Value
      0x00, 0x00, 0x14,                        // Length: 20
      0x09,                                    //   Type: CONTINUATION
      0x00,                                    //  Flags: none
      0x00, 0x00, 0x00, 0x01,                  // Stream: 1
      0x00,                                    // Unindexed literal entry
      0x06,                                    // Name length: 6
      'c', 'o', 'o', 'k', 'i', 'e',            // Name
      0x08,                                    // Value length: 8
      'b', 'a', 'z', '=', 'b', 'i', 'n', 'g',  // Value
      0x00,                                    // Unindexed literal entry
      0x06,                                    // Name length: 6
      'c',                                     // Name (split across frames)
      0x00, 0x00, 0x12,                        // Length: 18
      0x09,                                    //   Type: CONTINUATION
      0x04,                                    //  Flags: END_HEADERS
      0x00, 0x00, 0x00, 0x01,                  // Stream: 1
      'o', 'o', 'k', 'i', 'e',                 // Name (continued)
      0x00,                                    // Value length: 0 (empty crumb)
      0x00,                                    // Unindexed literal entry
      0x04,                                    // Name length: 4
      'n', 'a', 'm', 'e',                      // Name
      0x05,                                    // Value length: 5
      'v', 'a', 'l', 'u', 'e',                 // Value
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(kInput, sizeof(kInput));
  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(1, visitor.headers_frame_count_);
  EXPECT_EQ(2, visitor.continuation_count_);
  EXPECT_EQ(1, visitor.fin_flag_count_);
  EXPECT_EQ(0, visitor.zero_length_control_frame_header_data_count_);
  EXPECT_EQ(1, visitor.end_of_stream_count_);
  EXPECT_THAT(
      visitor.headers_,
      testing::ElementsAre(testing::Pair("cookie", "foo=bar; baz=bing; "),
                           testing::Pair("name", "value")));
}
// Reassembles a header block split across a padded PUSH_PROMISE frame and
// two CONTINUATION frames; verifies stream ids and joined cookie crumbs.
TEST_P(SpdyFramerTest, ReadPushPromiseWithContinuation) {
  const unsigned char kInput[] = {
      0x00, 0x00, 0x17,                        // Length: 23
      0x05,                                    //   Type: PUSH_PROMISE
      0x08,                                    //  Flags: PADDED
      0x00, 0x00, 0x00, 0x01,                  // Stream: 1
      0x02,                                    // PadLen: 2 trailing bytes
      0x00, 0x00, 0x00, 0x2a,                  // Promised stream: 42
      0x00,                                    // Unindexed literal entry
      0x06,                                    // Name length: 6
      'c', 'o', 'o', 'k', 'i', 'e',            // Name
      0x07,                                    // Value length: 7
      'f', 'o', 'o', '=', 'b', 'a', 'r',       // Value
      0x00, 0x00,                              // Padding
      0x00, 0x00, 0x14,                        // Length: 20
      0x09,                                    //   Type: CONTINUATION
      0x00,                                    //  Flags: none
      0x00, 0x00, 0x00, 0x01,                  // Stream: 1
      0x00,                                    // Unindexed literal entry
      0x06,                                    // Name length: 6
      'c', 'o', 'o', 'k', 'i', 'e',            // Name
      0x08,                                    // Value length: 8
      'b', 'a', 'z', '=', 'b', 'i', 'n', 'g',  // Value
      0x00,                                    // Unindexed literal entry
      0x06,                                    // Name length: 6
      'c',                                     // Name (split across frames)
      0x00, 0x00, 0x12,                        // Length: 18
      0x09,                                    //   Type: CONTINUATION
      0x04,                                    //  Flags: END_HEADERS
      0x00, 0x00, 0x00, 0x01,                  // Stream: 1
      'o', 'o', 'k', 'i', 'e',                 // Name (continued)
      0x00,                                    // Value length: 0 (empty crumb)
      0x00,                                    // Unindexed literal entry
      0x04,                                    // Name length: 4
      'n', 'a', 'm', 'e',                      // Name
      0x05,                                    // Value length: 5
      'v', 'a', 'l', 'u', 'e',                 // Value
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(kInput, sizeof(kInput));
  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(1u, visitor.last_push_promise_stream_);
  EXPECT_EQ(42u, visitor.last_push_promise_promised_stream_);
  EXPECT_EQ(2, visitor.continuation_count_);
  EXPECT_EQ(0, visitor.zero_length_control_frame_header_data_count_);
  EXPECT_EQ(0, visitor.end_of_stream_count_);
  EXPECT_THAT(
      visitor.headers_,
      testing::ElementsAre(testing::Pair("cookie", "foo=bar; baz=bing; "),
                           testing::Pair("name", "value")));
}
// A HEADERS frame without END_HEADERS must be followed by a CONTINUATION;
// receiving an unknown frame type (0xa9) instead is SPDY_UNEXPECTED_FRAME,
// even when the visitor would otherwise accept unknown frames.
TEST_P(SpdyFramerTest, ReceiveUnknownMidContinuation) {
  const unsigned char kInput[] = {
      0x00, 0x00, 0x10,        // Length: 16
      0x01,                    //   Type: HEADERS
      0x00,                    //  Flags: none (END_HEADERS not set)
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0x00, 0x06, 0x63, 0x6f,  // HPACK literal: name "cookie"
      0x6f, 0x6b, 0x69, 0x65,  //
      0x07, 0x66, 0x6f, 0x6f,  //   value "foo=bar"
      0x3d, 0x62, 0x61, 0x72,  //
      0x00, 0x00, 0x14,        // Length: 20
      0xa9,                    //   Type: 169 (unknown)
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0x00, 0x06, 0x63, 0x6f,  // Payload (ignored)
      0x6f, 0x6b, 0x69, 0x65,  //
      0x08, 0x62, 0x61, 0x7a,  //
      0x3d, 0x62, 0x69, 0x6e,  //
      0x67, 0x00, 0x06, 0x63,  //
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  // Accepting unknown frames in general must NOT excuse one mid-continuation.
  visitor.on_unknown_frame_result_ = true;
  deframer_->set_visitor(&visitor);
  visitor.SimulateInFramer(kInput, sizeof(kInput));
  EXPECT_EQ(1, visitor.error_count_);
  EXPECT_EQ(Http2DecoderAdapter::SPDY_UNEXPECTED_FRAME,
            visitor.deframer_.spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             visitor.deframer_.spdy_framer_error());
  EXPECT_EQ(1, visitor.headers_frame_count_);
  EXPECT_EQ(0, visitor.continuation_count_);
  EXPECT_EQ(0u, visitor.header_buffer_length_);
}
// Same as ReceiveUnknownMidContinuation, but with an extension visitor
// installed: the extension must not be allowed to intercept an unknown frame
// that arrives mid-continuation; it is still SPDY_UNEXPECTED_FRAME.
TEST_P(SpdyFramerTest, ReceiveUnknownMidContinuationWithExtension) {
  const unsigned char kInput[] = {
      0x00, 0x00, 0x10,        // Length: 16
      0x01,                    //   Type: HEADERS
      0x00,                    //  Flags: none (END_HEADERS not set)
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0x00, 0x06, 0x63, 0x6f,  // HPACK literal: name "cookie"
      0x6f, 0x6b, 0x69, 0x65,  //
      0x07, 0x66, 0x6f, 0x6f,  //   value "foo=bar"
      0x3d, 0x62, 0x61, 0x72,  //
      0x00, 0x00, 0x14,        // Length: 20
      0xa9,                    //   Type: 169 (unknown)
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0x00, 0x06, 0x63, 0x6f,  // Payload (ignored)
      0x6f, 0x6b, 0x69, 0x65,  //
      0x08, 0x62, 0x61, 0x7a,  //
      0x3d, 0x62, 0x69, 0x6e,  //
      0x67, 0x00, 0x06, 0x63,  //
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  TestExtension extension;
  visitor.set_extension_visitor(&extension);
  deframer_->set_visitor(&visitor);
  visitor.SimulateInFramer(kInput, sizeof(kInput));
  EXPECT_EQ(1, visitor.error_count_);
  EXPECT_EQ(Http2DecoderAdapter::SPDY_UNEXPECTED_FRAME,
            visitor.deframer_.spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             visitor.deframer_.spdy_framer_error());
  EXPECT_EQ(1, visitor.headers_frame_count_);
  EXPECT_EQ(0, visitor.continuation_count_);
  EXPECT_EQ(0u, visitor.header_buffer_length_);
}
// A CONTINUATION must arrive on the same stream as the HEADERS it continues;
// one on a different stream id is rejected with SPDY_UNEXPECTED_FRAME.
TEST_P(SpdyFramerTest, ReceiveContinuationOnWrongStream) {
  const unsigned char kInput[] = {
      0x00, 0x00, 0x10,        // Length: 16
      0x01,                    //   Type: HEADERS
      0x00,                    //  Flags: none (END_HEADERS not set)
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0x00, 0x06, 0x63, 0x6f,  // HPACK literal: name "cookie"
      0x6f, 0x6b, 0x69, 0x65,  //
      0x07, 0x66, 0x6f, 0x6f,  //   value "foo=bar"
      0x3d, 0x62, 0x61, 0x72,  //
      0x00, 0x00, 0x14,        // Length: 20
      0x09,                    //   Type: CONTINUATION
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x02,  // Stream: 2 (mismatch!)
      0x00, 0x06, 0x63, 0x6f,  // Payload (ignored)
      0x6f, 0x6b, 0x69, 0x65,  //
      0x08, 0x62, 0x61, 0x7a,  //
      0x3d, 0x62, 0x69, 0x6e,  //
      0x67, 0x00, 0x06, 0x63,  //
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  deframer_->set_visitor(&visitor);
  visitor.SimulateInFramer(kInput, sizeof(kInput));
  EXPECT_EQ(1, visitor.error_count_);
  EXPECT_EQ(Http2DecoderAdapter::SPDY_UNEXPECTED_FRAME,
            visitor.deframer_.spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             visitor.deframer_.spdy_framer_error());
  EXPECT_EQ(1, visitor.headers_frame_count_);
  EXPECT_EQ(0, visitor.continuation_count_);
  EXPECT_EQ(0u, visitor.header_buffer_length_);
}
// A CONTINUATION frame with no preceding HEADERS/PUSH_PROMISE is rejected
// with SPDY_UNEXPECTED_FRAME.
TEST_P(SpdyFramerTest, ReadContinuationOutOfOrder) {
  const unsigned char kInput[] = {
      0x00, 0x00, 0x18,        // Length: 24
      0x09,                    //   Type: CONTINUATION (nothing to continue)
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0x00, 0x06, 0x63, 0x6f,  // Payload (never inspected)
      0x6f, 0x6b, 0x69, 0x65,  //
      0x07, 0x66, 0x6f, 0x6f,  //
      0x3d, 0x62, 0x61, 0x72,  //
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  deframer_->set_visitor(&visitor);
  visitor.SimulateInFramer(kInput, sizeof(kInput));
  EXPECT_EQ(1, visitor.error_count_);
  EXPECT_EQ(Http2DecoderAdapter::SPDY_UNEXPECTED_FRAME,
            visitor.deframer_.spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             visitor.deframer_.spdy_framer_error());
  EXPECT_EQ(0, visitor.continuation_count_);
  EXPECT_EQ(0u, visitor.header_buffer_length_);
}
// While a CONTINUATION is expected (HEADERS lacked END_HEADERS), receiving a
// DATA frame instead is SPDY_UNEXPECTED_FRAME and no data is delivered.
TEST_P(SpdyFramerTest, ExpectContinuationReceiveData) {
  const unsigned char kInput[] = {
      0x00, 0x00, 0x10,        // Length: 16
      0x01,                    //   Type: HEADERS
      0x00,                    //  Flags: none (END_HEADERS not set)
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0x00, 0x06, 0x63, 0x6f,  // HPACK literal: name "cookie"
      0x6f, 0x6b, 0x69, 0x65,  //
      0x07, 0x66, 0x6f, 0x6f,  //   value "foo=bar"
      0x3d, 0x62, 0x61, 0x72,  //
      0x00, 0x00, 0x00,        // Length: 0
      0x00,                    //   Type: DATA (unexpected here)
      0x01,                    //  Flags: END_STREAM
      0x00, 0x00, 0x00, 0x04,  // Stream: 4
      0xde, 0xad, 0xbe, 0xef,  // Trailing bytes (never reached)
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  deframer_->set_visitor(&visitor);
  visitor.SimulateInFramer(kInput, sizeof(kInput));
  EXPECT_EQ(1, visitor.error_count_);
  EXPECT_EQ(Http2DecoderAdapter::SPDY_UNEXPECTED_FRAME,
            visitor.deframer_.spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             visitor.deframer_.spdy_framer_error());
  EXPECT_EQ(1, visitor.headers_frame_count_);
  EXPECT_EQ(0, visitor.continuation_count_);
  EXPECT_EQ(0u, visitor.header_buffer_length_);
  EXPECT_EQ(0, visitor.data_frame_count_);
}
// While a CONTINUATION is expected, receiving another HEADERS frame instead
// is SPDY_UNEXPECTED_FRAME; the second frame is never counted.
TEST_P(SpdyFramerTest, ExpectContinuationReceiveControlFrame) {
  const unsigned char kInput[] = {
      0x00, 0x00, 0x10,        // Length: 16
      0x01,                    //   Type: HEADERS
      0x00,                    //  Flags: none (END_HEADERS not set)
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0x00, 0x06, 0x63, 0x6f,  // HPACK literal: name "cookie"
      0x6f, 0x6b, 0x69, 0x65,  //
      0x07, 0x66, 0x6f, 0x6f,  //   value "foo=bar"
      0x3d, 0x62, 0x61, 0x72,  //
      0x00, 0x00, 0x10,        // Length: 16
      0x01,                    //   Type: HEADERS (unexpected here)
      0x00,                    //  Flags: none
      0x00, 0x00, 0x00, 0x01,  // Stream: 1
      0x00, 0x06, 0x63, 0x6f,  // Payload (never reached)
      0x6f, 0x6b, 0x69, 0x65,  //
      0x07, 0x66, 0x6f, 0x6f,  //
      0x3d, 0x62, 0x61, 0x72,  //
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  deframer_->set_visitor(&visitor);
  visitor.SimulateInFramer(kInput, sizeof(kInput));
  EXPECT_EQ(1, visitor.error_count_);
  EXPECT_EQ(Http2DecoderAdapter::SPDY_UNEXPECTED_FRAME,
            visitor.deframer_.spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             visitor.deframer_.spdy_framer_error());
  EXPECT_EQ(1, visitor.headers_frame_count_);
  EXPECT_EQ(0, visitor.continuation_count_);
  EXPECT_EQ(0u, visitor.header_buffer_length_);
  EXPECT_EQ(0, visitor.data_frame_count_);
}
// Feeding 256 bytes of 0xff garbage to the deframer must produce exactly one
// error (it stops processing after the first failure).
TEST_P(SpdyFramerTest, ReadGarbage) {
  unsigned char garbage_frame[256];
  memset(garbage_frame, 0xff, sizeof(garbage_frame));  // all-ones junk
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(garbage_frame, sizeof(garbage_frame));
  EXPECT_EQ(1, visitor.error_count_);
}
// When the visitor opts in to unknown frames, an unknown frame type is
// consumed without error and the deframer stays usable for the next frame.
TEST_P(SpdyFramerTest, ReadUnknownExtensionFrame) {
  const unsigned char unknown_frame[] = {
      0x00, 0x00, 0x08,        // Length: 8
      0xff,                    //   Type: 255 (unknown)
      0xff,                    //  Flags: 0xff
      0xff, 0xff, 0xff, 0xff,  // Stream: 0x7fffffff (reserved bit masked)
      0xff, 0xff, 0xff, 0xff,  // Payload
      0xff, 0xff, 0xff, 0xff,  //
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.on_unknown_frame_result_ = true;  // accept unknown frame types
  visitor.SimulateInFramer(unknown_frame, ABSL_ARRAYSIZE(unknown_frame));
  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(1, visitor.unknown_frame_count_);
  EXPECT_EQ(8, visitor.unknown_payload_len_);
  // A subsequent valid SETTINGS frame must still parse normally.
  SpdySettingsIR settings_ir;
  settings_ir.AddSetting(SETTINGS_HEADER_TABLE_SIZE, 10);
  SpdySerializedFrame control_frame(framer_.SerializeSettings(settings_ir));
  if (use_output_) {
    ASSERT_TRUE(framer_.SerializeSettings(settings_ir, &output_));
    control_frame = MakeSerializedFrame(output_.Begin(), output_.Size());
  }
  visitor.SimulateInFramer(
      reinterpret_cast<unsigned char*>(control_frame.data()),
      control_frame.size());
  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(1, visitor.setting_count_);
  EXPECT_EQ(1, visitor.settings_ack_sent_);
}
// With an extension visitor installed, an unknown frame's header fields and
// full payload are forwarded to the extension, and the deframer then
// processes a subsequent valid SETTINGS frame normally.
TEST_P(SpdyFramerTest, ReadUnknownExtensionFrameWithExtension) {
  const unsigned char unknown_frame[] = {
      0x00, 0x00, 0x14,        // Length: 20
      0xff,                    //   Type: 255 (unknown)
      0xff,                    //  Flags: 0xff
      0xff, 0xff, 0xff, 0xff,  // Stream: 0x7fffffff (reserved bit masked)
      0xff, 0xff, 0xff, 0xff,  // Payload: 20 bytes of 0xff
      0xff, 0xff, 0xff, 0xff,  //
      0xff, 0xff, 0xff, 0xff,  //
      0xff, 0xff, 0xff, 0xff,  //
      0xff, 0xff, 0xff, 0xff,  //
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  TestExtension extension;
  visitor.set_extension_visitor(&extension);
  visitor.SimulateInFramer(unknown_frame, ABSL_ARRAYSIZE(unknown_frame));
  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(0x7fffffffu, extension.stream_id_);
  EXPECT_EQ(20u, extension.length_);
  EXPECT_EQ(255, extension.type_);
  EXPECT_EQ(0xff, extension.flags_);
  EXPECT_EQ(std::string(20, '\xff'), extension.payload_);
  // A subsequent valid SETTINGS frame must still parse normally.
  SpdySettingsIR settings_ir;
  settings_ir.AddSetting(SETTINGS_HEADER_TABLE_SIZE, 10);
  SpdySerializedFrame control_frame(framer_.SerializeSettings(settings_ir));
  visitor.SimulateInFramer(
      reinterpret_cast<unsigned char*>(control_frame.data()),
      control_frame.size());
  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(1, visitor.setting_count_);
  EXPECT_EQ(1, visitor.settings_ack_sent_);
}
// An unknown frame type with a plausible length still produces an error when
// the visitor does NOT opt in to unknown frames (contrast with
// ReadUnknownExtensionFrame, where on_unknown_frame_result_ is set).
TEST_P(SpdyFramerTest, ReadGarbageWithValidLength) {
  const unsigned char kFrameData[] = {
      0x00, 0x00, 0x08,        // Length: 8
      0xff,                    //   Type: 255 (unknown)
      0xff,                    //  Flags: 0xff
      0xff, 0xff, 0xff, 0xff,  // Stream: 0x7fffffff (reserved bit masked)
      0xff, 0xff, 0xff, 0xff,  // Payload
      0xff, 0xff, 0xff, 0xff,  //
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(kFrameData, ABSL_ARRAYSIZE(kFrameData));
  EXPECT_EQ(1, visitor.error_count_);
}
// A frame with a bogus (huge) length followed by garbage bytes must produce
// exactly one error.
TEST_P(SpdyFramerTest, ReadGarbageHPACKEncoding) {
  const unsigned char kInput[] = {
      0x00, 0x12, 0x01,        // Length: 4609 (far more than supplied)
      0x04,                    //   Type: SETTINGS
      0x00,                    //  Flags: none
      0x00, 0x00, 0x01, 0xef,  // Stream: 495 (invalid for SETTINGS)
      0xef, 0xff,              //  Param: 61439
      0xff, 0xff, 0xff, 0xff,  //  Value: 4294967295
      0xff, 0xff,              //  Param: 0xffff
      0xff, 0xff, 0xff, 0xff,  //  Value: 4294967295
      0xff, 0xff, 0xff, 0xff,  // Settings (truncated)
      0xff,                    //
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(kInput, ABSL_ARRAYSIZE(kInput));
  EXPECT_EQ(1, visitor.error_count_);
}
// Pins the wire-size constants (frame header plus per-type minimums) so an
// accidental change to any of them is caught immediately.
TEST_P(SpdyFramerTest, SizesTest) {
  EXPECT_EQ(9u, kFrameHeaderSize);
  EXPECT_EQ(9u, kDataFrameMinimumSize);
  EXPECT_EQ(9u, kHeadersFrameMinimumSize);
  EXPECT_EQ(14u, kPriorityFrameSize);
  EXPECT_EQ(13u, kRstStreamFrameSize);
  EXPECT_EQ(9u, kSettingsFrameMinimumSize);
  EXPECT_EQ(13u, kPushPromiseFrameMinimumSize);
  EXPECT_EQ(17u, kPingFrameSize);
  EXPECT_EQ(17u, kGoawayFrameMinimumSize);
  EXPECT_EQ(13u, kWindowUpdateFrameSize);
  EXPECT_EQ(9u, kContinuationFrameMinimumSize);
  EXPECT_EQ(11u, kGetAltSvcFrameMinimumSize);
  EXPECT_EQ(9u, kFrameMinimumSize);
  // 16384-byte default payload limit; 16393 including the 9-byte header.
  EXPECT_EQ(16384u, kHttp2DefaultFramePayloadLimit);
  EXPECT_EQ(16393u, kHttp2DefaultFrameSizeLimit);
}
// Pins the human-readable name of every decoder state, including the
// out-of-range fallback ("UNKNOWN_STATE").
TEST_P(SpdyFramerTest, StateToStringTest) {
  EXPECT_STREQ("ERROR", Http2DecoderAdapter::StateToString(
                            Http2DecoderAdapter::SPDY_ERROR));
  EXPECT_STREQ("FRAME_COMPLETE", Http2DecoderAdapter::StateToString(
                                     Http2DecoderAdapter::SPDY_FRAME_COMPLETE));
  EXPECT_STREQ("READY_FOR_FRAME",
               Http2DecoderAdapter::StateToString(
                   Http2DecoderAdapter::SPDY_READY_FOR_FRAME));
  EXPECT_STREQ("READING_COMMON_HEADER",
               Http2DecoderAdapter::StateToString(
                   Http2DecoderAdapter::SPDY_READING_COMMON_HEADER));
  EXPECT_STREQ("CONTROL_FRAME_PAYLOAD",
               Http2DecoderAdapter::StateToString(
                   Http2DecoderAdapter::SPDY_CONTROL_FRAME_PAYLOAD));
  EXPECT_STREQ("IGNORE_REMAINING_PAYLOAD",
               Http2DecoderAdapter::StateToString(
                   Http2DecoderAdapter::SPDY_IGNORE_REMAINING_PAYLOAD));
  EXPECT_STREQ("FORWARD_STREAM_FRAME",
               Http2DecoderAdapter::StateToString(
                   Http2DecoderAdapter::SPDY_FORWARD_STREAM_FRAME));
  EXPECT_STREQ(
      "SPDY_CONTROL_FRAME_BEFORE_HEADER_BLOCK",
      Http2DecoderAdapter::StateToString(
          Http2DecoderAdapter::SPDY_CONTROL_FRAME_BEFORE_HEADER_BLOCK));
  EXPECT_STREQ("SPDY_CONTROL_FRAME_HEADER_BLOCK",
               Http2DecoderAdapter::StateToString(
                   Http2DecoderAdapter::SPDY_CONTROL_FRAME_HEADER_BLOCK));
  EXPECT_STREQ("SPDY_SETTINGS_FRAME_PAYLOAD",
               Http2DecoderAdapter::StateToString(
                   Http2DecoderAdapter::SPDY_SETTINGS_FRAME_PAYLOAD));
  EXPECT_STREQ("SPDY_ALTSVC_FRAME_PAYLOAD",
               Http2DecoderAdapter::StateToString(
                   Http2DecoderAdapter::SPDY_ALTSVC_FRAME_PAYLOAD));
  // One past the last valid state falls back to the unknown string.
  EXPECT_STREQ("UNKNOWN_STATE",
               Http2DecoderAdapter::StateToString(
                   Http2DecoderAdapter::SPDY_ALTSVC_FRAME_PAYLOAD + 1));
}
// Pins the human-readable name of every framer error code, including the
// out-of-range fallback ("UNKNOWN_ERROR").
TEST_P(SpdyFramerTest, SpdyFramerErrorToStringTest) {
  EXPECT_STREQ("NO_ERROR", Http2DecoderAdapter::SpdyFramerErrorToString(
                               Http2DecoderAdapter::SPDY_NO_ERROR));
  EXPECT_STREQ("INVALID_STREAM_ID",
               Http2DecoderAdapter::SpdyFramerErrorToString(
                   Http2DecoderAdapter::SPDY_INVALID_STREAM_ID));
  EXPECT_STREQ("INVALID_CONTROL_FRAME",
               Http2DecoderAdapter::SpdyFramerErrorToString(
                   Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME));
  EXPECT_STREQ("CONTROL_PAYLOAD_TOO_LARGE",
               Http2DecoderAdapter::SpdyFramerErrorToString(
                   Http2DecoderAdapter::SPDY_CONTROL_PAYLOAD_TOO_LARGE));
  EXPECT_STREQ("DECOMPRESS_FAILURE",
               Http2DecoderAdapter::SpdyFramerErrorToString(
                   Http2DecoderAdapter::SPDY_DECOMPRESS_FAILURE));
  EXPECT_STREQ("INVALID_PADDING",
               Http2DecoderAdapter::SpdyFramerErrorToString(
                   Http2DecoderAdapter::SPDY_INVALID_PADDING));
  EXPECT_STREQ("INVALID_DATA_FRAME_FLAGS",
               Http2DecoderAdapter::SpdyFramerErrorToString(
                   Http2DecoderAdapter::SPDY_INVALID_DATA_FRAME_FLAGS));
  EXPECT_STREQ("UNEXPECTED_FRAME",
               Http2DecoderAdapter::SpdyFramerErrorToString(
                   Http2DecoderAdapter::SPDY_UNEXPECTED_FRAME));
  EXPECT_STREQ("INTERNAL_FRAMER_ERROR",
               Http2DecoderAdapter::SpdyFramerErrorToString(
                   Http2DecoderAdapter::SPDY_INTERNAL_FRAMER_ERROR));
  EXPECT_STREQ("INVALID_CONTROL_FRAME_SIZE",
               Http2DecoderAdapter::SpdyFramerErrorToString(
                   Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME_SIZE));
  EXPECT_STREQ("OVERSIZED_PAYLOAD",
               Http2DecoderAdapter::SpdyFramerErrorToString(
                   Http2DecoderAdapter::SPDY_OVERSIZED_PAYLOAD));
  // LAST_ERROR and anything past it map to the unknown string.
  EXPECT_STREQ("UNKNOWN_ERROR", Http2DecoderAdapter::SpdyFramerErrorToString(
                                    Http2DecoderAdapter::LAST_ERROR));
  EXPECT_STREQ("UNKNOWN_ERROR",
               Http2DecoderAdapter::SpdyFramerErrorToString(
                   static_cast<Http2DecoderAdapter::SpdyFramerError>(
                       Http2DecoderAdapter::LAST_ERROR + 1)));
}
// Exhaustively tests all 256 flag bytes on a DATA frame. Only FIN and PADDED
// are valid; any other bit is SPDY_INVALID_DATA_FRAME_FLAGS. Setting PADDED
// on a frame serialized without padding makes the first payload byte ('h' =
// 104) read as a pad length larger than the remaining payload, which is
// SPDY_INVALID_PADDING.
TEST_P(SpdyFramerTest, DataFrameFlagsV4) {
  uint8_t valid_data_flags = DATA_FLAG_FIN | DATA_FLAG_PADDED;
  uint8_t flags = 0;
  do {
    SCOPED_TRACE(testing::Message()
                 << "Flags " << std::hex << static_cast<int>(flags));
    testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
    deframer_->set_visitor(&visitor);
    SpdyDataIR data_ir(/* stream_id = */ 1, "hello");
    SpdySerializedFrame frame(framer_.SerializeData(data_ir));
    SetFrameFlags(&frame, flags);  // overwrite the serialized flags byte
    EXPECT_CALL(visitor, OnCommonHeader(1, 5, 0x0, flags));
    if (flags & ~valid_data_flags) {
      EXPECT_CALL(visitor, OnError(_, _));
    } else {
      EXPECT_CALL(visitor, OnDataFrameHeader(1, 5, flags & DATA_FLAG_FIN));
      if (flags & DATA_FLAG_PADDED) {
        // Pad-length byte ('h') exceeds the remaining payload -> error.
        EXPECT_CALL(visitor, OnStreamPadding(_, 1));
        EXPECT_CALL(visitor, OnError(_, _));
      } else {
        EXPECT_CALL(visitor, OnStreamFrameData(_, _, 5));
        if (flags & DATA_FLAG_FIN) {
          EXPECT_CALL(visitor, OnStreamEnd(_));
        }
      }
    }
    deframer_->ProcessInput(frame.data(), frame.size());
    if (flags & ~valid_data_flags) {
      EXPECT_EQ(Http2DecoderAdapter::SPDY_ERROR, deframer_->state());
      EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_DATA_FRAME_FLAGS,
                deframer_->spdy_framer_error())
          << Http2DecoderAdapter::SpdyFramerErrorToString(
                 deframer_->spdy_framer_error());
    } else if (flags & DATA_FLAG_PADDED) {
      EXPECT_EQ(Http2DecoderAdapter::SPDY_ERROR, deframer_->state());
      EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_PADDING,
                deframer_->spdy_framer_error())
          << Http2DecoderAdapter::SpdyFramerErrorToString(
                 deframer_->spdy_framer_error());
    } else {
      EXPECT_EQ(Http2DecoderAdapter::SPDY_READY_FOR_FRAME, deframer_->state());
      EXPECT_EQ(Http2DecoderAdapter::SPDY_NO_ERROR,
                deframer_->spdy_framer_error())
          << Http2DecoderAdapter::SpdyFramerErrorToString(
                 deframer_->spdy_framer_error());
    }
    // Fresh deframer for the next flag combination.
    deframer_ = std::make_unique<Http2DecoderAdapter>();
  } while (++flags != 0);
}
// RST_STREAM defines no flags, and undefined flags must be ignored: all 256
// flag bytes parse successfully and deliver the same OnRstStream callback.
TEST_P(SpdyFramerTest, RstStreamFrameFlags) {
  uint8_t flags = 0;
  do {
    SCOPED_TRACE(testing::Message()
                 << "Flags " << std::hex << static_cast<int>(flags));
    testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
    deframer_->set_visitor(&visitor);
    SpdyRstStreamIR rst_stream(/* stream_id = */ 13, ERROR_CODE_CANCEL);
    SpdySerializedFrame frame(framer_.SerializeRstStream(rst_stream));
    if (use_output_) {
      output_.Reset();
      ASSERT_TRUE(framer_.SerializeRstStream(rst_stream, &output_));
      frame = MakeSerializedFrame(output_.Begin(), output_.Size());
    }
    SetFrameFlags(&frame, flags);  // overwrite the serialized flags byte
    EXPECT_CALL(visitor, OnCommonHeader(13, 4, 0x3, flags));
    EXPECT_CALL(visitor, OnRstStream(13, ERROR_CODE_CANCEL));
    deframer_->ProcessInput(frame.data(), frame.size());
    EXPECT_EQ(Http2DecoderAdapter::SPDY_READY_FOR_FRAME, deframer_->state());
    EXPECT_EQ(Http2DecoderAdapter::SPDY_NO_ERROR,
              deframer_->spdy_framer_error())
        << Http2DecoderAdapter::SpdyFramerErrorToString(
               deframer_->spdy_framer_error());
    deframer_ = std::make_unique<Http2DecoderAdapter>();
  } while (++flags != 0);
}
// Tests all 256 flag bytes on a one-entry SETTINGS frame. If ACK is set, the
// non-empty payload makes the frame invalid (ACK requires an empty payload,
// so this is SPDY_INVALID_CONTROL_FRAME_SIZE); otherwise the setting parses.
TEST_P(SpdyFramerTest, SettingsFrameFlags) {
  uint8_t flags = 0;
  do {
    SCOPED_TRACE(testing::Message()
                 << "Flags " << std::hex << static_cast<int>(flags));
    testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
    deframer_->set_visitor(&visitor);
    SpdySettingsIR settings_ir;
    settings_ir.AddSetting(SETTINGS_INITIAL_WINDOW_SIZE, 16);
    SpdySerializedFrame frame(framer_.SerializeSettings(settings_ir));
    if (use_output_) {
      output_.Reset();
      ASSERT_TRUE(framer_.SerializeSettings(settings_ir, &output_));
      frame = MakeSerializedFrame(output_.Begin(), output_.Size());
    }
    SetFrameFlags(&frame, flags);  // overwrite the serialized flags byte
    EXPECT_CALL(visitor, OnCommonHeader(0, 6, 0x4, flags));
    if (flags & SETTINGS_FLAG_ACK) {
      EXPECT_CALL(visitor, OnError(_, _));
    } else {
      EXPECT_CALL(visitor, OnSettings());
      EXPECT_CALL(visitor, OnSetting(SETTINGS_INITIAL_WINDOW_SIZE, 16));
      EXPECT_CALL(visitor, OnSettingsEnd());
    }
    deframer_->ProcessInput(frame.data(), frame.size());
    if (flags & SETTINGS_FLAG_ACK) {
      // ACK with a 6-byte payload is a size violation.
      EXPECT_EQ(Http2DecoderAdapter::SPDY_ERROR, deframer_->state());
      EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME_SIZE,
                deframer_->spdy_framer_error())
          << Http2DecoderAdapter::SpdyFramerErrorToString(
                 deframer_->spdy_framer_error());
    } else {
      EXPECT_EQ(Http2DecoderAdapter::SPDY_READY_FOR_FRAME, deframer_->state());
      EXPECT_EQ(Http2DecoderAdapter::SPDY_NO_ERROR,
                deframer_->spdy_framer_error())
          << Http2DecoderAdapter::SpdyFramerErrorToString(
                 deframer_->spdy_framer_error());
    }
    deframer_ = std::make_unique<Http2DecoderAdapter>();
  } while (++flags != 0);
}
// GOAWAY defines no flags, and undefined flags must be ignored: all 256 flag
// bytes parse successfully and deliver the same OnGoAway callbacks.
TEST_P(SpdyFramerTest, GoawayFrameFlags) {
  uint8_t flags = 0;
  do {
    SCOPED_TRACE(testing::Message()
                 << "Flags " << std::hex << static_cast<int>(flags));
    testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
    deframer_->set_visitor(&visitor);
    SpdyGoAwayIR goaway_ir(/* last_good_stream_id = */ 97, ERROR_CODE_NO_ERROR,
                           "test");
    SpdySerializedFrame frame(framer_.SerializeGoAway(goaway_ir));
    if (use_output_) {
      output_.Reset();
      ASSERT_TRUE(framer_.SerializeGoAway(goaway_ir, &output_));
      frame = MakeSerializedFrame(output_.Begin(), output_.Size());
    }
    SetFrameFlags(&frame, flags);  // overwrite the serialized flags byte
    EXPECT_CALL(visitor, OnCommonHeader(0, _, 0x7, flags));
    EXPECT_CALL(visitor, OnGoAway(97, ERROR_CODE_NO_ERROR));
    EXPECT_CALL(visitor, OnGoAwayFrameData)
        .WillRepeatedly(testing::Return(true));
    deframer_->ProcessInput(frame.data(), frame.size());
    EXPECT_EQ(Http2DecoderAdapter::SPDY_READY_FOR_FRAME, deframer_->state());
    EXPECT_EQ(Http2DecoderAdapter::SPDY_NO_ERROR,
              deframer_->spdy_framer_error())
        << Http2DecoderAdapter::SpdyFramerErrorToString(
               deframer_->spdy_framer_error());
    deframer_ = std::make_unique<Http2DecoderAdapter>();
  } while (++flags != 0);
}
// Tests all flag bytes on a HEADERS frame. PADDED is cleared before parsing
// (the serialized frame carries no padding bytes, so leaving it set would
// make the payload inconsistent). The expected OnHeaders arguments are
// derived from the PRIORITY, FIN, and END_HEADERS bits.
TEST_P(SpdyFramerTest, HeadersFrameFlags) {
  uint8_t flags = 0;
  do {
    SCOPED_TRACE(testing::Message()
                 << "Flags " << std::hex << static_cast<int>(flags));
    testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
    SpdyFramer framer(SpdyFramer::ENABLE_COMPRESSION);
    Http2DecoderAdapter deframer;
    deframer.set_visitor(&visitor);
    SpdyHeadersIR headers_ir(/* stream_id = */ 57);
    if (flags & HEADERS_FLAG_PRIORITY) {
      // Serialize priority fields so a PRIORITY flag has matching payload.
      headers_ir.set_weight(3);
      headers_ir.set_has_priority(true);
      headers_ir.set_parent_stream_id(5);
      headers_ir.set_exclusive(true);
    }
    headers_ir.SetHeader("foo", "bar");
    SpdySerializedFrame frame(SpdyFramerPeer::SerializeHeaders(
        &framer, headers_ir, use_output_ ? &output_ : nullptr));
    uint8_t set_flags = flags & ~HEADERS_FLAG_PADDED;  // strip PADDED
    SetFrameFlags(&frame, set_flags);
    // Expected callback arguments, derived from the flag bits.
    SpdyStreamId stream_id = 57;
    bool has_priority = false;
    int weight = 0;
    SpdyStreamId parent_stream_id = 0;
    bool exclusive = false;
    bool fin = flags & CONTROL_FLAG_FIN;
    bool end = flags & HEADERS_FLAG_END_HEADERS;
    if (flags & HEADERS_FLAG_PRIORITY) {
      has_priority = true;
      weight = 3;
      parent_stream_id = 5;
      exclusive = true;
    }
    EXPECT_CALL(visitor, OnCommonHeader(stream_id, _, 0x1, set_flags));
    EXPECT_CALL(visitor, OnHeaders(stream_id, _, has_priority, weight,
                                   parent_stream_id, exclusive, fin, end));
    EXPECT_CALL(visitor, OnHeaderFrameStart(57)).Times(1);
    if (end) {
      EXPECT_CALL(visitor, OnHeaderFrameEnd(57)).Times(1);
    }
    // Stream end is only reported once the header block is complete.
    if (flags & DATA_FLAG_FIN && end) {
      EXPECT_CALL(visitor, OnStreamEnd(_));
    } else {
      EXPECT_CALL(visitor, OnStreamEnd(_)).Times(0);
    }
    deframer.ProcessInput(frame.data(), frame.size());
    EXPECT_EQ(Http2DecoderAdapter::SPDY_READY_FOR_FRAME, deframer.state());
    EXPECT_EQ(Http2DecoderAdapter::SPDY_NO_ERROR, deframer.spdy_framer_error())
        << Http2DecoderAdapter::SpdyFramerErrorToString(
               deframer.spdy_framer_error());
  } while (++flags != 0);
}
// PING decoding must tolerate every flag byte; only PING_FLAG_ACK is
// meaningful and is forwarded as the second argument of OnPing.
TEST_P(SpdyFramerTest, PingFrameFlags) {
  uint8_t flags = 0;
  do {
    SCOPED_TRACE(testing::Message()
                 << "Flags " << std::hex << static_cast<int>(flags));
    testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
    deframer_->set_visitor(&visitor);
    SpdySerializedFrame frame(framer_.SerializePing(SpdyPingIR(42)));
    SetFrameFlags(&frame, flags);
    // PING payload is always 8 bytes; frame type 0x6; stream id 0.
    EXPECT_CALL(visitor, OnCommonHeader(0, 8, 0x6, flags));
    EXPECT_CALL(visitor, OnPing(42, flags & PING_FLAG_ACK));
    deframer_->ProcessInput(frame.data(), frame.size());
    EXPECT_EQ(Http2DecoderAdapter::SPDY_READY_FOR_FRAME, deframer_->state());
    EXPECT_EQ(Http2DecoderAdapter::SPDY_NO_ERROR,
              deframer_->spdy_framer_error())
        << Http2DecoderAdapter::SpdyFramerErrorToString(
               deframer_->spdy_framer_error());
    deframer_ = std::make_unique<Http2DecoderAdapter>();
  } while (++flags != 0);
}
// WINDOW_UPDATE defines no flags, so every flag byte must decode cleanly
// and be reported verbatim via OnCommonHeader.
TEST_P(SpdyFramerTest, WindowUpdateFrameFlags) {
  uint8_t flags = 0;
  do {
    SCOPED_TRACE(testing::Message()
                 << "Flags " << std::hex << static_cast<int>(flags));
    testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
    deframer_->set_visitor(&visitor);
    SpdySerializedFrame frame(framer_.SerializeWindowUpdate(
        SpdyWindowUpdateIR(/* stream_id = */ 4, /* delta = */ 1024)));
    SetFrameFlags(&frame, flags);
    // WINDOW_UPDATE payload is always 4 bytes; frame type 0x8.
    EXPECT_CALL(visitor, OnCommonHeader(4, 4, 0x8, flags));
    EXPECT_CALL(visitor, OnWindowUpdate(4, 1024));
    deframer_->ProcessInput(frame.data(), frame.size());
    EXPECT_EQ(Http2DecoderAdapter::SPDY_READY_FOR_FRAME, deframer_->state());
    EXPECT_EQ(Http2DecoderAdapter::SPDY_NO_ERROR,
              deframer_->spdy_framer_error())
        << Http2DecoderAdapter::SpdyFramerErrorToString(
               deframer_->spdy_framer_error());
    deframer_ = std::make_unique<Http2DecoderAdapter>();
  } while (++flags != 0);
}
// PUSH_PROMISE decoding across all flag bytes.  The PADDED bit is cleared
// (serializer emits no padding); END_PUSH_PROMISE controls whether the
// header block is complete, which is echoed in OnPushPromise and
// OnHeaderFrameEnd.  Debug visitors verify compressed-frame callbacks on
// both the send and receive path.
TEST_P(SpdyFramerTest, PushPromiseFrameFlags) {
  const SpdyStreamId client_id = 123;
  const SpdyStreamId promised_id = 22;
  uint8_t flags = 0;
  do {
    SCOPED_TRACE(testing::Message()
                 << "Flags " << std::hex << static_cast<int>(flags));
    testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
    testing::StrictMock<test::MockDebugVisitor> debug_visitor;
    SpdyFramer framer(SpdyFramer::ENABLE_COMPRESSION);
    Http2DecoderAdapter deframer;
    deframer.set_visitor(&visitor);
    deframer.set_debug_visitor(&debug_visitor);
    framer.set_debug_visitor(&debug_visitor);
    EXPECT_CALL(
        debug_visitor,
        OnSendCompressedFrame(client_id, SpdyFrameType::PUSH_PROMISE, _, _));
    SpdyPushPromiseIR push_promise(client_id, promised_id);
    push_promise.SetHeader("foo", "bar");
    SpdySerializedFrame frame(SpdyFramerPeer::SerializePushPromise(
        &framer, push_promise, use_output_ ? &output_ : nullptr));
    // Mask out PADDED: no pad-length byte is present in the payload.
    SetFrameFlags(&frame, flags & ~HEADERS_FLAG_PADDED);
    bool end = flags & PUSH_PROMISE_FLAG_END_PUSH_PROMISE;
    EXPECT_CALL(debug_visitor, OnReceiveCompressedFrame(
                                   client_id, SpdyFrameType::PUSH_PROMISE, _));
    EXPECT_CALL(visitor, OnCommonHeader(client_id, _, 0x5,
                                        flags & ~HEADERS_FLAG_PADDED));
    EXPECT_CALL(visitor, OnPushPromise(client_id, promised_id, end));
    EXPECT_CALL(visitor, OnHeaderFrameStart(client_id)).Times(1);
    if (end) {
      EXPECT_CALL(visitor, OnHeaderFrameEnd(client_id)).Times(1);
    }
    deframer.ProcessInput(frame.data(), frame.size());
    EXPECT_EQ(Http2DecoderAdapter::SPDY_READY_FOR_FRAME, deframer.state());
    EXPECT_EQ(Http2DecoderAdapter::SPDY_NO_ERROR, deframer.spdy_framer_error())
        << Http2DecoderAdapter::SpdyFramerErrorToString(
               deframer.spdy_framer_error());
  } while (++flags != 0);
}
// CONTINUATION decoding across all flag bytes.  A HEADERS frame with
// END_HEADERS cleared is sent first (so the decoder expects a continuation),
// followed by a CONTINUATION frame carrying the flags under test; only
// END_HEADERS is meaningful and triggers OnHeaderFrameEnd.
TEST_P(SpdyFramerTest, ContinuationFrameFlags) {
  uint8_t flags = 0;
  do {
    if (use_output_) {
      output_.Reset();
    }
    SCOPED_TRACE(testing::Message()
                 << "Flags " << std::hex << static_cast<int>(flags));
    testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
    testing::StrictMock<test::MockDebugVisitor> debug_visitor;
    SpdyFramer framer(SpdyFramer::ENABLE_COMPRESSION);
    Http2DecoderAdapter deframer;
    deframer.set_visitor(&visitor);
    deframer.set_debug_visitor(&debug_visitor);
    framer.set_debug_visitor(&debug_visitor);
    EXPECT_CALL(debug_visitor,
                OnSendCompressedFrame(42, SpdyFrameType::HEADERS, _, _));
    EXPECT_CALL(debug_visitor,
                OnReceiveCompressedFrame(42, SpdyFrameType::HEADERS, _));
    EXPECT_CALL(visitor, OnCommonHeader(42, _, 0x1, 0));
    EXPECT_CALL(visitor, OnHeaders(42, _, false, 0, 0, false, false, false));
    EXPECT_CALL(visitor, OnHeaderFrameStart(42)).Times(1);
    SpdyHeadersIR headers_ir(/* stream_id = */ 42);
    headers_ir.SetHeader("foo", "bar");
    SpdySerializedFrame frame0;
    if (use_output_) {
      EXPECT_TRUE(framer.SerializeHeaders(headers_ir, &output_));
      frame0 = MakeSerializedFrame(output_.Begin(), output_.Size());
    } else {
      frame0 = framer.SerializeHeaders(headers_ir);
    }
    // Clear END_HEADERS on the HEADERS frame so a CONTINUATION is expected.
    SetFrameFlags(&frame0, 0);
    SpdyContinuationIR continuation(/* stream_id = */ 42);
    SpdySerializedFrame frame1;
    if (use_output_) {
      // The continuation is appended after frame0 in the shared buffer;
      // slice out just the continuation bytes.
      char* begin = output_.Begin() + output_.Size();
      ASSERT_TRUE(framer.SerializeContinuation(continuation, &output_));
      frame1 = MakeSerializedFrame(begin, output_.Size() - frame0.size());
    } else {
      frame1 = framer.SerializeContinuation(continuation);
    }
    SetFrameFlags(&frame1, flags);
    EXPECT_CALL(debug_visitor,
                OnReceiveCompressedFrame(42, SpdyFrameType::CONTINUATION, _));
    // Frame type 0x9 is CONTINUATION.
    EXPECT_CALL(visitor, OnCommonHeader(42, _, 0x9, flags));
    EXPECT_CALL(visitor,
                OnContinuation(42, _, flags & HEADERS_FLAG_END_HEADERS));
    bool end = flags & HEADERS_FLAG_END_HEADERS;
    if (end) {
      EXPECT_CALL(visitor, OnHeaderFrameEnd(42)).Times(1);
    }
    deframer.ProcessInput(frame0.data(), frame0.size());
    deframer.ProcessInput(frame1.data(), frame1.size());
    EXPECT_EQ(Http2DecoderAdapter::SPDY_READY_FOR_FRAME, deframer.state());
    EXPECT_EQ(Http2DecoderAdapter::SPDY_NO_ERROR, deframer.spdy_framer_error())
        << Http2DecoderAdapter::SpdyFramerErrorToString(
               deframer.spdy_framer_error());
  } while (++flags != 0);
}
// RST_STREAM error codes outside the known range must be normalized rather
// than rejected: 0 maps to NO_ERROR, and an out-of-range code (0xff) maps
// to INTERNAL_ERROR.
TEST_P(SpdyFramerTest, RstStreamStatusBounds) {
  const unsigned char kH2RstStreamInvalid[] = {
      0x00, 0x00, 0x04,        // length: 4
      0x03,                    // type: RST_STREAM
      0x00,                    // flags
      0x00, 0x00, 0x00, 0x01,  // stream id: 1
      0x00, 0x00, 0x00, 0x00,  // error code: 0 (NO_ERROR)
  };
  const unsigned char kH2RstStreamNumStatusCodes[] = {
      0x00, 0x00, 0x04,        // length: 4
      0x03,                    // type: RST_STREAM
      0x00,                    // flags
      0x00, 0x00, 0x00, 0x01,  // stream id: 1
      0x00, 0x00, 0x00, 0xff,  // error code: 0xff (out of range)
  };
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  EXPECT_CALL(visitor, OnCommonHeader(1, 4, 0x3, 0x0));
  EXPECT_CALL(visitor, OnRstStream(1, ERROR_CODE_NO_ERROR));
  deframer_->ProcessInput(reinterpret_cast<const char*>(kH2RstStreamInvalid),
                          ABSL_ARRAYSIZE(kH2RstStreamInvalid));
  EXPECT_EQ(Http2DecoderAdapter::SPDY_READY_FOR_FRAME, deframer_->state());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_NO_ERROR, deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
  // Fresh decoder for the out-of-range code case.
  deframer_ = std::make_unique<Http2DecoderAdapter>();
  deframer_->set_visitor(&visitor);
  EXPECT_CALL(visitor, OnCommonHeader(1, 4, 0x3, 0x0));
  EXPECT_CALL(visitor, OnRstStream(1, ERROR_CODE_INTERNAL_ERROR));
  deframer_->ProcessInput(
      reinterpret_cast<const char*>(kH2RstStreamNumStatusCodes),
      ABSL_ARRAYSIZE(kH2RstStreamNumStatusCodes));
  EXPECT_EQ(Http2DecoderAdapter::SPDY_READY_FOR_FRAME, deframer_->state());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_NO_ERROR, deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
}
// A GOAWAY with an out-of-range error code (0xffffffff) must be accepted
// and reported as INTERNAL_ERROR rather than causing a framer error.
TEST_P(SpdyFramerTest, GoAwayStatusBounds) {
  const unsigned char kH2FrameData[] = {
      0x00, 0x00, 0x0a,        // length: 10
      0x07,                    // type: GOAWAY
      0x00,                    // flags
      0x00, 0x00, 0x00, 0x00,  // stream id: 0
      0x00, 0x00, 0x00, 0x01,  // last good stream id: 1
      0xff, 0xff, 0xff, 0xff,  // error code: out of range
      0x47, 0x41,              // opaque debug data: "GA"
  };
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  EXPECT_CALL(visitor, OnCommonHeader(0, 10, 0x7, 0x0));
  EXPECT_CALL(visitor, OnGoAway(1, ERROR_CODE_INTERNAL_ERROR));
  EXPECT_CALL(visitor, OnGoAwayFrameData).WillRepeatedly(testing::Return(true));
  deframer_->ProcessInput(reinterpret_cast<const char*>(kH2FrameData),
                          ABSL_ARRAYSIZE(kH2FrameData));
  EXPECT_EQ(Http2DecoderAdapter::SPDY_READY_FOR_FRAME, deframer_->state());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_NO_ERROR, deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
}
// A GOAWAY whose last-good-stream-id has the reserved high bit set
// (0xffffffff) must have that bit masked off, yielding 0x7fffffff.
TEST_P(SpdyFramerTest, GoAwayStreamIdBounds) {
  const unsigned char kH2FrameData[] = {
      0x00, 0x00, 0x08,        // length: 8
      0x07,                    // type: GOAWAY
      0x00,                    // flags
      0x00, 0x00, 0x00, 0x00,  // stream id: 0
      0xff, 0xff, 0xff, 0xff,  // last good stream id: reserved bit set
      0x00, 0x00, 0x00, 0x00,  // error code: NO_ERROR
  };
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  EXPECT_CALL(visitor, OnCommonHeader(0, 8, 0x7, 0x0));
  EXPECT_CALL(visitor, OnGoAway(0x7fffffff, ERROR_CODE_NO_ERROR));
  EXPECT_CALL(visitor, OnGoAwayFrameData).WillRepeatedly(testing::Return(true));
  deframer_->ProcessInput(reinterpret_cast<const char*>(kH2FrameData),
                          ABSL_ARRAYSIZE(kH2FrameData));
  EXPECT_EQ(Http2DecoderAdapter::SPDY_READY_FOR_FRAME, deframer_->state());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_NO_ERROR, deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
}
// Round-trips an ALTSVC frame on stream 0 that carries an explicit origin
// plus two alternative services (one with characters requiring escaping),
// and checks that OnAltSvc reports origin and services verbatim.
TEST_P(SpdyFramerTest, OnAltSvcWithOrigin) {
  const SpdyStreamId kStreamId = 0;
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  SpdyAltSvcWireFormat::AlternativeService altsvc1(
      "pid1", "host", 443, 5, SpdyAltSvcWireFormat::VersionVector());
  // Protocol id and host contain '"', '=', ':' and '\\' to exercise the
  // wire-format escaping rules.
  SpdyAltSvcWireFormat::AlternativeService altsvc2(
      "p\"=i:d", "h_\\o\"st", 123, 42, SpdyAltSvcWireFormat::VersionVector{24});
  SpdyAltSvcWireFormat::AlternativeServiceVector altsvc_vector;
  altsvc_vector.push_back(altsvc1);
  altsvc_vector.push_back(altsvc2);
  // Frame type 0x0A is ALTSVC.
  EXPECT_CALL(visitor, OnCommonHeader(kStreamId, _, 0x0A, 0x0));
  EXPECT_CALL(visitor,
              OnAltSvc(kStreamId, absl::string_view("o_r|g!n"), altsvc_vector));
  SpdyAltSvcIR altsvc_ir(kStreamId);
  altsvc_ir.set_origin("o_r|g!n");
  altsvc_ir.add_altsvc(altsvc1);
  altsvc_ir.add_altsvc(altsvc2);
  SpdySerializedFrame frame(framer_.SerializeFrame(altsvc_ir));
  if (use_output_) {
    output_.Reset();
    EXPECT_EQ(framer_.SerializeFrame(altsvc_ir, &output_), frame.size());
    frame = MakeSerializedFrame(output_.Begin(), output_.Size());
  }
  deframer_->ProcessInput(frame.data(), frame.size());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_READY_FOR_FRAME, deframer_->state());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_NO_ERROR, deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
}
// Round-trips an ALTSVC frame on a non-zero stream with no origin set;
// OnAltSvc must report an empty origin string.
TEST_P(SpdyFramerTest, OnAltSvcNoOrigin) {
  const SpdyStreamId kStreamId = 1;
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  SpdyAltSvcWireFormat::AlternativeService altsvc1(
      "pid1", "host", 443, 5, SpdyAltSvcWireFormat::VersionVector());
  SpdyAltSvcWireFormat::AlternativeService altsvc2(
      "p\"=i:d", "h_\\o\"st", 123, 42, SpdyAltSvcWireFormat::VersionVector{24});
  SpdyAltSvcWireFormat::AlternativeServiceVector altsvc_vector;
  altsvc_vector.push_back(altsvc1);
  altsvc_vector.push_back(altsvc2);
  EXPECT_CALL(visitor, OnCommonHeader(kStreamId, _, 0x0A, 0x0));
  EXPECT_CALL(visitor,
              OnAltSvc(kStreamId, absl::string_view(""), altsvc_vector));
  SpdyAltSvcIR altsvc_ir(kStreamId);
  altsvc_ir.add_altsvc(altsvc1);
  altsvc_ir.add_altsvc(altsvc2);
  SpdySerializedFrame frame(framer_.SerializeFrame(altsvc_ir));
  deframer_->ProcessInput(frame.data(), frame.size());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_READY_FOR_FRAME, deframer_->state());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_NO_ERROR, deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
}
// An ALTSVC frame containing an alternative service with an empty protocol
// id is invalid: decoding must fail with SPDY_INVALID_CONTROL_FRAME.
TEST_P(SpdyFramerTest, OnAltSvcEmptyProtocolId) {
  const SpdyStreamId kStreamId = 0;
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  EXPECT_CALL(visitor, OnCommonHeader(kStreamId, _, 0x0A, 0x0));
  EXPECT_CALL(visitor,
              OnError(Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME, _));
  SpdyAltSvcIR altsvc_ir(kStreamId);
  altsvc_ir.set_origin("o1");
  altsvc_ir.add_altsvc(SpdyAltSvcWireFormat::AlternativeService(
      "pid1", "host", 443, 5, SpdyAltSvcWireFormat::VersionVector()));
  // Second service has an empty protocol id, which is the defect under test.
  altsvc_ir.add_altsvc(SpdyAltSvcWireFormat::AlternativeService(
      "", "h1", 443, 10, SpdyAltSvcWireFormat::VersionVector()));
  SpdySerializedFrame frame(framer_.SerializeFrame(altsvc_ir));
  if (use_output_) {
    output_.Reset();
    EXPECT_EQ(framer_.SerializeFrame(altsvc_ir, &output_), frame.size());
    frame = MakeSerializedFrame(output_.Begin(), output_.Size());
  }
  deframer_->ProcessInput(frame.data(), frame.size());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_ERROR, deframer_->state());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME,
            deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
}
// An ALTSVC frame whose origin-length field (0x4242) exceeds the frame's
// payload length (5) must be rejected as an invalid control frame.
TEST_P(SpdyFramerTest, OnAltSvcBadLengths) {
  const unsigned char kType = SerializeFrameType(SpdyFrameType::ALTSVC);
  const unsigned char kFrameDataOriginLenLargerThanFrame[] = {
      // Frame header: length 5, ALTSVC, flags 0, stream id 3; payload:
      // 2-byte origin length 0x4242 followed by only 3 bytes of data.
      0x00, 0x00, 0x05, kType, 0x00, 0x00, 0x00,
      0x00, 0x03, 0x42, 0x42,  'f',  'o',  'o',
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  deframer_->set_visitor(&visitor);
  visitor.SimulateInFramer(kFrameDataOriginLenLargerThanFrame,
                           sizeof(kFrameDataOriginLenLargerThanFrame));
  EXPECT_EQ(1, visitor.error_count_);
  EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME,
            visitor.deframer_.spdy_framer_error());
}
// Feeds a serialized ALTSVC frame to the decoder in 5-byte chunks to verify
// that incremental parsing reassembles both alternative services correctly.
TEST_P(SpdyFramerTest, ReadChunkedAltSvcFrame) {
  SpdyAltSvcIR altsvc_ir(/* stream_id = */ 1);
  SpdyAltSvcWireFormat::AlternativeService altsvc1(
      "pid1", "host", 443, 5, SpdyAltSvcWireFormat::VersionVector());
  SpdyAltSvcWireFormat::AlternativeService altsvc2(
      "p\"=i:d", "h_\\o\"st", 123, 42, SpdyAltSvcWireFormat::VersionVector{24});
  altsvc_ir.add_altsvc(altsvc1);
  altsvc_ir.add_altsvc(altsvc2);
  SpdySerializedFrame control_frame(framer_.SerializeAltSvc(altsvc_ir));
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  // Push the frame through the framer a few bytes at a time.
  size_t framed_data = 0;
  size_t unframed_data = control_frame.size();
  size_t kReadChunkSize = 5;
  while (unframed_data > 0) {
    size_t to_read = std::min(kReadChunkSize, unframed_data);
    visitor.SimulateInFramer(
        reinterpret_cast<unsigned char*>(control_frame.data() + framed_data),
        to_read);
    unframed_data -= to_read;
    framed_data += to_read;
  }
  EXPECT_EQ(0, visitor.error_count_);
  EXPECT_EQ(1, visitor.altsvc_count_);
  ASSERT_NE(nullptr, visitor.test_altsvc_ir_);
  ASSERT_EQ(2u, visitor.test_altsvc_ir_->altsvc_vector().size());
  EXPECT_TRUE(visitor.test_altsvc_ir_->altsvc_vector()[0] == altsvc1);
  EXPECT_TRUE(visitor.test_altsvc_ir_->altsvc_vector()[1] == altsvc2);
}
// Round-trips ALTSVC frames over the cross product of {stream 0, stream 1}
// and {no origin, explicit origin} and verifies each decodes without error.
//
// NOTE(review): the two origin string literals had been truncated at "https:"
// (a comment-stripping pass ate everything after "//", leaving an
// unterminated literal and an unclosed initializer).  Restored to the full
// example origin; confirm against upstream history.
TEST_P(SpdyFramerTest, ReadAltSvcFrame) {
  constexpr struct {
    uint32_t stream_id;
    const char* origin;
  } test_cases[] = {{0, ""},
                    {1, ""},
                    {0, "https://www.example.com"},
                    {1, "https://www.example.com"}};
  for (const auto& test_case : test_cases) {
    SpdyAltSvcIR altsvc_ir(test_case.stream_id);
    SpdyAltSvcWireFormat::AlternativeService altsvc(
        "pid1", "host", 443, 5, SpdyAltSvcWireFormat::VersionVector());
    altsvc_ir.add_altsvc(altsvc);
    altsvc_ir.set_origin(test_case.origin);
    SpdySerializedFrame frame(framer_.SerializeAltSvc(altsvc_ir));
    TestSpdyVisitor visitor(SpdyFramer::ENABLE_COMPRESSION);
    deframer_->set_visitor(&visitor);
    deframer_->ProcessInput(frame.data(), frame.size());
    EXPECT_EQ(0, visitor.error_count_);
    EXPECT_EQ(1, visitor.altsvc_count_);
    EXPECT_EQ(Http2DecoderAdapter::SPDY_READY_FOR_FRAME, deframer_->state());
    EXPECT_EQ(Http2DecoderAdapter::SPDY_NO_ERROR,
              deframer_->spdy_framer_error())
        << Http2DecoderAdapter::SpdyFramerErrorToString(
               deframer_->spdy_framer_error());
  }
}
// An ALTSVC frame whose field-value is not parseable as an Alt-Svc header
// value must produce SPDY_INVALID_CONTROL_FRAME.
TEST_P(SpdyFramerTest, ErrorOnAltSvcFrameWithInvalidValue) {
  const char kFrameData[] = {
      0x00, 0x00, 0x16,        // length: 22
      0x0a,                    // type: ALTSVC
      0x00,                    // flags
      0x00, 0x00, 0x00, 0x01,  // stream id: 1
      0x00, 0x00,              // origin length: 0
      // "thisisnotavalidvalue" -- not a valid Alt-Svc field value.
      0x74, 0x68, 0x69, 0x73,
      0x69, 0x73, 0x6e, 0x6f, 0x74, 0x61, 0x76, 0x61,
      0x6c, 0x69, 0x64, 0x76, 0x61, 0x6c, 0x75, 0x65,
  };
  TestSpdyVisitor visitor(SpdyFramer::ENABLE_COMPRESSION);
  deframer_->set_visitor(&visitor);
  deframer_->ProcessInput(kFrameData, sizeof(kFrameData));
  EXPECT_EQ(1, visitor.error_count_);
  EXPECT_EQ(0, visitor.altsvc_count_);
  EXPECT_EQ(Http2DecoderAdapter::SPDY_ERROR, deframer_->state());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME,
            deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
}
// A well-formed PRIORITY_UPDATE frame (type 0x10) on stream 0 prioritizing
// stream 3 with field value "foo" must be delivered via OnPriorityUpdate.
TEST_P(SpdyFramerTest, ReadPriorityUpdateFrame) {
  const char kFrameData[] = {
      0x00, 0x00, 0x07,        // length: 7
      0x10,                    // type: PRIORITY_UPDATE
      0x00,                    // flags
      0x00, 0x00, 0x00, 0x00,  // stream id: 0 (required)
      0x00, 0x00, 0x00, 0x03,  // prioritized stream id: 3
      'f',  'o',  'o'          // priority field value
  };
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  EXPECT_CALL(visitor, OnCommonHeader(0, 7, 0x10, 0x0));
  EXPECT_CALL(visitor, OnPriorityUpdate(3, "foo"));
  deframer_->ProcessInput(kFrameData, sizeof(kFrameData));
  EXPECT_FALSE(deframer_->HasError());
}
// A PRIORITY_UPDATE with no priority field value (payload is just the
// 4-byte prioritized stream id) is valid and yields an empty string.
TEST_P(SpdyFramerTest, ReadPriorityUpdateFrameWithEmptyPriorityFieldValue) {
  const char kFrameData[] = {
      0x00, 0x00, 0x04,        // length: 4
      0x10,                    // type: PRIORITY_UPDATE
      0x00,                    // flags
      0x00, 0x00, 0x00, 0x00,  // stream id: 0
      0x00, 0x00, 0x00, 0x03   // prioritized stream id: 3
  };
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  EXPECT_CALL(visitor, OnCommonHeader(0, 4, 0x10, 0x0));
  EXPECT_CALL(visitor, OnPriorityUpdate(3, ""));
  deframer_->ProcessInput(kFrameData, sizeof(kFrameData));
  EXPECT_FALSE(deframer_->HasError());
}
// A PRIORITY_UPDATE must carry at least the 4-byte prioritized stream id;
// an empty payload is a frame-size error.
TEST_P(SpdyFramerTest, PriorityUpdateFrameWithEmptyPayload) {
  const char kFrameData[] = {
      0x00, 0x00, 0x00,        // length: 0 (too short)
      0x10,                    // type: PRIORITY_UPDATE
      0x00,                    // flags
      0x00, 0x00, 0x00, 0x00,  // stream id: 0
  };
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  EXPECT_CALL(visitor, OnCommonHeader(0, 0, 0x10, 0x0));
  EXPECT_CALL(visitor,
              OnError(Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME_SIZE, _));
  deframer_->ProcessInput(kFrameData, sizeof(kFrameData));
  EXPECT_TRUE(deframer_->HasError());
}
// A PRIORITY_UPDATE payload shorter than the 4-byte prioritized stream id
// (here only 2 bytes) is a frame-size error.
TEST_P(SpdyFramerTest, PriorityUpdateFrameWithShortPayload) {
  const char kFrameData[] = {
      0x00, 0x00, 0x02,        // length: 2 (too short)
      0x10,                    // type: PRIORITY_UPDATE
      0x00,                    // flags
      0x00, 0x00, 0x00, 0x00,  // stream id: 0
      0x00, 0x01               // truncated prioritized stream id
  };
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  EXPECT_CALL(visitor, OnCommonHeader(0, 2, 0x10, 0x0));
  EXPECT_CALL(visitor,
              OnError(Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME_SIZE, _));
  deframer_->ProcessInput(kFrameData, sizeof(kFrameData));
  EXPECT_TRUE(deframer_->HasError());
}
// PRIORITY_UPDATE frames must arrive on stream 0; a non-zero stream id
// (here 1) is a stream-id error.
TEST_P(SpdyFramerTest, PriorityUpdateFrameOnIncorrectStream) {
  const char kFrameData[] = {
      0x00, 0x00, 0x04,        // length: 4
      0x10,                    // type: PRIORITY_UPDATE
      0x00,                    // flags
      0x00, 0x00, 0x00, 0x01,  // stream id: 1 (invalid; must be 0)
      0x00, 0x00, 0x00, 0x01,  // prioritized stream id: 1
  };
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  EXPECT_CALL(visitor, OnCommonHeader(1, 4, 0x10, 0x0));
  EXPECT_CALL(visitor, OnError(Http2DecoderAdapter::SPDY_INVALID_STREAM_ID, _));
  deframer_->ProcessInput(kFrameData, sizeof(kFrameData));
  EXPECT_TRUE(deframer_->HasError());
}
// A PRIORITY_UPDATE must prioritize a real stream: a prioritized stream id
// of 0 is a stream-id error.
TEST_P(SpdyFramerTest, PriorityUpdateFramePrioritizingIncorrectStream) {
  const char kFrameData[] = {
      0x00, 0x00, 0x04,        // length: 4
      0x10,                    // type: PRIORITY_UPDATE
      0x00,                    // flags
      0x00, 0x00, 0x00, 0x00,  // stream id: 0
      0x00, 0x00, 0x00, 0x00,  // prioritized stream id: 0 (invalid)
  };
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  EXPECT_CALL(visitor, OnCommonHeader(0, 4, 0x10, 0x0));
  EXPECT_CALL(visitor, OnError(Http2DecoderAdapter::SPDY_INVALID_STREAM_ID, _));
  deframer_->ProcessInput(kFrameData, sizeof(kFrameData));
  EXPECT_TRUE(deframer_->HasError());
}
// Round-trips a PRIORITY frame (type 0x2, 5-byte payload) and verifies the
// parent stream, weight and exclusivity are reported via OnPriority.
TEST_P(SpdyFramerTest, ReadPriority) {
  SpdyPriorityIR priority(/* stream_id = */ 3,
                          /* parent_stream_id = */ 1,
                          /* weight = */ 256,
                          /* exclusive = */ false);
  SpdySerializedFrame frame(framer_.SerializePriority(priority));
  if (use_output_) {
    output_.Reset();
    ASSERT_TRUE(framer_.SerializePriority(priority, &output_));
    frame = MakeSerializedFrame(output_.Begin(), output_.Size());
  }
  testing::StrictMock<test::MockSpdyFramerVisitor> visitor;
  deframer_->set_visitor(&visitor);
  EXPECT_CALL(visitor, OnCommonHeader(3, 5, 0x2, 0x0));
  EXPECT_CALL(visitor, OnPriority(3, 1, 256, false));
  deframer_->ProcessInput(frame.data(), frame.size());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_READY_FOR_FRAME, deframer_->state());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_NO_ERROR, deframer_->spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             deframer_->spdy_framer_error());
}
// PRIORITY frames must be exactly 5 bytes of payload; a 4-byte payload is a
// frame-size error.
TEST_P(SpdyFramerTest, ReadIncorrectlySizedPriority) {
  const unsigned char kFrameData[] = {
      0x00, 0x00, 0x04,        // length: 4 (invalid; must be 5)
      0x02,                    // type: PRIORITY
      0x00,                    // flags
      0x00, 0x00, 0x00, 0x03,  // stream id: 3
      0x00, 0x00, 0x00, 0x01,  // truncated payload
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(kFrameData, sizeof(kFrameData));
  EXPECT_EQ(Http2DecoderAdapter::SPDY_ERROR, visitor.deframer_.state());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME_SIZE,
            visitor.deframer_.spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             visitor.deframer_.spdy_framer_error());
}
// PING frames must be exactly 8 bytes of payload; a 4-byte payload is a
// frame-size error.
TEST_P(SpdyFramerTest, ReadIncorrectlySizedPing) {
  const unsigned char kFrameData[] = {
      0x00, 0x00, 0x04,        // length: 4 (invalid; must be 8)
      0x06,                    // type: PING
      0x00,                    // flags
      0x00, 0x00, 0x00, 0x00,  // stream id: 0
      0x00, 0x00, 0x00, 0x01,  // truncated payload
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(kFrameData, sizeof(kFrameData));
  EXPECT_EQ(Http2DecoderAdapter::SPDY_ERROR, visitor.deframer_.state());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME_SIZE,
            visitor.deframer_.spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             visitor.deframer_.spdy_framer_error());
}
// WINDOW_UPDATE frames must be exactly 4 bytes of payload; 3 bytes is a
// frame-size error.
TEST_P(SpdyFramerTest, ReadIncorrectlySizedWindowUpdate) {
  const unsigned char kFrameData[] = {
      0x00, 0x00, 0x03,        // length: 3 (invalid; must be 4)
      0x08,                    // type: WINDOW_UPDATE
      0x00,                    // flags
      0x00, 0x00, 0x00, 0x03,  // stream id: 3
      0x00, 0x00, 0x01,        // truncated payload
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(kFrameData, sizeof(kFrameData));
  EXPECT_EQ(Http2DecoderAdapter::SPDY_ERROR, visitor.deframer_.state());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME_SIZE,
            visitor.deframer_.spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             visitor.deframer_.spdy_framer_error());
}
// RST_STREAM frames must be exactly 4 bytes of payload; 3 bytes is a
// frame-size error.
TEST_P(SpdyFramerTest, ReadIncorrectlySizedRstStream) {
  const unsigned char kFrameData[] = {
      0x00, 0x00, 0x03,        // length: 3 (invalid; must be 4)
      0x03,                    // type: RST_STREAM
      0x00,                    // flags
      0x00, 0x00, 0x00, 0x03,  // stream id: 3
      0x00, 0x00, 0x01,        // truncated payload
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(kFrameData, sizeof(kFrameData));
  EXPECT_EQ(Http2DecoderAdapter::SPDY_ERROR, visitor.deframer_.state());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME_SIZE,
            visitor.deframer_.spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             visitor.deframer_.spdy_framer_error());
}
// A RST_STREAM frame longer than 4 bytes (extra opaque payload "foo") is a
// frame-size error.
TEST_P(SpdyFramerTest, ReadInvalidRstStreamWithPayload) {
  const unsigned char kFrameData[] = {
      0x00, 0x00, 0x07,        // length: 7 (invalid; must be 4)
      0x03,                    // type: RST_STREAM
      0x00,                    // flags
      0x00, 0x00, 0x00, 0x01,  // stream id: 1
      0x00, 0x00, 0x00, 0x00,  // error code: NO_ERROR
      'f',  'o',  'o'          // unexpected trailing bytes
  };
  TestSpdyVisitor visitor(SpdyFramer::DISABLE_COMPRESSION);
  visitor.SimulateInFramer(kFrameData, sizeof(kFrameData));
  EXPECT_EQ(Http2DecoderAdapter::SPDY_ERROR, visitor.deframer_.state());
  EXPECT_EQ(Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME_SIZE,
            visitor.deframer_.spdy_framer_error())
      << Http2DecoderAdapter::SpdyFramerErrorToString(
             visitor.deframer_.spdy_framer_error());
}
// Concatenates a HEADERS frame and a DATA frame into one buffer and checks
// that a single ProcessInput call consumes the entire buffer and delivers
// both frames (one headers callback, one data callback of the right size).
TEST_P(SpdyFramerTest, ProcessAllInput) {
  auto visitor =
      std::make_unique<TestSpdyVisitor>(SpdyFramer::DISABLE_COMPRESSION);
  deframer_->set_visitor(visitor.get());
  SpdyHeadersIR headers(/* stream_id = */ 1);
  headers.SetHeader("alpha", "beta");
  headers.SetHeader("gamma", "charlie");
  headers.SetHeader("cookie", "key1=value1; key2=value2");
  SpdySerializedFrame headers_frame(SpdyFramerPeer::SerializeHeaders(
      &framer_, headers, use_output_ ? &output_ : nullptr));
  const char four_score[] = "Four score and seven years ago";
  SpdyDataIR four_score_ir(/* stream_id = */ 1, four_score);
  SpdySerializedFrame four_score_frame(framer_.SerializeData(four_score_ir));
  SpdySerializedFrame frame1 = std::move(headers_frame);
  SpdySerializedFrame frame2 = std::move(four_score_frame);
  const size_t frame1_size = frame1.size();
  const size_t frame2_size = frame2.size();
  QUICHE_VLOG(1) << "frame1_size = " << frame1_size;
  QUICHE_VLOG(1) << "frame2_size = " << frame2_size;
  // Feed both frames back-to-back in a single input buffer.
  std::string input_buffer;
  input_buffer.append(frame1.data(), frame1_size);
  input_buffer.append(frame2.data(), frame2_size);
  const char* buf = input_buffer.data();
  const size_t buf_size = input_buffer.size();
  QUICHE_VLOG(1) << "buf_size = " << buf_size;
  size_t processed = deframer_->ProcessInput(buf, buf_size);
  // Everything should be consumed in one pass.
  EXPECT_EQ(buf_size, processed);
  EXPECT_EQ(Http2DecoderAdapter::SPDY_READY_FOR_FRAME, deframer_->state());
  EXPECT_EQ(1, visitor->headers_frame_count_);
  EXPECT_EQ(1, visitor->data_frame_count_);
  EXPECT_EQ(strlen(four_score), static_cast<unsigned>(visitor->data_bytes_));
}
namespace {
// Serializes `ir` with `framer` into `array_output_buffer` and checks that
// the IR's size() estimate matches the actual serialized size.  HEADERS and
// PUSH_PROMISE sizes depend on header encoding, so size() is only required
// to fall within +/-10% of the serialized size; all other frame types must
// match exactly.
void CheckFrameAndIRSize(SpdyFrameIR* ir, SpdyFramer* framer,
                         ArrayOutputBuffer* array_output_buffer) {
  array_output_buffer->Reset();
  SpdyFrameType type = ir->frame_type();
  size_t ir_size = ir->size();
  framer->SerializeFrame(*ir, array_output_buffer);
  if (type == SpdyFrameType::HEADERS || type == SpdyFrameType::PUSH_PROMISE) {
    EXPECT_GE(ir_size, array_output_buffer->Size() * 9 / 10);
    EXPECT_LT(ir_size, array_output_buffer->Size() * 11 / 10);
  } else {
    EXPECT_EQ(ir_size, array_output_buffer->Size());
  }
}
}
// Builds one IR of every frame type (including oversized header blocks that
// force CONTINUATION frames, and an unknown/extension frame) and verifies
// each IR's size() estimate against its serialized size via
// CheckFrameAndIRSize.
TEST_P(SpdyFramerTest, SpdyFrameIRSize) {
  SpdyFramer framer(SpdyFramer::DISABLE_COMPRESSION);
  const char bytes[] = "this is a very short data frame";
  SpdyDataIR data_ir(1, absl::string_view(bytes, ABSL_ARRAYSIZE(bytes)));
  CheckFrameAndIRSize(&data_ir, &framer, &output_);
  SpdyRstStreamIR rst_ir(/* stream_id = */ 1, ERROR_CODE_PROTOCOL_ERROR);
  CheckFrameAndIRSize(&rst_ir, &framer, &output_);
  SpdySettingsIR settings_ir;
  settings_ir.AddSetting(SETTINGS_HEADER_TABLE_SIZE, 5);
  settings_ir.AddSetting(SETTINGS_ENABLE_PUSH, 6);
  settings_ir.AddSetting(SETTINGS_MAX_CONCURRENT_STREAMS, 7);
  CheckFrameAndIRSize(&settings_ir, &framer, &output_);
  SpdyPingIR ping_ir(42);
  CheckFrameAndIRSize(&ping_ir, &framer, &output_);
  SpdyGoAwayIR goaway_ir(97, ERROR_CODE_NO_ERROR, "Goaway description");
  CheckFrameAndIRSize(&goaway_ir, &framer, &output_);
  SpdyHeadersIR headers_ir(1);
  headers_ir.SetHeader("alpha", "beta");
  headers_ir.SetHeader("gamma", "charlie");
  headers_ir.SetHeader("cookie", "key1=value1; key2=value2");
  CheckFrameAndIRSize(&headers_ir, &framer, &output_);
  // 100KB header values force the header block across CONTINUATION frames.
  SpdyHeadersIR headers_ir_with_continuation(1);
  headers_ir_with_continuation.SetHeader("alpha", std::string(100000, 'x'));
  headers_ir_with_continuation.SetHeader("beta", std::string(100000, 'x'));
  headers_ir_with_continuation.SetHeader("cookie", "key1=value1; key2=value2");
  CheckFrameAndIRSize(&headers_ir_with_continuation, &framer, &output_);
  SpdyWindowUpdateIR window_update_ir(4, 1024);
  CheckFrameAndIRSize(&window_update_ir, &framer, &output_);
  SpdyPushPromiseIR push_promise_ir(3, 8);
  push_promise_ir.SetHeader("alpha", std::string(100000, 'x'));
  push_promise_ir.SetHeader("beta", std::string(100000, 'x'));
  push_promise_ir.SetHeader("cookie", "key1=value1; key2=value2");
  CheckFrameAndIRSize(&push_promise_ir, &framer, &output_);
  SpdyAltSvcWireFormat::AlternativeService altsvc1(
      "pid1", "host", 443, 5, SpdyAltSvcWireFormat::VersionVector());
  SpdyAltSvcWireFormat::AlternativeService altsvc2(
      "p\"=i:d", "h_\\o\"st", 123, 42, SpdyAltSvcWireFormat::VersionVector{24});
  SpdyAltSvcWireFormat::AlternativeServiceVector altsvc_vector;
  altsvc_vector.push_back(altsvc1);
  altsvc_vector.push_back(altsvc2);
  SpdyAltSvcIR altsvc_ir(0);
  altsvc_ir.set_origin("o_r|g!n");
  altsvc_ir.add_altsvc(altsvc1);
  altsvc_ir.add_altsvc(altsvc2);
  CheckFrameAndIRSize(&altsvc_ir, &framer, &output_);
  SpdyPriorityIR priority_ir(3, 1, 256, false);
  CheckFrameAndIRSize(&priority_ir, &framer, &output_);
  // Extension (unknown-type) frame.
  const char kDescription[] = "Unknown frame";
  const uint8_t kType = 0xaf;
  const uint8_t kFlags = 0x11;
  SpdyUnknownIR unknown_ir(2, kType, kFlags, kDescription);
  CheckFrameAndIRSize(&unknown_ir, &framer, &output_);
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/core/spdy_framer.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/core/spdy_framer_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
9d971230-2c49-4826-81f8-9511039c41a5 | cpp | tensorflow/tensorflow | auto_sharding | third_party/xla/xla/hlo/experimental/auto_sharding/auto_sharding.cc | third_party/xla/xla/hlo/experimental/auto_sharding/auto_sharding_test.cc | #include "xla/hlo/experimental/auto_sharding/auto_sharding.h"
#include <algorithm>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
#include <queue>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/btree_set.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_cost_graph.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_device_mesh.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_memory.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_option.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_solver.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_strategy.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_util.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_wrapper.h"
#include "xla/hlo/experimental/auto_sharding/cluster_environment.h"
#include "xla/hlo/experimental/auto_sharding/matrix.h"
#include "xla/hlo/experimental/auto_sharding/metrics.h"
#include "xla/hlo/experimental/auto_sharding/profiling_result.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/transforms/hlo_constant_splitter.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/service/buffer_value.h"
#include "xla/service/call_graph.h"
#include "xla/service/computation_layout.h"
#include "xla/service/dump.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/hlo_value.h"
#include "xla/service/optimize_input_output_buffer_alias.h"
#include "xla/service/sharding_propagation.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace spmd {
namespace {
// Multiplier for the amount of "salt" (small cost perturbation used to break
// ties between otherwise-equal sharding strategies).  0.0 disables salting.
// NOTE(review): semantics inferred from the name; usage is not visible in
// this chunk -- confirm at the call sites.
constexpr double kSaltiplier = 0.0;
}
// Returns one communication cost per strategy in `strategy_group`: the cost
// of resharding a tensor of shape `operand_shape` from that strategy's
// output sharding to `required_sharding`, as modeled by `cluster_env`.
// A tile-maximal required sharding is normalized to full replication before
// costing.  CHECK-fails if `strategy_group` is a tuple group.
std::vector<double> CommunicationReshardingCostVector(
    const StrategyGroup& strategy_group, const Shape& operand_shape,
    const HloSharding& required_sharding,
    const ClusterEnvironment& cluster_env) {
  CHECK(!strategy_group.is_tuple) << "Only works with strategy vector.";
  const HloSharding dst_sharding = required_sharding.IsTileMaximal()
                                       ? HloSharding::Replicate()
                                       : required_sharding;
  const auto& strategies = strategy_group.GetStrategies();
  std::vector<double> costs;
  costs.reserve(strategies.size());
  for (const ShardingStrategy& strategy : strategies) {
    costs.push_back(cluster_env.ReshardingCost(
        operand_shape, strategy.output_sharding, dst_sharding));
  }
  return costs;
}
// Estimates the extra memory (in bytes, relative to the source sharded size)
// needed while resharding a tensor of `shape` from `src_sharding` to
// `dst_sharding`: the maximum sharded byte-size across the source,
// destination, and — when the tile-dimension counts differ — an intermediate
// shape, minus the source sharded byte-size.
double ComputeMemoryReshardingCost(const Shape& shape,
                                   const HloSharding& src_sharding,
                                   const HloSharding& dst_sharding,
                                   const DeviceMesh& device_mesh) {
  int64_t src_n_dim = NumTileDimensions(src_sharding);
  int64_t dst_n_dim = NumTileDimensions(dst_sharding);
  int64_t src_sharded_bytes = ByteSizeOfShapeWithSharding(shape, src_sharding);
  double result = std::max(src_sharded_bytes,
                           ByteSizeOfShapeWithSharding(shape, dst_sharding));
  if (src_n_dim != dst_n_dim && src_n_dim != -1 && dst_n_dim != -1) {
    // Different tile-dimension counts may require resharding through an
    // intermediate shape; account for its footprint as well.
    absl::StatusOr<Shape> inter_shape = ComputeIntermediateShape(
        src_sharding, dst_sharding, shape, device_mesh);
    if (inter_shape.ok()) {
      std::optional<HloSharding> src_inter_sharding =
          hlo_sharding_util::ReshapeSharding(shape, *inter_shape, src_sharding);
      std::optional<HloSharding> dst_inter_sharding =
          hlo_sharding_util::ReshapeSharding(shape, *inter_shape, dst_sharding);
      if (!src_inter_sharding.has_value() || !dst_inter_sharding.has_value()) {
        // Fall back to replicated when a sharding cannot be propagated
        // through the reshape.
        src_inter_sharding = HloSharding::Replicate();
        dst_inter_sharding = HloSharding::Replicate();
      }
      result = std::max(
          result,
          static_cast<double>(std::max(
              ByteSizeOfShapeWithSharding(*inter_shape, src_inter_sharding),
              ByteSizeOfShapeWithSharding(*inter_shape, dst_inter_sharding))));
    }
  }
  return result - src_sharded_bytes;
}
// Returns, for every strategy of `strategy_group`, the extra memory needed to
// reshard an operand of `operand_shape` from that strategy's output sharding
// to `required_sharding` (see ComputeMemoryReshardingCost).
std::vector<double> MemoryReshardingCostVector(
    const StrategyGroup& strategy_group, const Shape& operand_shape,
    const HloSharding& required_sharding,
    const ClusterEnvironment& cluster_env) {
  // Memory resharding cost vectors are only defined for leaf groups.
  CHECK(!strategy_group.is_tuple) << "Only works with strategy vector.";
  // A tile-maximal target is treated as fully replicated for cost purposes.
  const HloSharding dst_sharding = required_sharding.IsTileMaximal()
                                       ? HloSharding::Replicate()
                                       : required_sharding;
  // Fail fast when the required sharding is invalid for the operand shape.
  CHECK_OK(required_sharding.Validate(operand_shape))
      << strategy_group.ToString();
  const auto& strategies = strategy_group.GetStrategies();
  std::vector<double> costs;
  costs.reserve(strategies.size());
  for (const ShardingStrategy& strategy : strategies) {
    costs.push_back(ComputeMemoryReshardingCost(operand_shape,
                                                strategy.output_sharding,
                                                dst_sharding,
                                                cluster_env.device_mesh_));
  }
  return costs;
}
std::unique_ptr<StrategyGroup> CreateLeafStrategyGroupWithoutInNodes(
const size_t instruction_id, StrategyGroups& strategy_groups) {
auto strategy_group = std::make_unique<StrategyGroup>();
strategy_group->is_tuple = false;
strategy_group->node_idx = strategy_groups.size();
strategy_groups.push_back(strategy_group.get());
strategy_group->instruction_id = instruction_id;
return strategy_group;
}
std::unique_ptr<StrategyGroup> CreateLeafStrategyGroup(
const size_t instruction_id, const HloInstruction* ins,
const StrategyMap& strategy_map, StrategyGroups& strategy_groups) {
auto strategy_group =
CreateLeafStrategyGroupWithoutInNodes(instruction_id, strategy_groups);
for (int64_t i = 0; i < ins->operand_count(); ++i) {
strategy_group->in_nodes.push_back(strategy_map.at(ins->operand(i)).get());
}
return strategy_group;
}
// Creates a tuple strategy group for instruction `instruction_id`. Tuple
// groups carry no node index of their own (node_idx = -1); their children do.
std::unique_ptr<StrategyGroup> CreateTupleStrategyGroup(
    const size_t instruction_id) {
  auto group = std::make_unique<StrategyGroup>();
  group->is_tuple = true;
  group->node_idx = -1;
  group->instruction_id = instruction_id;
  return group;
}
// For every operand of `ins`, computes communication and memory resharding
// cost vectors (one entry per strategy of that operand) toward the sharding
// the operand must have given `output_sharding`. Entries of `input_shardings`
// that are already set are respected; missing entries are inferred and filled
// in (the vector is resized to operand_count when empty). Returns
// {communication costs, memory costs}.
std::pair<ReshardingCosts, ReshardingCosts>
GenerateReshardingCostsAndMissingShardingsForAllOperands(
    const HloInstruction* ins, const HloSharding& output_sharding,
    const StrategyMap& strategy_map, const ClusterEnvironment& cluster_env,
    const CallGraph& call_graph, InputShardings& input_shardings) {
  ReshardingCosts communication_resharding_costs;
  ReshardingCosts memory_resharding_costs;
  if (input_shardings.shardings.empty() && ins->operand_count() > 0) {
    input_shardings.shardings.resize(ins->operand_count());
  }
  for (int64_t k = 0; k < ins->operand_count(); ++k) {
    const HloInstruction* operand = ins->operand(k);
    const Shape& operand_shape = operand->shape();
    const StrategyGroup& operand_strategy_group = *strategy_map.at(operand);
    const auto& operand_strategies = operand_strategy_group.GetStrategies();
    const std::vector<double> zeros(operand_strategies.size(), 0.0);
    if (operand_shape.IsToken() || operand_shape.rank() == 0) {
      // Tokens and scalars never need resharding: zero costs, replicated.
      communication_resharding_costs.push_back(zeros);
      memory_resharding_costs.push_back(zeros);
      if (!input_shardings.shardings[k].has_value()) {
        input_shardings.shardings[k] = HloSharding::Replicate();
      }
    } else {
      std::optional<HloSharding> cur_input_sharding;
      CHECK_EQ(input_shardings.shardings.size(), ins->operand_count());
      if (input_shardings.shardings[k].has_value()) {
        cur_input_sharding = input_shardings.shardings[k];
      } else {
        cur_input_sharding = GetInputSharding(
            ins, k, output_sharding, call_graph, cluster_env.NumDevices());
      }
      // If no sharding could be inferred, a few opcodes default to
      // replicated (gather operand 0, scatter non-0 operands, custom calls,
      // rng-bit-generator).
      bool is_sharding_default_replicated = false;
      if (!cur_input_sharding.has_value()) {
        if ((ins->opcode() == HloOpcode::kGather && k == 0) ||
            (ins->opcode() == HloOpcode::kScatter && k != 0)) {
          is_sharding_default_replicated = true;
          cur_input_sharding = HloSharding::Replicate();
        } else if (ins->opcode() == HloOpcode::kCustomCall) {
          is_sharding_default_replicated = true;
          cur_input_sharding = HloSharding::Replicate();
        } else if (ins->opcode() == HloOpcode::kRngBitGenerator) {
          cur_input_sharding = HloSharding::Replicate();
        }
      }
      CHECK(cur_input_sharding.has_value());
      if (!input_shardings.shardings[k].has_value()) {
        input_shardings.shardings[k] = cur_input_sharding;
      }
      if (ins->opcode() == HloOpcode::kGather && k == 0 &&
          is_sharding_default_replicated) {
        // Gather with a default-replicated operand 0: charge no resharding
        // cost and leave the input sharding unconstrained.
        VLOG(2) << "Zeroing out operand 0 resharding costs for gather sharding "
                << output_sharding.ToString();
        communication_resharding_costs.push_back(zeros);
        memory_resharding_costs.push_back(zeros);
        input_shardings.shardings[k] = std::nullopt;
      } else {
        communication_resharding_costs.push_back(
            CommunicationReshardingCostVector(
                operand_strategy_group, operand_shape, *cur_input_sharding,
                cluster_env));
        memory_resharding_costs.push_back(
            MemoryReshardingCostVector(operand_strategy_group, operand_shape,
                                       *cur_input_sharding, cluster_env));
      }
    }
  }
  return std::make_pair(communication_resharding_costs,
                        memory_resharding_costs);
}
// Wrapper around GenerateReshardingCostsAndMissingShardingsForAllOperands
// that starts with no pre-specified input shardings: every operand sharding
// is inferred, CHECK-ed to be present, and returned together with the
// communication and memory resharding cost matrices.
std::tuple<ReshardingCosts, ReshardingCosts, InputShardings>
GenerateReshardingCostsAndShardingsForAllOperands(
    const HloInstruction* ins, const HloSharding& output_sharding,
    const StrategyMap& strategy_map, const ClusterEnvironment& cluster_env,
    const CallGraph& call_graph) {
  InputShardings input_shardings_optional;
  std::pair<ReshardingCosts, ReshardingCosts> resharding_costs =
      GenerateReshardingCostsAndMissingShardingsForAllOperands(
          ins, output_sharding, strategy_map, cluster_env, call_graph,
          input_shardings_optional);
  // All shardings must have been filled in by the call above.
  for (const auto& sharding_optional : input_shardings_optional.shardings) {
    CHECK(sharding_optional.has_value());
  }
  return {resharding_costs.first, resharding_costs.second,
          input_shardings_optional};
}
// Populates `strategy_group` (for an array- or token-shaped value) by
// mirroring every strategy of `src_strategy_group`, plus any strategies
// recorded for that node in `pretrimmed_strategy_map`. When no pretrimmed
// strategies exist, `strategy_group` is marked as "following" the source
// group. Each mirrored strategy requires every input node to use the same
// sharding as the output.
void FollowArrayOrTokenStrategyGroup(
    const StrategyGroup& src_strategy_group, const Shape& shape,
    const size_t instruction_id, const ClusterEnvironment& cluster_env,
    const StableMap<NodeIdx, std::vector<ShardingStrategy>>&
        pretrimmed_strategy_map,
    StrategyGroup& strategy_group) {
  CHECK(shape.IsArray() || shape.IsToken());
  std::vector<ShardingStrategy> pretrimmed_strategies;
  auto pretrimmed_strategy_map_it =
      pretrimmed_strategy_map.find(src_strategy_group.node_idx);
  if (pretrimmed_strategy_map_it != pretrimmed_strategy_map.end()) {
    pretrimmed_strategies = pretrimmed_strategy_map_it->second;
  } else {
    strategy_group.following = &src_strategy_group;
  }
  const auto& src_strategies = src_strategy_group.GetStrategies();
  // Iterate over the source strategies first, then the pretrimmed ones.
  for (int64_t sid = 0;
       sid < src_strategies.size() + pretrimmed_strategies.size(); ++sid) {
    const HloSharding* output_spec;
    if (sid < src_strategies.size()) {
      output_spec = &src_strategies[sid].output_sharding;
    } else {
      output_spec =
          &pretrimmed_strategies[sid - src_strategies.size()].output_sharding;
      VLOG(1) << "Adding outspec from the trimmed strategy map: "
              << output_spec->ToString();
    }
    const std::string name = ToStringSimple(*output_spec);
    double compute_cost = 0, communication_cost = 0;
    double memory_cost = ByteSizeOfShapeWithSharding(shape, *output_spec);
    size_t num_in_nodes = strategy_group.in_nodes.size();
    // Every input is required to use the same sharding as the output.
    InputShardings input_shardings{name, {num_in_nodes, *output_spec}};
    ReshardingCosts communication_resharding_costs;
    ReshardingCosts memory_resharding_costs;
    for (size_t i = 0; i < strategy_group.in_nodes.size(); ++i) {
      communication_resharding_costs.push_back(
          CommunicationReshardingCostVector(*strategy_group.in_nodes[i], shape,
                                            *output_spec, cluster_env));
      memory_resharding_costs.push_back(MemoryReshardingCostVector(
          *strategy_group.in_nodes[i], shape, *output_spec, cluster_env));
    }
    strategy_group.AddStrategy(
        ShardingStrategy({*output_spec, compute_cost, communication_cost,
                          memory_cost, communication_resharding_costs,
                          memory_resharding_costs}),
        input_shardings);
  }
}
// Builds the (tuple) strategy group for a partial-reduce instruction. Each
// tuple element follows the strategies of operand 0: the operand's sharding
// is kept, except that tiling along the reduction dimension is partially
// replicated. Manual shardings are skipped.
std::unique_ptr<StrategyGroup> HandlePartialReduce(
    const HloInstruction* ins, const size_t instruction_id,
    StrategyGroups& strategy_groups, const ClusterEnvironment& cluster_env,
    StrategyMap& strategy_map, const CallGraph& call_graph) {
  absl::StatusOr<int64_t> reduction_dim = GetPartialReduceReductionDim(ins);
  CHECK_OK(reduction_dim);
  const Shape& shape = ins->shape();
  const HloInstruction* operand = ins->operand(0);
  const StrategyGroup* src_strategy_group = strategy_map.at(operand).get();
  std::unique_ptr<StrategyGroup> strategy_group =
      CreateTupleStrategyGroup(instruction_id);
  int64_t output_size = shape.tuple_shapes_size();
  for (size_t i = 0; i < output_size; ++i) {
    std::unique_ptr<StrategyGroup> child_strategy_group =
        CreateLeafStrategyGroupWithoutInNodes(instruction_id, strategy_groups);
    child_strategy_group->in_nodes.push_back(src_strategy_group);
    child_strategy_group->following = src_strategy_group;
    for (const auto& src_strategy : src_strategy_group->GetStrategies()) {
      const HloSharding& input_spec = src_strategy.output_sharding;
      // Manual shardings cannot be followed here.
      if (input_spec.IsManual() || input_spec.IsManualSubgroup()) {
        continue;
      }
      HloSharding output_spec = input_spec;
      if (!(input_spec.IsReplicated() || input_spec.IsTileMaximal())) {
        // Partially replicate the tiling along the reduction dimension.
        output_spec = hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(
            input_spec, {*reduction_dim});
      }
      std::string name = ToStringSimple(output_spec);
      InputShardings input_shardings = {std::move(name)};
      // First `output_size` operands take the input sharding; the remaining
      // `output_size` operands (presumably init values — confirm) are
      // replicated.
      for (int64_t k = 0; k < output_size * 2; ++k) {
        if (k < output_size) {
          input_shardings.shardings.push_back(input_spec);
        } else {
          input_shardings.shardings.push_back(HloSharding::Replicate());
        }
      }
      double compute_cost = 0, communication_cost = 0;
      double memory_cost = ByteSizeOfShapeWithSharding(
          ins->shape().tuple_shapes(i), output_spec);
      std::pair<ReshardingCosts, ReshardingCosts> resharding_costs =
          GenerateReshardingCostsAndMissingShardingsForAllOperands(
              ins, output_spec, strategy_map, cluster_env, call_graph,
              input_shardings);
      child_strategy_group->AddStrategy(
          ShardingStrategy({std::move(output_spec), compute_cost,
                            communication_cost, memory_cost,
                            std::move(resharding_costs.first),
                            std::move(resharding_costs.second)}),
          std::move(input_shardings));
    }
    strategy_group->AddChild(std::move(child_strategy_group));
  }
  return strategy_group;
}
// Builds a strategy group for a value of `shape` that follows
// `src_strategy_group`: tuples are handled by element-wise recursion, and
// leaves are delegated to FollowArrayOrTokenStrategyGroup.
std::unique_ptr<StrategyGroup> MaybeFollowInsStrategyGroup(
    const StrategyGroup& src_strategy_group, const Shape& shape,
    const size_t instruction_id, StrategyGroups& strategy_groups,
    const ClusterEnvironment& cluster_env,
    const StableMap<NodeIdx, std::vector<ShardingStrategy>>&
        pretrimmed_strategy_map) {
  if (!src_strategy_group.is_tuple) {
    // Leaf case: mirror the source group's strategies for this shape.
    std::unique_ptr<StrategyGroup> leaf_group =
        CreateLeafStrategyGroupWithoutInNodes(instruction_id, strategy_groups);
    leaf_group->in_nodes.push_back(&src_strategy_group);
    FollowArrayOrTokenStrategyGroup(src_strategy_group, shape, instruction_id,
                                    cluster_env, pretrimmed_strategy_map,
                                    *leaf_group);
    return leaf_group;
  }
  // Tuple case: recurse element-wise over the children.
  CHECK(shape.IsTuple());
  const auto& children = src_strategy_group.GetChildren();
  CHECK_EQ(shape.tuple_shapes_size(), children.size());
  std::unique_ptr<StrategyGroup> tuple_group =
      CreateTupleStrategyGroup(instruction_id);
  for (size_t child_idx = 0; child_idx < children.size(); ++child_idx) {
    std::unique_ptr<StrategyGroup> child_group = MaybeFollowInsStrategyGroup(
        *children[child_idx], shape.tuple_shapes(child_idx), instruction_id,
        strategy_groups, cluster_env, pretrimmed_strategy_map);
    child_group->tuple_element_idx = child_idx;
    tuple_group->AddChild(std::move(child_group));
  }
  return tuple_group;
}
// Builds the strategy group for a kReduce by following `operand`'s
// strategies. Tuple-shaped reduces recurse element-wise (element i pairs
// operand i with init value i + tuple_size). For array-shaped reduces, each
// operand strategy's output sharding is re-derived by running sharding
// inference on a cloned reduce, and an all-reduce cost is charged for every
// mesh dimension that shards a reduced tensor dimension.
absl::StatusOr<std::unique_ptr<StrategyGroup>> FollowReduceStrategy(
    const HloInstruction* ins, const Shape& output_shape,
    const HloInstruction* operand, const HloInstruction* unit,
    const size_t instruction_id, StrategyMap& strategy_map,
    StrategyGroups& strategy_groups, const ClusterEnvironment& cluster_env,
    const bool allow_mixed_mesh_shape, const bool crash_at_error) {
  std::unique_ptr<StrategyGroup> strategy_group;
  if (output_shape.IsTuple()) {
    strategy_group = CreateTupleStrategyGroup(instruction_id);
    for (size_t i = 0; i < ins->shape().tuple_shapes_size(); ++i) {
      TF_ASSIGN_OR_RETURN(
          std::unique_ptr<StrategyGroup> child_strategy,
          FollowReduceStrategy(
              ins, ins->shape().tuple_shapes().at(i), ins->operand(i),
              ins->operand(i + ins->shape().tuple_shapes_size()),
              instruction_id, strategy_map, strategy_groups, cluster_env,
              allow_mixed_mesh_shape, crash_at_error));
      child_strategy->tuple_element_idx = i;
      strategy_group->AddChild(std::move(child_strategy));
    }
  } else if (output_shape.IsArray()) {
    strategy_group = CreateLeafStrategyGroup(instruction_id, ins, strategy_map,
                                             strategy_groups);
    const StrategyGroup* src_strategy_group = strategy_map.at(operand).get();
    strategy_group->following = src_strategy_group;
    // Maps each operand dimension to its output dimension (-1 for reduced
    // dimensions).
    std::vector<int64_t> op_dim_to_output_dim =
        GetDimensionMapping(ins->dimensions(),
                            operand->shape().rank());
    CHECK_EQ(ins->dimensions().size() + output_shape.rank(),
             operand->shape().rank())
        << "Invalid kReduce: output size + reduced dimensions size != op count";
    for (const auto& src_strategy : src_strategy_group->GetStrategies()) {
      const HloSharding& input_sharding = src_strategy.output_sharding;
      const auto& tensor_dim_to_mesh = cluster_env.GetTensorDimToMeshDimWrapper(
          operand->shape(), input_sharding,
          true,
          crash_at_error);
      if (tensor_dim_to_mesh.size() != operand->shape().rank()) {
        return absl::InvalidArgumentError(
            "Cannot generate tensor dim to mesh dim mapping");
      }
      // Mesh dimensions that shard a reduced tensor dimension require an
      // all-reduce of the output.
      std::vector<int64_t> all_reduce_dims;
      for (int64_t op_dim = 0; op_dim < operand->shape().rank(); ++op_dim) {
        int64_t mesh_dim = tensor_dim_to_mesh[op_dim];
        if (mesh_dim == -1) {
          continue;
        }
        if (op_dim_to_output_dim[op_dim] == -1) {
          all_reduce_dims.push_back(mesh_dim);
        }
      }
      // Derive the output sharding by running sharding inference on a cloned
      // reduce whose operand 0 carries `input_sharding`.
      std::unique_ptr<HloInstruction> operand_clone = operand->Clone();
      std::unique_ptr<HloInstruction> unit_clone = unit->Clone();
      std::unique_ptr<HloInstruction> new_reduce = HloInstruction::CreateReduce(
          output_shape, operand_clone.get(), unit_clone.get(),
          ins->dimensions(), ins->to_apply());
      operand_clone->set_sharding(src_strategy.output_sharding);
      if (!new_reduce->ReplaceOperandWith(0, operand_clone.get()).ok()) {
        continue;
      }
      CHECK(InferReduceShardingFromOperand(new_reduce.get(), false, true));
      HloSharding output_spec = new_reduce->sharding();
      // Release the clones before computing costs.
      new_reduce.reset();
      operand_clone.reset();
      unit_clone.reset();
      const std::string name = ToStringSimple(output_spec);
      double compute_cost = 0, communication_cost = 0;
      double memory_cost =
          ByteSizeOfShapeWithSharding(output_shape, output_spec);
      for (int64_t mesh_dim : all_reduce_dims) {
        communication_cost += cluster_env.AllReduceCost(memory_cost, mesh_dim);
      }
      ReshardingCosts communication_resharding_costs;
      ReshardingCosts memory_resharding_costs;
      for (int64_t k = 0; k < ins->operand_count(); ++k) {
        const HloInstruction* cur_operand = ins->operand(k);
        const auto& operand_strategy_group = *strategy_map.at(cur_operand);
        const auto& operand_strategies = operand_strategy_group.GetStrategies();
        // Only operands with the same dimensions as the followed operand are
        // charged resharding costs; the rest get zeros.
        if (ToString(cur_operand->shape().dimensions()) ==
            ToString(operand->shape().dimensions())) {
          communication_resharding_costs.push_back(
              CommunicationReshardingCostVector(operand_strategy_group,
                                                cur_operand->shape(),
                                                input_sharding, cluster_env));
          memory_resharding_costs.push_back(MemoryReshardingCostVector(
              operand_strategy_group, cur_operand->shape(), input_sharding,
              cluster_env));
        } else {
          const std::vector<double> zeros(operand_strategies.size(), 0);
          communication_resharding_costs.push_back(zeros);
          memory_resharding_costs.push_back(zeros);
        }
      }
      const ShardingStrategy strategy = ShardingStrategy(
          {output_spec, compute_cost, communication_cost, memory_cost,
           communication_resharding_costs, memory_resharding_costs});
      strategy_group->AddStrategy(strategy, {name, {input_sharding}});
    }
  } else {
    LOG(FATAL) << "Unhandled kReduce shape: " << ins->shape().ToString();
  }
  return strategy_group;
}
std::vector<size_t> FindReplicateStrategyIndices(
const std::vector<ShardingStrategy>& strategies) {
std::vector<size_t> indices;
for (size_t i = 0; i < strategies.size(); i++) {
if (strategies.at(i).output_sharding.IsReplicated()) {
indices.push_back(i);
}
}
return indices;
}
// Builds resharding costs for an instruction whose operand is a tuple by
// forcing every tuple element to be replicated: for each element, replicated
// strategies get zero communication cost and all others get kInfinityCost;
// every memory resharding cost is zero. Also returns the corresponding
// replicated tuple sharding for the operand. CHECK-fails if some element has
// no replicated strategy.
std::tuple<ReshardingCosts, ReshardingCosts, InputShardings>
ReshardingCostsForTupleOperand(const HloInstruction* operand,
                               const StrategyGroup& operand_strategy_vector) {
  ReshardingCosts communication_resharding_costs;
  ReshardingCosts memory_resharding_costs;
  std::vector<HloSharding> tuple_element_shardings;
  for (size_t tuple_element_idx = 0;
       tuple_element_idx < operand->shape().tuple_shapes_size();
       tuple_element_idx++) {
    const StrategyGroup& tuple_element_strategy_group =
        *operand_strategy_vector.GetChildren()[tuple_element_idx];
    const auto& tuple_element_strategies =
        tuple_element_strategy_group.GetStrategies();
    std::vector<size_t> indices =
        FindReplicateStrategyIndices(tuple_element_strategies);
    CHECK_GT(indices.size(), 0)
        << "There is no replicated strategy in instruction "
        << operand->ToString() << ".\nStrategies:\n"
        << tuple_element_strategy_group.ToString();
    memory_resharding_costs.push_back(
        std::vector<double>(tuple_element_strategies.size(), 0));
    communication_resharding_costs.push_back(
        std::vector<double>(tuple_element_strategies.size(), kInfinityCost));
    tuple_element_shardings.push_back(HloSharding::Replicate());
    // Replicated strategies cost nothing to "reshard" to replicated.
    for (const size_t i : indices) {
      communication_resharding_costs.back().at(i) = 0.0;
    }
  }
  return {
      communication_resharding_costs,
      memory_resharding_costs,
      {{}, {HloSharding::Tuple(operand->shape(), tuple_element_shardings)}}};
}
// Returns a resharding-cost matrix filled with zeros: one vector per operand
// with one entry per strategy of that operand. A tuple operand of a
// kConditional/kOutfeed contributes a single zero entry; otherwise a tuple
// operand is only supported when it is the instruction's sole operand, and
// it contributes one vector per tuple element.
ReshardingCosts CreateZeroReshardingCostsForAllOperands(
    const HloInstruction* ins, const StrategyMap& strategy_map) {
  ReshardingCosts resharding_costs;
  for (size_t i = 0; i < ins->operand_count(); ++i) {
    const HloInstruction* operand = ins->operand(i);
    const StrategyGroup& operand_strategy_group = *strategy_map.at(operand);
    if (operand->shape().IsTuple()) {
      if (ins->opcode() == HloOpcode::kConditional ||
          ins->opcode() == HloOpcode::kOutfeed) {
        resharding_costs.push_back(std::vector<double>(1, 0));
      } else {
        // This branch is only reachable when there is at least one operand,
        // so the supported case is exactly one (tuple) operand. The previous
        // CHECK_EQ(ins->operand_count(), 0) could never hold here and would
        // crash even for the supported single-tuple-operand case handled by
        // the loop below; use 1, matching the identical checks elsewhere in
        // this file.
        CHECK_EQ(ins->operand_count(), 1)
            << "Do not support instructions with more than one tuple "
               "operand.";
        for (size_t tuple_element_idx = 0;
             tuple_element_idx < operand->shape().tuple_shapes_size();
             tuple_element_idx++) {
          const StrategyGroup& tuple_element_strategy_group =
              *operand_strategy_group.GetChildren().at(tuple_element_idx);
          resharding_costs.push_back(std::vector<double>(
              tuple_element_strategy_group.GetStrategies().size(), 0));
        }
      }
    } else {
      const auto& strategies = operand_strategy_group.GetStrategies();
      resharding_costs.push_back(std::vector<double>(strategies.size(), 0));
    }
  }
  return resharding_costs;
}
// Adds the single replicated-output strategy for a kOutfeed. When the
// instruction carries a sharding, the operand-0 tuple elements are resharded
// toward the matching sub-shardings (index {0, i} of the combined operand
// tuple shape; index {1} for operand 1); otherwise all element costs are
// zero. Operand 1 always contributes an empty cost vector.
void GenerateOutfeedStrategy(const HloInstruction* ins, const Shape& shape,
                             const ClusterEnvironment& cluster_env,
                             const StrategyMap& strategy_map,
                             const double replicated_penalty,
                             StrategyGroup& strategy_group) {
  HloSharding output_spec = HloSharding::Replicate();
  ReshardingCosts communication_resharding_costs;
  ReshardingCosts memory_resharding_costs;
  InputShardings input_shardings = {"R"};
  const int tuple_size = ins->operand(0)->shape().tuple_shapes_size();
  const auto& operand_strategy_group = strategy_map.at(ins->operand(0));
  const auto& operand_children = operand_strategy_group->GetChildren();
  if (ins->has_sharding()) {
    // Pull each operand's required sharding out of the instruction's own
    // sharding: index i selects tuple element i of operand 0, and -1 selects
    // operand 1.
    std::vector<Shape> operand_shapes(ins->operand_count());
    for (int i = 0; i < ins->operand_count(); ++i) {
      operand_shapes[i] = ins->operand(i)->shape();
    }
    auto all_operands_tuple_shape = ShapeUtil::MakeTupleShape(operand_shapes);
    auto get_input_sharding = [&](int index) {
      auto sharding = ins->sharding();
      if (sharding.IsTuple()) {
        return (index >= 0)
                   ? sharding.GetSubSharding(all_operands_tuple_shape,
                                             {0, static_cast<int64_t>(index)})
                   : sharding.GetSubSharding(all_operands_tuple_shape, {1});
      } else {
        return sharding;
      }
    };
    for (size_t i = 0; i < tuple_size; ++i) {
      const StrategyGroup& child = *operand_children[i];
      const Shape& tuple_shape = ins->operand(0)->shape().tuple_shapes(i);
      const HloSharding& input_sharding = get_input_sharding(i);
      input_shardings.shardings.push_back(input_sharding);
      communication_resharding_costs.push_back(
          CommunicationReshardingCostVector(child, tuple_shape, input_sharding,
                                            cluster_env));
      memory_resharding_costs.push_back(MemoryReshardingCostVector(
          child, tuple_shape, input_sharding, cluster_env));
    }
    const HloSharding& input_sharding = get_input_sharding(-1);
    input_shardings.shardings.push_back(input_sharding);
  } else {
    // No instruction sharding: all tuple-element costs are zero.
    for (size_t i = 0; i < tuple_size; ++i) {
      const StrategyGroup& child = *operand_children[i];
      const std::vector<double> zeros(child.GetStrategies().size(), 0);
      communication_resharding_costs.push_back(zeros);
      memory_resharding_costs.push_back(zeros);
    }
  }
  // Operand 1 carries no resharding costs.
  communication_resharding_costs.push_back({});
  memory_resharding_costs.push_back({});
  double memory_cost = ByteSizeOfShapeWithSharding(shape, output_spec);
  strategy_group.AddStrategy(
      ShardingStrategy({HloSharding::Replicate(), replicated_penalty, 0,
                        memory_cost, std::move(communication_resharding_costs),
                        std::move(memory_resharding_costs)}),
      input_shardings);
}
// Computes the inherent communication cost of `ins` given the chosen operand
// shardings. Only kGather is handled: a non-replicated operand 0 is charged
// one all-reduce of the output over the largest mesh dimension. Any other
// opcode is fatal.
double ComputeCommunicationCost(const HloInstruction* ins,
                                const InputShardings& operand_shardings,
                                const ClusterEnvironment& cluster_env) {
  switch (ins->opcode()) {
    case HloOpcode::kGather: {
      if (operand_shardings.shardings[0].has_value() &&
          !operand_shardings.shardings[0]->IsReplicated()) {
        auto mesh_shape = cluster_env.device_mesh_.dimensions();
        // Use the largest mesh dimension for the all-reduce estimate.
        auto mesh_dim = std::distance(
            mesh_shape.begin(),
            std::max_element(mesh_shape.begin(), mesh_shape.end()));
        return cluster_env.AllReduceCost(ByteSizeOfShape(ins->shape()),
                                         mesh_dim);
      }
      return 0;
    }
    default:
      LOG(FATAL) << "Unhandled instruction " << ins->ToString();
  }
}
// Adds replicated-output strategies for `ins`. If
// `operands_to_consider_all_strategies_for` names an operand (at most one is
// supported), one strategy is added per strategy of that operand: the operand
// keeps its own sharding while every other operand is replicated. Otherwise a
// single fully-replicated strategy is added, with all operands replicated
// (tuple operands via ReshardingCostsForTupleOperand).
void AddReplicatedStrategy(
    const HloInstruction* ins, const Shape& shape,
    const ClusterEnvironment& cluster_env, const StrategyMap& strategy_map,
    const double replicated_penalty,
    absl::flat_hash_set<int64_t> operands_to_consider_all_strategies_for,
    StrategyGroup& strategy_group) {
  HloSharding replicated_strategy = HloSharding::Replicate();
  HloSharding output_spec = replicated_strategy;
  double memory_cost = ByteSizeOfShapeWithSharding(shape, output_spec);
  CHECK_LE(operands_to_consider_all_strategies_for.size(), 1);
  if (!operands_to_consider_all_strategies_for.empty()) {
    // One strategy per strategy of the chosen operand.
    int64_t operand_to_consider_all_strategies_for =
        *operands_to_consider_all_strategies_for.begin();
    auto operand = ins->operand(operand_to_consider_all_strategies_for);
    CHECK(!operand->shape().IsTuple());
    const auto& operand_strategy_group = strategy_map.at(operand).get();
    const auto& operand_strategies = operand_strategy_group->GetStrategies();
    InputShardings input_shardings = {"R"};
    input_shardings.shardings.resize(ins->operand_count());
    std::vector<InputShardings> possible_input_shardings(
        operand_strategies.size(), input_shardings);
    std::vector<ReshardingCosts> possible_communication_resharding_costs(
        operand_strategies.size(), ReshardingCosts(ins->operand_count()));
    std::vector<ReshardingCosts> possible_memory_resharding_costs(
        operand_strategies.size(), ReshardingCosts(ins->operand_count()));
    for (int64_t k = 0; k < ins->operand_count(); ++k) {
      const HloInstruction* operand = ins->operand(k);
      const Shape& operand_shape = operand->shape();
      CHECK(!operand_shape.IsTuple());
      const StrategyGroup& operand_strategy_group = *strategy_map.at(operand);
      if (k == operand_to_consider_all_strategies_for) {
        // The chosen operand keeps each of its own candidate shardings.
        CHECK_EQ(possible_input_shardings.size(), operand_strategies.size());
        for (size_t j = 0; j < possible_input_shardings.size(); ++j) {
          const auto& operand_sharding = operand_strategies[j].output_sharding;
          possible_input_shardings[j].shardings[k] = operand_sharding;
          possible_communication_resharding_costs[j][k] =
              CommunicationReshardingCostVector(operand_strategy_group,
                                                operand_shape, operand_sharding,
                                                cluster_env);
          possible_memory_resharding_costs[j][k] =
              MemoryReshardingCostVector(operand_strategy_group, operand_shape,
                                         operand_sharding, cluster_env);
        }
      } else {
        // Every other operand is replicated in all candidate strategies.
        for (size_t j = 0; j < possible_input_shardings.size(); ++j) {
          possible_input_shardings[j].shardings[k] = replicated_strategy;
          possible_communication_resharding_costs[j][k] =
              CommunicationReshardingCostVector(
                  operand_strategy_group, operand_shape, replicated_strategy,
                  cluster_env);
          possible_memory_resharding_costs[j][k] =
              MemoryReshardingCostVector(operand_strategy_group, operand_shape,
                                         replicated_strategy, cluster_env);
        }
      }
    }
    for (size_t j = 0; j < possible_input_shardings.size(); ++j) {
      double communication_cost = ComputeCommunicationCost(
          ins, possible_input_shardings[j], cluster_env);
      strategy_group.AddStrategy(
          ShardingStrategy(
              {replicated_strategy, replicated_penalty, communication_cost,
               memory_cost,
               std::move(possible_communication_resharding_costs[j]),
               std::move(possible_memory_resharding_costs[j])}),
          std::move(possible_input_shardings[j]));
    }
  } else {
    // Single fully-replicated strategy.
    ReshardingCosts communication_resharding_costs;
    ReshardingCosts memory_resharding_costs;
    InputShardings input_shardings = {"R"};
    if (ins->operand_count() > 0 && ins->operand(0)->shape().IsTuple()) {
      CHECK_EQ(ins->operand_count(), 1)
          << "Do not support instructions with more than one tuple "
             "operand. If this CHECK fails, we will need to fix "
             "b/233412625.";
      std::tie(communication_resharding_costs, memory_resharding_costs,
               input_shardings) =
          ReshardingCostsForTupleOperand(ins->operand(0),
                                         *strategy_map.at(ins->operand(0)));
    } else {
      for (int64_t k = 0; k < ins->operand_count(); ++k) {
        const HloInstruction* operand = ins->operand(k);
        const Shape& operand_shape = operand->shape();
        const StrategyGroup& operand_strategy_group = *strategy_map.at(operand);
        const auto& operand_strategies = operand_strategy_group.GetStrategies();
        if (ins->opcode() == HloOpcode::kConditional) {
          // Conditionals are not charged operand resharding costs here.
          std::vector<double> zeros(operand_strategies.size(), 0);
          communication_resharding_costs.push_back(zeros);
          memory_resharding_costs.push_back(zeros);
        } else {
          communication_resharding_costs.push_back(
              CommunicationReshardingCostVector(operand_strategy_group,
                                                operand_shape, output_spec,
                                                cluster_env));
          memory_resharding_costs.push_back(MemoryReshardingCostVector(
              operand_strategy_group, operand_shape, output_spec, cluster_env));
          input_shardings.shardings.push_back(output_spec);
        }
      }
    }
    strategy_group.AddStrategy(
        ShardingStrategy({HloSharding::Replicate(), replicated_penalty, 0,
                          memory_cost,
                          std::move(communication_resharding_costs),
                          std::move(memory_resharding_costs)}),
        input_shardings);
  }
}
// Communication cost of sorting: when the sort dimension is the sharded
// operand dimension, the cost of one all-to-all over `mesh_sharding_dim`;
// otherwise zero.
double ComputeSortCommunicationCost(const int64_t sort_dim,
                                    const int64_t operand_sharded_dim,
                                    const int64_t mesh_sharding_dim,
                                    const Shape& shape,
                                    const ClusterEnvironment& cluster_env) {
  // Sorting along an unsharded dimension needs no communication.
  if (sort_dim != operand_sharded_dim) {
    return 0;
  }
  return cluster_env.AllToAllCost(ByteSizeOfShape(shape), mesh_sharding_dim);
}
// Enumerates all strategies that tile exactly one tensor dimension `i` of
// `shape` across exactly one mesh dimension `j` (named "S<i> @ <j>" +
// suffix). Skips size-1 mesh dimensions, dimensions smaller than the mesh
// dimension (unless allow_shardings_small_dims_across_many_devices), and
// non-divisible combinations (when only_allow_divisible). Sort/TopK
// strategies are charged an all-to-all when the sort dimension is sharded.
void EnumerateAll1DPartition(
    const HloInstruction* ins, const Shape& shape,
    const DeviceMesh& device_mesh, const ClusterEnvironment& cluster_env,
    const StrategyMap& strategy_map, const bool only_allow_divisible,
    bool allow_shardings_small_dims_across_many_devices,
    const std::string& suffix, const CallGraph& call_graph,
    StrategyGroup& strategy_group) {
  for (int64_t i = 0; i < shape.rank(); ++i) {
    for (int64_t j = 0; j < device_mesh.num_dimensions(); ++j) {
      bool small_dims_sharding_check =
          !allow_shardings_small_dims_across_many_devices &&
          shape.dimensions(i) < device_mesh.dim(j);
      bool divisibility_check =
          (only_allow_divisible &&
           !IsDivisible(shape.dimensions(i), device_mesh.dim(j)));
      if (device_mesh.dim(j) == 1 || small_dims_sharding_check ||
          divisibility_check) {
        continue;
      }
      const std::string name = absl::StrFormat("S%d @ %d", i, j) + suffix;
      HloSharding output_spec = Tile(shape, {i}, {j}, device_mesh);
      double compute_cost = 0, communication_cost = 0;
      double memory_cost = ByteSizeOfShapeWithSharding(shape, output_spec);
      ReshardingCosts communication_resharding_costs;
      ReshardingCosts memory_resharding_costs;
      InputShardings input_shardings = {name};
      if (ins->opcode() == HloOpcode::kConditional) {
        // Conditionals are charged no operand resharding costs.
        communication_resharding_costs =
            CreateZeroReshardingCostsForAllOperands(ins, strategy_map);
        memory_resharding_costs =
            CreateZeroReshardingCostsForAllOperands(ins, strategy_map);
      } else if (ins->operand_count() > 0 &&
                 ins->operand(0)->shape().IsTuple()) {
        CHECK_EQ(ins->operand_count(), 1)
            << "Do not support instructions with more than one tuple "
               "operand.";
        std::tie(communication_resharding_costs, memory_resharding_costs,
                 input_shardings) =
            ReshardingCostsForTupleOperand(ins->operand(0),
                                           *strategy_map.at(ins->operand(0)));
      } else if (ins->opcode() == HloOpcode::kRngBitGenerator &&
                 ins->operand(0)->shape().IsArray()) {
        // RngBitGenerator forces a replicated state operand.
        input_shardings.shardings.push_back(HloSharding::Replicate());
        std::tie(communication_resharding_costs, memory_resharding_costs) =
            GenerateReshardingCostsAndMissingShardingsForAllOperands(
                ins, output_spec, strategy_map, cluster_env, call_graph,
                input_shardings);
      } else {
        std::tie(communication_resharding_costs, memory_resharding_costs,
                 input_shardings) =
            GenerateReshardingCostsAndShardingsForAllOperands(
                ins, output_spec, strategy_map, cluster_env, call_graph);
      }
      if (ins->opcode() == HloOpcode::kSort) {
        auto sort_ins = xla::DynCast<HloSortInstruction>(ins);
        CHECK(sort_ins);
        communication_cost = ComputeSortCommunicationCost(
            sort_ins->sort_dimension(), i, j, shape, cluster_env);
      } else if (IsTopKCustomCall(ins)) {
        // TopK sorts along the last dimension of its operand.
        communication_cost = ComputeSortCommunicationCost(
            ins->operand(0)->shape().rank() - 1, i, j, shape, cluster_env);
      }
      strategy_group.AddStrategy(
          ShardingStrategy({output_spec, compute_cost, communication_cost,
                            memory_cost,
                            std::move(communication_resharding_costs),
                            std::move(memory_resharding_costs)}),
          input_shardings);
    }
  }
}
// Forward declaration (defined below); used by the recursive
// EnumerateAllPartition to register a strategy once all tensor dimensions
// to be tiled have been chosen.
void BuildStrategyAndCostForOp(const HloInstruction* ins, const Shape& shape,
                               const DeviceMesh& device_mesh,
                               const ClusterEnvironment& cluster_env,
                               const StrategyMap& strategy_map,
                               const CallGraph& call_graph,
                               absl::Span<const int64_t> tensor_dims,
                               StrategyGroup& strategy_group);
// Recursively enumerates strategies that tile `partition_dimensions` distinct
// tensor dimensions of `shape` across mesh dimensions 0..k-1 (in order).
// `tensor_dims` accumulates the tensor dimensions chosen so far; once full,
// BuildStrategyAndCostForOp registers the resulting strategy. Candidate
// dimensions are filtered by size and divisibility against the mesh
// dimension they would occupy.
void EnumerateAllPartition(
    const HloInstruction* ins, const Shape& shape,
    const DeviceMesh& device_mesh, const ClusterEnvironment& cluster_env,
    const StrategyMap& strategy_map, bool only_allow_divisible,
    bool allow_shardings_small_dims_across_many_devices,
    const CallGraph& call_graph, const int64_t partition_dimensions,
    const std::vector<int64_t>& tensor_dims, StrategyGroup& strategy_group) {
  const auto tensor_dims_size = tensor_dims.size();
  // Base case: all partition dimensions have been assigned.
  if (tensor_dims_size == partition_dimensions) {
    BuildStrategyAndCostForOp(ins, shape, device_mesh, cluster_env,
                              strategy_map, call_graph, tensor_dims,
                              strategy_group);
    return;
  }
  for (int64_t i = 0; i < shape.rank(); ++i) {
    // Each tensor dimension may be used at most once.
    auto tensor_it = std::find(tensor_dims.begin(), tensor_dims.end(), i);
    if (tensor_it != tensor_dims.end()) {
      continue;
    }
    if (!allow_shardings_small_dims_across_many_devices &&
        shape.dimensions(i) < device_mesh.dim(tensor_dims_size)) {
      continue;
    }
    if (only_allow_divisible &&
        !IsDivisible(shape.dimensions(i), device_mesh.dim(tensor_dims_size))) {
      continue;
    }
    std::vector<int64_t> next_tensor_dims = tensor_dims;
    next_tensor_dims.push_back(i);
    EnumerateAllPartition(
        ins, shape, device_mesh, cluster_env, strategy_map,
        only_allow_divisible, allow_shardings_small_dims_across_many_devices,
        call_graph, partition_dimensions, next_tensor_dims, strategy_group);
  }
}
// Builds a single sharding strategy for `ins` that tiles the given
// `tensor_dims` of `shape` over mesh dims 0..tensor_dims.size()-1, computes
// its memory/resharding costs, and appends it to `strategy_group`.
void BuildStrategyAndCostForOp(const HloInstruction* ins, const Shape& shape,
                               const DeviceMesh& device_mesh,
                               const ClusterEnvironment& cluster_env,
                               const StrategyMap& strategy_map,
                               const CallGraph& call_graph,
                               absl::Span<const int64_t> tensor_dims,
                               StrategyGroup& strategy_group) {
  // Tensor dim k is mapped to mesh dim k (mesh_dims = 0,1,2,...).
  std::vector<int64_t> mesh_dims(tensor_dims.size());
  std::iota(mesh_dims.begin(), mesh_dims.end(), 0);
  // Human-readable strategy name, e.g. "S{0,1} @ {0,1}".
  const std::string name =
      absl::StrFormat("S{%s} @ {%s}", absl::StrJoin(tensor_dims, ","),
                      absl::StrJoin(mesh_dims, ","));
  HloSharding output_spec = Tile(shape, tensor_dims, mesh_dims, device_mesh);
  double compute_cost = 0, communication_cost = 0;
  double memory_cost = ByteSizeOfShapeWithSharding(shape, output_spec);
  InputShardings input_shardings = {name};
  ReshardingCosts communication_resharding_costs;
  ReshardingCosts memory_resharding_costs;
  if (ins->opcode() == HloOpcode::kConditional) {
    // Conditionals get zero resharding costs toward all operand strategies.
    communication_resharding_costs =
        CreateZeroReshardingCostsForAllOperands(ins, strategy_map);
    memory_resharding_costs =
        CreateZeroReshardingCostsForAllOperands(ins, strategy_map);
  } else if (ins->operand_count() > 0 && ins->operand(0)->shape().IsTuple()) {
    // Single-tuple-operand case handled by a dedicated helper.
    CHECK_EQ(ins->operand_count(), 1)
        << "Do not support instructions with more than one tuple "
           "operand. If this CHECK fails, we will need to fix "
           "b/233412625.";
    std::tie(communication_resharding_costs, memory_resharding_costs,
             input_shardings) =
        ReshardingCostsForTupleOperand(ins->operand(0),
                                       *strategy_map.at(ins->operand(0)));
  } else {
    // General case: derive per-operand input shardings and resharding costs
    // from the chosen output sharding.
    std::tie(communication_resharding_costs, memory_resharding_costs,
             input_shardings) =
        GenerateReshardingCostsAndShardingsForAllOperands(
            ins, output_spec, strategy_map, cluster_env, call_graph);
  }
  // Sort (and TopK custom calls) incur extra communication when the sorted
  // dimension itself is sharded.
  int64_t sort_or_topk_dim = -1;
  if (ins->opcode() == HloOpcode::kSort) {
    auto sort_ins = xla::DynCast<HloSortInstruction>(ins);
    CHECK(sort_ins);
    sort_or_topk_dim = sort_ins->sort_dimension();
  } else if (IsTopKCustomCall(ins)) {
    // TopK sorts along the last dimension of its first operand.
    sort_or_topk_dim = ins->operand(0)->shape().rank() - 1;
  }
  if (sort_or_topk_dim != -1) {
    // Only charge the cost if the sort dim is among the sharded tensor dims;
    // `index` is its position, i.e. the mesh dim it is sharded over.
    if (auto index = GetIndex(tensor_dims, sort_or_topk_dim); index != -1) {
      communication_cost = ComputeSortCommunicationCost(
          sort_or_topk_dim, sort_or_topk_dim, index, shape, cluster_env);
    }
  }
  strategy_group.AddStrategy(
      ShardingStrategy({output_spec, compute_cost, communication_cost,
                        memory_cost, std::move(communication_resharding_costs),
                        std::move(memory_resharding_costs)}),
      input_shardings);
}
// Enumerates 1D sharding strategies for a reshape: each candidate tiles one
// output dimension over one mesh dimension, provided the sharding can be
// propagated back through the reshape to the (single) operand.
void EnumerateAll1DPartitionReshape(const HloInstruction* ins,
                                    const DeviceMesh& device_mesh,
                                    const ClusterEnvironment& cluster_env,
                                    const StrategyMap& strategy_map,
                                    bool only_allow_divisible,
                                    const std::string& suffix,
                                    StrategyGroup& strategy_group) {
  const HloInstruction* operand = ins->operand(0);
  const Shape& operand_shape = operand->shape();
  const StrategyGroup& operand_strategy_group = *strategy_map.at(operand);
  for (int64_t out_dim = 0; out_dim < ins->shape().rank(); ++out_dim) {
    for (int64_t mesh_dim = 0; mesh_dim < device_mesh.num_dimensions();
         ++mesh_dim) {
      const int64_t mesh_size = device_mesh.dim(mesh_dim);
      // Skip trivial mesh dims and (optionally) non-divisible splits.
      if (mesh_size == 1) {
        continue;
      }
      if (only_allow_divisible &&
          !IsDivisible(ins->shape().dimensions(out_dim), mesh_size)) {
        continue;
      }
      HloSharding output_spec =
          Tile(ins->shape(), {out_dim}, {mesh_dim}, device_mesh);
      // Derive the operand sharding implied by this output sharding; drop
      // candidates the reshape cannot propagate through.
      std::optional<HloSharding> input_spec =
          hlo_sharding_util::ReshapeSharding(ins->shape(), operand_shape,
                                             output_spec);
      if (!input_spec.has_value()) {
        continue;
      }
      // On a 1D mesh, reject input shardings that split more than one dim.
      if (cluster_env.IsDeviceMesh1D() &&
          VectorGreaterThanOneElementCount(
              input_spec->tile_assignment().dimensions()) > 1) {
        continue;
      }
      const std::string name =
          absl::StrFormat("S%d @ %d", out_dim, mesh_dim) + suffix;
      double compute_cost = 0, communication_cost = 0;
      double memory_cost =
          ByteSizeOfShapeWithSharding(ins->shape(), output_spec);
      ReshardingCosts communication_resharding_costs{
          CommunicationReshardingCostVector(
              operand_strategy_group, operand_shape, *input_spec, cluster_env)};
      ReshardingCosts memory_resharding_costs{MemoryReshardingCostVector(
          operand_strategy_group, operand_shape, *input_spec, cluster_env)};
      strategy_group.AddStrategy(
          ShardingStrategy({output_spec, compute_cost, communication_cost,
                            memory_cost,
                            std::move(communication_resharding_costs),
                            std::move(memory_resharding_costs)}),
          {name, {*input_spec}});
    }
  }
}
int64_t MaxNumTiles(const StrategyMap& strategy_map,
const HloInstruction* ins) {
const StrategyGroup* strategy_group = strategy_map.at(ins).get();
while (strategy_group->following != nullptr) {
strategy_group = strategy_group->following;
}
int64_t max_num_tiles = -1;
for (const ShardingStrategy& strategy : strategy_group->GetStrategies()) {
max_num_tiles =
std::max(max_num_tiles, strategy.output_sharding.NumTiles());
}
return max_num_tiles;
}
std::pair<int64_t, bool> ChooseOperandToFollow(
const StrategyMap& strategy_map, const InstructionDepthMap& depth_map,
const AliasMap& alias_map, const int64_t max_depth,
const HloInstruction* ins) {
auto it = alias_map.find(ins);
if (it != alias_map.end()) {
for (int64_t i = 0; i < ins->operand_count(); ++i) {
const HloInstruction* operand = ins->operand(i);
if (operand == it->second) {
return {i, false};
}
}
}
std::optional<int64_t> follow_idx;
bool tie = false;
double max_priority = -1e20;
double depth_normalizer = 0.1 / max_depth;
double range_delta = 4 * depth_normalizer;
for (int64_t i = 0; i < ins->operand_count(); ++i) {
const HloInstruction* operand = ins->operand(i);
double priority = MaxNumTiles(strategy_map, operand) +
depth_map.at(operand) * depth_normalizer;
if (priority > max_priority + range_delta) {
follow_idx = i;
tie = false;
max_priority = priority;
} else if (priority >= max_priority - range_delta) {
tie = true;
}
}
CHECK(follow_idx.has_value());
return {*follow_idx, tie};
}
bool AllowTieFollowing(const HloInstruction* ins) {
if (ins->opcode() == HloOpcode::kCompare ||
ins->opcode() == HloOpcode::kAnd) {
return false;
}
if (ins->operand_count() == 3) {
return false;
}
return true;
}
// Populates `strategy_group` with the full menu of sharding strategies for
// an array-shaped value: 1D shardings, 2D/3D multi-dimensional shardings,
// mixed mesh-shape 1D shardings, and (optionally) replication.
void FillAllStrategiesForArray(
    const HloInstruction* ins, const Shape& shape,
    const ClusterEnvironment& cluster_env, const StrategyMap& strategy_map,
    const AutoShardingOption& option, const double replicated_penalty,
    const CallGraph& call_graph, const bool only_allow_divisible,
    const bool create_replicated_strategies,
    const bool create_partially_replicated_strategies,
    StrategyGroup& strategy_group) {
  // Split one tensor dim over one mesh dim.
  if (create_partially_replicated_strategies || cluster_env.IsDeviceMesh1D()) {
    EnumerateAll1DPartition(
        ins, shape, cluster_env.device_mesh_, cluster_env, strategy_map,
        only_allow_divisible,
        option.allow_shardings_small_dims_across_many_devices, "", call_graph,
        strategy_group);
  }
  // Split two tensor dims over a 2D mesh.
  if (cluster_env.IsDeviceMesh2D()) {
    EnumerateAllPartition(ins, shape, cluster_env.device_mesh_, cluster_env,
                          strategy_map, only_allow_divisible,
                          option.allow_shardings_small_dims_across_many_devices,
                          call_graph, 2, {},
                          strategy_group);
  }
  // Split three tensor dims over a 3D mesh.
  if (cluster_env.IsDeviceMesh3D()) {
    EnumerateAllPartition(ins, shape, cluster_env.device_mesh_, cluster_env,
                          strategy_map, only_allow_divisible,
                          option.allow_shardings_small_dims_across_many_devices,
                          call_graph,
                          3, {}, strategy_group);
  }
  if (option.allow_mixed_mesh_shape && cluster_env.IsDeviceMesh2D()) {
    // Penalize the strategies added so far so the 1D mixed-mesh strategies
    // below can compete, then enumerate on the flattened 1D mesh.
    for (size_t i = 0; i < strategy_group.GetStrategies().size(); ++i) {
      strategy_group.GetStrategy(i).compute_cost += replicated_penalty * 0.8;
    }
    EnumerateAll1DPartition(
        ins, shape, cluster_env.device_mesh_1d_, cluster_env, strategy_map,
        only_allow_divisible,
        option.allow_shardings_small_dims_across_many_devices, " 1d",
        call_graph, strategy_group);
  }
  // Guarantee at least one strategy exists by falling back to replication.
  if (create_replicated_strategies || strategy_group.GetStrategies().empty()) {
    AddReplicatedStrategy(ins, shape, cluster_env, strategy_map,
                          replicated_penalty, {}, strategy_group);
  }
}
// Creates a strategy group for `ins` covering `shape`: recurses element-wise
// for tuples, enumerates array strategies via FillAllStrategiesForArray, and
// uses a single replicated strategy for tokens.
//
// Returns an error status (propagated from recursive calls) instead of
// crashing: the original called StatusOr::value(), which CHECK-fails on an
// error status and defeats the purpose of returning absl::StatusOr.
absl::StatusOr<std::unique_ptr<StrategyGroup>> CreateAllStrategiesGroup(
    const HloInstruction* ins, const Shape& shape, const size_t instruction_id,
    StrategyGroups& strategy_groups, const ClusterEnvironment& cluster_env,
    const StrategyMap& strategy_map, const AutoShardingOption& option,
    const double replicated_penalty, const CallGraph& call_graph,
    const bool only_allow_divisible, const bool create_replicated_strategies,
    const bool create_partially_replicated_strategies) {
  std::unique_ptr<StrategyGroup> strategy_group;
  if (shape.IsTuple()) {
    // Tuple: one child group per element, tagged with its element index.
    strategy_group = CreateTupleStrategyGroup(instruction_id);
    for (size_t i = 0; i < shape.tuple_shapes_size(); ++i) {
      absl::StatusOr<std::unique_ptr<StrategyGroup>> child_strategies =
          CreateAllStrategiesGroup(
              ins, shape.tuple_shapes(i), instruction_id, strategy_groups,
              cluster_env, strategy_map, option, replicated_penalty,
              call_graph, only_allow_divisible, create_replicated_strategies,
              create_partially_replicated_strategies);
      // Propagate failures to the caller instead of CHECK-failing.
      if (!child_strategies.ok()) {
        return child_strategies.status();
      }
      (*child_strategies)->tuple_element_idx = i;
      strategy_group->AddChild(*std::move(child_strategies));
    }
  } else if (shape.IsArray()) {
    strategy_group = CreateLeafStrategyGroup(instruction_id, ins, strategy_map,
                                             strategy_groups);
    FillAllStrategiesForArray(
        ins, shape, cluster_env, strategy_map, option, replicated_penalty,
        call_graph, only_allow_divisible, create_replicated_strategies,
        create_partially_replicated_strategies, *strategy_group);
  } else if (shape.IsToken()) {
    // Tokens carry no data; a replicated strategy suffices.
    strategy_group = CreateLeafStrategyGroup(instruction_id, ins, strategy_map,
                                             strategy_groups);
    AddReplicatedStrategy(ins, shape, cluster_env, strategy_map,
                          replicated_penalty, {}, *strategy_group);
  } else {
    LOG(FATAL) << "Unsupported instruction shape: " << shape.DebugString();
  }
  return strategy_group;
}
// Returns true if `partial_sharding` is consistent with `complete_sharding`:
// some dimension is split (>1) in the partial sharding and, under `strict`,
// split by exactly the same factor in the complete sharding; under
// non-strict, merely split (>1) in both.
bool ShardingIsConsistent(const HloSharding& partial_sharding,
                          const HloSharding& complete_sharding, bool strict) {
  const auto& partial_tiles = partial_sharding.tile_assignment();
  const auto& complete_tiles = complete_sharding.tile_assignment();
  if (partial_tiles.num_dimensions() > complete_tiles.num_dimensions()) {
    return false;
  }
  for (size_t i = 0; i < partial_tiles.num_dimensions(); ++i) {
    const auto partial_dim = partial_tiles.dim(i);
    const auto complete_dim = complete_tiles.dim(i);
    if (partial_dim <= 1) {
      continue;
    }
    if (strict ? (partial_dim == complete_dim) : (complete_dim > 1)) {
      return true;
    }
  }
  return false;
}
// Reconciles a strategy group with a pre-existing (user- or propagation-
// provided) sharding: if the sharding is complete, keep only matching
// strategies — or synthesize one when none matches; if it is partial, trim
// inconsistent strategies. Removed strategies are stashed in
// `pretrimmed_strategy_map` keyed by node index.
void TrimOrGenerateStrategiesBasedOnExistingSharding(
    const Shape& output_shape, const StrategyMap& strategy_map,
    const std::vector<HloInstruction*>& instructions,
    const HloSharding& existing_sharding, const ClusterEnvironment& cluster_env,
    StableMap<int64_t, std::vector<ShardingStrategy>>& pretrimmed_strategy_map,
    const CallGraph& call_graph, const bool strict,
    StrategyGroup& strategy_group) {
  if (strategy_group.is_tuple) {
    // Recurse element-wise with the matching tuple sub-sharding.
    for (size_t i = 0; i < strategy_group.GetChildren().size(); ++i) {
      TrimOrGenerateStrategiesBasedOnExistingSharding(
          output_shape.tuple_shapes(i), strategy_map, instructions,
          existing_sharding.tuple_elements().at(i), cluster_env,
          pretrimmed_strategy_map, call_graph, strict,
          strategy_group.GetChild(i));
    }
  } else {
    // Unknown shardings impose no constraint.
    if (existing_sharding.IsUnknown()) {
      return;
    }
    if (spmd::ShardingIsComplete(existing_sharding,
                                 cluster_env.device_mesh_.num_elements())) {
      // Complete sharding: the value's sharding is fully determined, so the
      // group stops following anything and is pinned to this sharding.
      strategy_group.following = nullptr;
      std::vector<std::pair<ShardingStrategy, InputShardings>> new_strategies;
      const auto& strategy_input_shardings =
          strategy_group.GetStrategyInputShardings();
      for (size_t iid = 0; iid < strategy_input_shardings.size(); ++iid) {
        const InputShardings& input_shardings = strategy_input_shardings[iid];
        const ShardingStrategy& strategy =
            strategy_group.GetStrategyForInputShardings(iid);
        if (strategy.output_sharding == existing_sharding) {
          VLOG(1) << "Keeping strategy: " << strategy.ToString();
          new_strategies.push_back({strategy, input_shardings});
        }
      }
      if (!new_strategies.empty()) {
        // At least one existing strategy matches: keep only those, stashing
        // the original full set for possible later restoration.
        pretrimmed_strategy_map[strategy_group.node_idx] =
            strategy_group.GetStrategies();
        strategy_group.ClearStrategies();
        for (const auto& [strategy, input_shardings] : new_strategies) {
          strategy_group.AddStrategy(strategy, input_shardings);
        }
      } else {
        // No strategy matches: synthesize one from the existing sharding.
        VLOG(1) << "Generate a new strategy based on user sharding.";
        std::string name = ToStringSimple(existing_sharding);
        ReshardingCosts communication_resharding_costs;
        ReshardingCosts memory_resharding_costs;
        InputShardings input_shardings = {name};
        if (!strategy_group.in_nodes.empty()) {
          HloInstruction* ins = instructions.at(strategy_group.instruction_id);
          for (size_t i = 0; i < strategy_group.in_nodes.size(); i++) {
            HloInstruction* operand =
                instructions.at(strategy_group.in_nodes.at(i)->instruction_id);
            // Infer an input sharding for each operand from this user's
            // (now fixed) output sharding.
            std::optional<HloSharding> input_sharding =
                ShardingPropagation::GetShardingFromUser(
                    *operand, *ins, 10, true, call_graph,
                    nullptr);
            StrategyGroup* operand_strategy_group =
                strategy_map.at(operand).get();
            Shape operand_shape = operand->shape();
            if (ins->opcode() == HloOpcode::kGetTupleElement) {
              // Narrow sharding, strategy group, and shape to the selected
              // tuple element.
              if (input_sharding && input_sharding->IsTuple()) {
                input_sharding = input_sharding->GetSubSharding(
                    operand->shape(), {ins->tuple_index()});
              }
              operand_strategy_group =
                  &operand_strategy_group->GetChild(ins->tuple_index());
              operand_shape = operand->shape().tuple_shapes(ins->tuple_index());
            }
            if (!input_sharding) {
              // Fallback: reuse the output sharding when it is valid for the
              // operand shape, otherwise replicate.
              if (existing_sharding.Validate(operand_shape).ok()) {
                input_sharding = existing_sharding;
              } else {
                input_sharding = HloSharding::Replicate();
              }
            }
            CHECK(input_sharding.has_value());
            input_shardings.shardings.push_back(*input_sharding);
            communication_resharding_costs.push_back(
                CommunicationReshardingCostVector(
                    *operand_strategy_group, operand_shape, *input_sharding,
                    cluster_env));
            memory_resharding_costs.push_back(MemoryReshardingCostVector(
                *operand_strategy_group, operand_shape, *input_sharding,
                cluster_env));
          }
        }
        double memory_cost =
            ByteSizeOfShapeWithSharding(output_shape, existing_sharding);
        if (!strategy_group.GetStrategies().empty()) {
          pretrimmed_strategy_map[strategy_group.node_idx] =
              strategy_group.GetStrategies();
        }
        strategy_group.ClearStrategies();
        // Zero compute/communication cost: the sharding is mandated, so the
        // solver should not be steered away from it.
        strategy_group.AddStrategy(
            ShardingStrategy({existing_sharding, 0, 0, memory_cost,
                              communication_resharding_costs,
                              memory_resharding_costs}),
            input_shardings);
      }
      if (strategy_group.GetStrategies().size() == 1) {
        // With a single forced strategy, an infinite resharding cost would
        // make the solver problem infeasible; zero such entries out.
        for (auto& operand_communication_resharding_costs :
             strategy_group.GetStrategy(0).communication_resharding_costs) {
          if (operand_communication_resharding_costs.size() == 1 &&
              operand_communication_resharding_costs[0] >= kInfinityCost) {
            operand_communication_resharding_costs[0] = 0;
          }
        }
      }
    } else if (!strategy_group.following) {
      // Partial sharding: keep strategies that are replicated, consistent
      // with the partial sharding, or effectively-replicated-but-complete.
      std::vector<std::pair<ShardingStrategy, InputShardings>> new_vector;
      const auto& strategy_input_shardings =
          strategy_group.GetStrategyInputShardings();
      for (size_t iid = 0; iid < strategy_input_shardings.size(); ++iid) {
        const InputShardings& input_shardings = strategy_input_shardings[iid];
        const ShardingStrategy& strategy =
            strategy_group.GetStrategyForInputShardings(iid);
        if (strategy.output_sharding.IsReplicated() ||
            ShardingIsConsistent(existing_sharding, strategy.output_sharding,
                                 strict) ||
            (VectorGreaterThanOneElementCount(
                 strategy.output_sharding.tile_assignment().dimensions()) ==
                 1 &&
             spmd::ShardingIsComplete(
                 strategy.output_sharding,
                 cluster_env.original_device_mesh_.num_elements()))) {
          new_vector.push_back({strategy, input_shardings});
        }
      }
      // Trim only when the filter actually removed something and at least
      // one strategy survives.
      if (!new_vector.empty() &&
          new_vector.size() != strategy_group.GetStrategies().size()) {
        strategy_group.following = nullptr;
        strategy_group.ClearStrategies();
        for (const auto& [strategy, input_shardings] : new_vector) {
          strategy_group.AddStrategy(strategy, input_shardings);
        }
      }
    }
  }
}
// Sanity-checks memory costs: the replicated strategy must cost exactly the
// full shape size, and every sharded strategy's cost times its tile count
// must be at least that full size.
void CheckMemoryCosts(const StrategyGroup& strategy_group, const Shape& shape) {
  if (strategy_group.is_tuple) {
    const auto& children = strategy_group.GetChildren();
    for (size_t i = 0; i < children.size(); i++) {
      CheckMemoryCosts(*children[i], shape.tuple_shapes().at(i));
    }
    return;
  }
  // Locate the replicated strategy and verify it equals the full byte size.
  double full_mem = 0.0;
  for (const ShardingStrategy& strategy : strategy_group.GetStrategies()) {
    if (!strategy.output_sharding.IsReplicated()) {
      continue;
    }
    full_mem = strategy.memory_cost;
    const size_t full_size = ByteSizeOfShape(shape);
    CHECK_EQ(strategy.memory_cost, full_size);
  }
  // Every sharded strategy must account for at least the full size overall.
  for (const ShardingStrategy& strategy : strategy_group.GetStrategies()) {
    if (strategy.output_sharding.IsReplicated() || !(full_mem > 0.0)) {
      continue;
    }
    CHECK_GE(strategy.memory_cost * strategy.output_sharding.NumTiles(),
             full_mem);
  }
}
// Penalizes (with infinite compute cost) strategies that shard a tensor
// dimension across more devices than the dimension has elements — unless
// doing so would leave no usable strategy.
void RemoveShardingsWhereSmallDimsShardedAcrossManyDevices(
    const Shape& shape, const bool instruction_has_user_sharding,
    StrategyGroup& strategy_group) {
  // Tuples: recurse element-wise.
  if (strategy_group.is_tuple) {
    const auto& children = strategy_group.GetChildren();
    for (size_t i = 0; i < children.size(); i++) {
      RemoveShardingsWhereSmallDimsShardedAcrossManyDevices(
          shape.tuple_shapes().at(i), instruction_has_user_sharding,
          *children[i]);
    }
    return;
  }
  // A user-pinned single strategy must be left untouched.
  if (instruction_has_user_sharding &&
      strategy_group.GetStrategies().size() == 1) {
    return;
  }
  const size_t num_strategies = strategy_group.GetStrategies().size();
  std::vector<int> invalid_strategy_indices;
  for (size_t sid = 0; sid < num_strategies; ++sid) {
    const ShardingStrategy& strategy = strategy_group.GetStrategy(sid);
    if (strategy.output_sharding.IsReplicated()) {
      continue;
    }
    const auto& tiles = strategy.output_sharding.tile_assignment();
    for (int64_t dim = 0; dim < shape.rank(); ++dim) {
      // Over-sharded: more tiles along this dim than elements.
      if (tiles.dim(dim) > 1 && tiles.dim(dim) > shape.dimensions(dim)) {
        invalid_strategy_indices.push_back(sid);
        break;
      }
    }
  }
  // Penalize rather than delete, and only if some strategy remains valid.
  if (invalid_strategy_indices.size() < num_strategies) {
    for (size_t sid : invalid_strategy_indices) {
      ShardingStrategy& strategy = strategy_group.GetStrategy(sid);
      VLOG(1) << "Removing invalid strategy: " << strategy.ToString();
      strategy.compute_cost = kInfinityCost;
    }
  }
}
// Multiplies per-execution costs by how many times the instruction runs
// (e.g. loop trip count).
void ScaleCostsWithExecutionCounts(const int64_t execution_count,
                                   StrategyGroup& strategy_group) {
  if (strategy_group.is_tuple) {
    for (const auto& child : strategy_group.GetChildren()) {
      ScaleCostsWithExecutionCounts(execution_count, *child);
    }
    return;
  }
  for (size_t sid = 0; sid < strategy_group.GetStrategies().size(); ++sid) {
    ShardingStrategy& strategy = strategy_group.GetStrategy(sid);
    strategy.compute_cost *= execution_count;
    strategy.communication_cost *= execution_count;
    // NOTE(review): memory and memory-resharding costs are not scaled here —
    // presumably because memory usage is per-execution, not cumulative;
    // confirm with the cost model.
    for (auto& operand_costs : strategy.communication_resharding_costs) {
      for (double& cost : operand_costs) {
        cost *= execution_count;
      }
    }
  }
}
std::unique_ptr<StrategyGroup> CreateElementwiseOperatorStrategies(
const size_t instruction_id, const HloInstruction* ins,
const StrategyMap& strategy_map, const ClusterEnvironment& cluster_env,
const InstructionDepthMap& depth_map, const AliasMap& alias_map,
const StableMap<int64_t, std::vector<ShardingStrategy>>&
pretrimmed_strategy_map,
const int64_t max_depth, StrategyGroups& strategy_groups,
AssociativeDotPairs& associative_dot_pairs) {
std::unique_ptr<StrategyGroup> strategy_group = CreateLeafStrategyGroup(
instruction_id, ins, strategy_map, strategy_groups);
int64_t follow_idx;
bool tie;
std::tie(follow_idx, tie) =
ChooseOperandToFollow(strategy_map, depth_map, alias_map, max_depth, ins);
if (!tie || AllowTieFollowing(ins)) {
strategy_group->following = strategy_map.at(ins->operand(follow_idx)).get();
} else {
strategy_group->following = nullptr;
}
for (int64_t i = 0; i < ins->operand_count(); ++i) {
if (strategy_group->following != nullptr && i != follow_idx) {
continue;
}
StrategyGroup* src_strategy_group = strategy_map.at(ins->operand(i)).get();
CHECK(!src_strategy_group->is_tuple);
FollowArrayOrTokenStrategyGroup(*src_strategy_group, ins->shape(),
instruction_id, cluster_env,
pretrimmed_strategy_map, *strategy_group);
}
if (ins->opcode() == HloOpcode::kAdd) {
if (ins->operand(0)->opcode() == HloOpcode::kDot &&
ins->operand(1)->opcode() == HloOpcode::kDot) {
associative_dot_pairs.push_back({strategy_map.at(ins->operand(0)).get(),
strategy_map.at(ins->operand(1)).get()});
}
}
return strategy_group;
}
// Creates the strategy group for a manually sharded instruction: each leaf
// gets a single placeholder strategy (labelled "MANUAL") with a replicated
// output spec and zero resharding costs toward every operand strategy.
std::unique_ptr<StrategyGroup> HandleManuallyShardedInstruction(
    const HloInstruction* ins, const Shape& shape, const size_t instruction_id,
    StrategyGroups& strategy_groups, StrategyMap& strategy_map) {
  std::unique_ptr<StrategyGroup> strategy_group;
  if (shape.IsTuple()) {
    // Recurse into each tuple element, tagging its element index.
    strategy_group = CreateTupleStrategyGroup(instruction_id);
    for (size_t i = 0; i < shape.tuple_shapes_size(); ++i) {
      std::unique_ptr<StrategyGroup> child_strategies =
          HandleManuallyShardedInstruction(ins, shape.tuple_shapes(i),
                                           instruction_id, strategy_groups,
                                           strategy_map);
      child_strategies->tuple_element_idx = i;
      strategy_group->AddChild(std::move(child_strategies));
    }
  } else if (shape.IsToken() || shape.IsArray()) {
    strategy_group = CreateLeafStrategyGroup(instruction_id, ins, strategy_map,
                                             strategy_groups);
    ReshardingCosts communication_resharding_costs;
    ReshardingCosts memory_resharding_costs;
    InputShardings input_shardings = {"MANUAL"};
    if (ins->operand_count() > 0 && ins->operand(0)->shape().IsTuple()) {
      // Single-tuple-operand case handled by a dedicated helper.
      CHECK_EQ(ins->operand_count(), 1)
          << "Do not support instructions with more than one tuple "
             "operand. If this CHECK fails, we will need to fix "
             "b/233412625.";
      std::tie(communication_resharding_costs, memory_resharding_costs,
               input_shardings) =
          ReshardingCostsForTupleOperand(ins->operand(0),
                                         *strategy_map.at(ins->operand(0)));
    } else {
      // Zero resharding cost toward every strategy of every operand.
      for (int64_t k = 0; k < ins->operand_count(); ++k) {
        const HloInstruction* operand = ins->operand(k);
        const StrategyGroup& operand_strategy_group = *strategy_map.at(operand);
        const auto& strategies = operand_strategy_group.GetStrategies();
        const std::vector<double> zeros(strategies.size(), 0);
        communication_resharding_costs.push_back(zeros);
        memory_resharding_costs.push_back(zeros);
      }
    }
    strategy_group->AddStrategy(
        ShardingStrategy({HloSharding::Replicate(), 0, 0,
                          static_cast<double>(ShapeUtil::ByteSizeOf(shape)),
                          std::move(communication_resharding_costs),
                          std::move(memory_resharding_costs)}),
        std::move(input_shardings));
  } else {
    LOG(FATAL) << "Unsupported instruction shape: " << shape.DebugString();
  }
  return strategy_group;
}
// Builds sharding strategies for a reshape by pushing each operand strategy
// through the reshape; falls back to generic enumeration when no operand
// strategy propagates.
std::unique_ptr<StrategyGroup> CreateReshapeStrategies(
    const size_t instruction_id, const HloInstruction* ins,
    const StrategyMap& strategy_map, const ClusterEnvironment& cluster_env,
    const bool only_allow_divisible, const double replicated_penalty,
    const AutoShardingOption& option, StrategyGroups& strategy_groups,
    const CallGraph& call_graph) {
  std::unique_ptr<StrategyGroup> strategy_group = CreateLeafStrategyGroup(
      instruction_id, ins, strategy_map, strategy_groups);
  const HloInstruction* operand = ins->operand(0);
  const StrategyGroup& operand_strategy_group = *strategy_map.at(operand);
  CHECK(!operand_strategy_group.is_tuple);
  for (const ShardingStrategy& operand_strategy :
       operand_strategy_group.GetStrategies()) {
    // Propagate the operand sharding through the reshape; skip candidates
    // that do not propagate or do not fit the device mesh.
    std::optional<HloSharding> output_sharding =
        hlo_sharding_util::ReshapeSharding(operand->shape(), ins->shape(),
                                           operand_strategy.output_sharding);
    const bool valid_candidate =
        output_sharding.has_value() &&
        IsValidTileAssignment(*output_sharding) &&
        TileAssignmentMatchesMesh(*output_sharding, cluster_env.device_mesh_);
    if (!valid_candidate) {
      continue;
    }
    const std::string name = ToStringSimple(*output_sharding);
    const double compute_cost = 0;
    const double communication_cost = 0;
    const double memory_cost =
        ByteSizeOfShapeWithSharding(ins->shape(), output_sharding);
    std::vector<double> communication_resharding_costs =
        CommunicationReshardingCostVector(
            operand_strategy_group, operand->shape(),
            operand_strategy.output_sharding, cluster_env);
    std::vector<double> memory_resharding_costs = MemoryReshardingCostVector(
        operand_strategy_group, operand->shape(),
        operand_strategy.output_sharding, cluster_env);
    strategy_group->AddStrategy(
        ShardingStrategy({*output_sharding, compute_cost, communication_cost,
                          memory_cost,
                          {communication_resharding_costs},
                          {memory_resharding_costs}}),
        {name, {operand_strategy.output_sharding}});
  }
  if (strategy_group->GetStrategies().empty()) {
    // Nothing propagated: enumerate strategies from scratch.
    VLOG(2) << "Enumerating all strategies for reshape";
    FillAllStrategiesForArray(
        ins, ins->shape(), cluster_env, strategy_map, option,
        replicated_penalty, call_graph, only_allow_divisible,
        /*create_replicated_strategies=*/true,
        /*create_partially_replicated_strategies=*/true, *strategy_group);
  }
  return strategy_group;
}
// Assembles an AutoShardingSolverRequest from the strategy groups, cost
// graph, alias set, and liveness data, then hands it to the MIP solver.
absl::StatusOr<AutoShardingSolverOutput>
CreateAutoShardingSolverRequestAndCallSolver(
    const HloModule& hlo_module, const HloLiveRange& hlo_live_range,
    const StrategyMap& strategy_map, const StrategyGroups& strategy_groups,
    const CostGraph& cost_graph, const AliasSet& alias_set,
    const std::vector<std::pair<LivenessIdx, LivenessIdx>>& node_intervals,
    const std::vector<std::pair<LivenessIdx, LivenessIdx>>& edge_intervals,
    const std::vector<absl::btree_set<int64_t>>& node_groups,
    const std::vector<absl::btree_set<int64_t>>& edge_groups,
    const std::vector<NodeStrategyIdx>& s_hint, const bool compute_iis,
    const int64_t solver_timeout_in_seconds, const AutoShardingOption& option,
    std::optional<double> max_cost, absl::string_view request_name,
    const absl::flat_hash_map<std::string, HloSharding>&
        sharding_propagation_solution,
    bool deterministic_mode) {
  // --- Global solver parameters and per-node strategy-space layout. ---
  AutoShardingSolverRequest request;
  request.set_module_name(hlo_module.name());
  request.set_num_nodes(strategy_groups.size());
  request.set_memory_budget(option.memory_budget_per_device);
  request.mutable_s_len()->Add(cost_graph.node_lens_.begin(),
                               cost_graph.node_lens_.end());
  request.mutable_s_follow()->Add(cost_graph.follow_idx_.begin(),
                                  cost_graph.follow_idx_.end());
  request.mutable_s_hint()->Add(s_hint.begin(), s_hint.end());
  request.mutable_solver_timeout()->set_solver_timeout_in_seconds(
      solver_timeout_in_seconds);
  if (option.memory_overbudget_coeff >= 0.0) {
    request.mutable_overbudget_coeff()->set_coeff(
        option.memory_overbudget_coeff);
  }
  request.set_crash_at_infinity_costs_check(!option.try_multiple_mesh_shapes);
  request.set_compute_iis(compute_iis);
  request.set_saltiplier(kSaltiplier);
  request.set_deterministic_mode(deterministic_mode);
  request.set_request_name(std::string(request_name));
  request.set_enable_memory_edge_costs(option.model_resharding_memory_costs);
  request.set_enable_output(
      option.preserve_shardings ==
      AutoShardingOption::PreserveShardingsType::kRemoveAllShardings);
  if (max_cost) {
    request.mutable_max_cost()->set_coeff(*max_cost);
  }
  // --- Serialize edge (resharding) costs, flattened row-major over the
  // strategy pair (i, j). ---
  for (const auto& [edge, edge_cost] : cost_graph.edge_costs_) {
    const auto normalized_edge_cost = Normalize(edge_cost);
    AutoShardingSolverRequest_Pair raw_edge;
    raw_edge.set_first(edge.first);
    raw_edge.set_second(edge.second);
    *request.add_edges() = raw_edge;
    AutoShardingSolverRequest_Costs rij;
    AutoShardingSolverRequest_Costs mij;
    for (NodeStrategyIdx i = 0; i < edge_cost.n_; i++) {
      for (NodeStrategyIdx j = 0; j < edge_cost.m_; j++) {
        rij.add_costs(normalized_edge_cost(i, j).communication_cost);
        mij.add_costs(normalized_edge_cost(i, j).memory_cost);
      }
    }
    request.mutable_resharding_costs()->Add(std::move(rij));
    request.mutable_memory_edge_costs()->Add(std::move(mij));
  }
  // --- Per-node costs: compute, communication, memory, and a "departure"
  // cost that is 0 when the strategy matches the sharding-propagation
  // default and 1 otherwise. ---
  const HloInstructionSequence& sequence =
      hlo_live_range.flattened_instruction_sequence();
  const std::vector<HloInstruction*>& instructions = sequence.instructions();
  int num_nodes_without_default = 0;
  for (NodeIdx node_idx = 0; node_idx < request.num_nodes(); ++node_idx) {
    const StrategyGroup* strategy_group = strategy_groups[node_idx];
    const auto instruction = instructions.at(strategy_group->instruction_id);
    const auto instruction_name = instruction->name();
    const auto opcode = HloOpcodeString(instruction->opcode());
    request.add_instruction_names(
        absl::StrCat(instruction_name, " (id: ", node_idx, ")"));
    request.add_opcodes(std::string(opcode));
    request.add_metadata_source_files(instruction->metadata().source_file());
    AutoShardingSolverRequest_Costs ci, di, mi, pi;
    AutoShardingSolverRequest_Names strategy_names;
    // Look up the sharding-propagation default for this node (narrowed to
    // the tuple element when the node represents one).
    std::optional<HloSharding> default_strategy;
    auto iter = sharding_propagation_solution.find(instruction_name);
    if (iter != sharding_propagation_solution.end()) {
      default_strategy = iter->second;
      if (strategy_group->tuple_element_idx) {
        const auto& tuple_elements = iter->second.tuple_elements();
        CHECK_LT(*strategy_group->tuple_element_idx, tuple_elements.size());
        default_strategy =
            tuple_elements.at(*strategy_group->tuple_element_idx);
      }
    }
    for (auto j = 0; j < strategy_group->GetStrategies().size(); ++j) {
      const ShardingStrategy& strategy = strategy_group->GetStrategies()[j];
      const HloSharding& sharding = strategy.output_sharding;
      ci.add_costs(strategy.compute_cost);
      di.add_costs(strategy.communication_cost +
                   cost_graph.extra_node_costs_[node_idx][j]);
      mi.add_costs(strategy.memory_cost);
      pi.add_costs(default_strategy && sharding == *default_strategy ? 0 : 1);
      strategy_names.add_names(sharding.ToString());
    }
    if (option.use_sharding_propagation_for_default_shardings &&
        *std::min_element(pi.costs().begin(), pi.costs().end()) > 0) {
      LOG(WARNING) << "No default strategy for {node_idx " << node_idx
                   << ", instruction ID " << strategy_group->instruction_id
                   << ", instruction name " << instruction_name << "}";
      ++num_nodes_without_default;
    }
    request.mutable_computation_costs()->Add(std::move(ci));
    request.mutable_communication_costs()->Add(std::move(di));
    request.mutable_memory_costs()->Add(std::move(mi));
    request.mutable_departure_costs()->Add(std::move(pi));
    request.mutable_strategy_names()->Add(std::move(strategy_names));
  }
  LOG(INFO) << "Total nodes without default: " << num_nodes_without_default;
  // --- Aliases: a 0/1 cost matrix per aliased pair (0 where shardings
  // match). Pairs whose matrix is square with a zero diagonal can instead be
  // encoded as follower relationships. ---
  std::vector<std::pair<NodeIdx, NodeIdx>> new_followers;
  for (const auto& pair : alias_set) {
    const StrategyGroup* src_strategy_group = strategy_groups[pair.first];
    const StrategyGroup* dst_strategy_group = strategy_groups[pair.second];
    const auto& src_strategies = src_strategy_group->GetStrategies();
    const auto& dst_strategies = dst_strategy_group->GetStrategies();
    Matrix<double> raw_cost(src_strategies.size(), dst_strategies.size());
    for (NodeStrategyIdx i = 0; i < src_strategies.size(); ++i) {
      for (NodeStrategyIdx j = 0; j < dst_strategies.size(); ++j) {
        if (src_strategies[i].output_sharding ==
            dst_strategies[j].output_sharding) {
          raw_cost(i, j) = 0.0;
        } else {
          raw_cost(i, j) = 1.0;
        }
      }
    }
    // Remap each endpoint through its followed node (if any), reindexing
    // the cost matrix accordingly.
    NodeIdx idx_a = pair.first;
    NodeIdx idx_b = pair.second;
    std::vector<NodeStrategyIdx> row_indices;
    std::vector<NodeStrategyIdx> col_indices;
    if (request.s_follow(idx_a) >= 0) {
      row_indices = cost_graph.reindexing_vector_.at(idx_a);
      idx_a = request.s_follow(idx_a);
    } else {
      row_indices.assign(request.s_len(idx_a), 0);
      std::iota(row_indices.begin(), row_indices.end(), 0);
    }
    if (request.s_follow(idx_b) >= 0) {
      col_indices = cost_graph.reindexing_vector_.at(idx_b);
      idx_b = request.s_follow(idx_b);
    } else {
      col_indices.assign(request.s_len(idx_b), 0);
      std::iota(col_indices.begin(), col_indices.end(), 0);
    }
    CHECK_EQ(request.s_len(idx_a), row_indices.size());
    CHECK_EQ(request.s_len(idx_b), col_indices.size());
    AutoShardingSolverRequest_Costs vij;
    for (NodeStrategyIdx i : row_indices) {
      for (NodeStrategyIdx j : col_indices) {
        vij.add_costs(raw_cost(i, j));
      }
    }
    // A square matrix with an all-zero diagonal means "same strategy index
    // on both sides is always free" — convertible to a follower edge.
    bool convertible = (row_indices.size() == col_indices.size());
    for (NodeStrategyIdx i = 0; i < row_indices.size() && convertible; ++i) {
      if (vij.costs(i * col_indices.size() + i) != 0.0) convertible = false;
    }
    if (convertible && option.allow_alias_to_follower_conversion) {
      new_followers.push_back({idx_a, idx_b});
    } else {
      AutoShardingSolverRequest_Pair alias;
      alias.set_first(idx_a);
      alias.set_second(idx_b);
      *request.add_aliases() = alias;
      request.mutable_value_costs()->Add(std::move(vij));
    }
  }
  // --- Install the new follower edges at the roots of their follow chains,
  // then path-compress all chains so each node points at its final root. ---
  auto s_follow = request.mutable_s_follow();
  for (auto [follower, followee] : new_followers) {
    while (s_follow->at(follower) >= 0) follower = s_follow->at(follower);
    while (s_follow->at(followee) >= 0) followee = s_follow->at(followee);
    if (follower != followee) s_follow->Set(follower, followee);
  }
  for (NodeIdx node_idx = 0; node_idx < request.num_nodes(); ++node_idx) {
    if (s_follow->at(node_idx) < 0) continue;
    while (s_follow->at(s_follow->at(node_idx)) >= 0) {
      s_follow->Set(node_idx, s_follow->at(s_follow->at(node_idx)));
    }
  }
  // --- Liveness intervals and reduced groups for the memory constraints. ---
  for (const auto& interval : node_intervals) {
    AutoShardingSolverRequest_Pair pair;
    pair.set_first(interval.first);
    pair.set_second(interval.second);
    *request.add_node_intervals() = std::move(pair);
  }
  for (const auto& interval : edge_intervals) {
    AutoShardingSolverRequest_Pair pair;
    pair.set_first(interval.first);
    pair.set_second(interval.second);
    *request.add_edge_intervals() = std::move(pair);
  }
  for (const auto& reduced_group : node_groups) {
    AutoShardingSolverRequest_Group group;
    group.mutable_prims()->Add(reduced_group.begin(), reduced_group.end());
    *request.add_node_groups() = std::move(group);
  }
  for (const auto& reduced_group : edge_groups) {
    AutoShardingSolverRequest_Group group;
    group.mutable_prims()->Add(reduced_group.begin(), reduced_group.end());
    *request.add_edge_groups() = std::move(group);
  }
  PopulateTemporalValues(cost_graph, request);
  return FormulateAndSolveMIPFromSolverRequest(request);
}
// Sanity-checks the shardings assigned after auto-sharding. Logs (never
// modifies the module):
//  * instructions larger than 1 GB whose sharding is incomplete or replicated;
//  * the top-10 largest operands whose sharded dimension sizes are
//    inconsistent with the consuming instruction's sharded dimension sizes.
// `total_num_devices` is the device count a "complete" sharding must cover.
void CheckHloSharding(
    const HloInstructionSequence& sequence,
    const absl::flat_hash_set<const HloInstruction*>& instructions_to_shard,
    const size_t total_num_devices) {
  const std::vector<HloInstruction*>& instructions = sequence.instructions();
  // (operand size in GB, diagnostic message) pairs, sorted by size below.
  std::vector<std::pair<size_t, std::string>> size_string;
  for (const HloInstruction* ins : instructions) {
    if (!instructions_to_shard.contains(ins) || !ins->has_sharding()) {
      continue;
    }
    if (!ins->shape().IsTuple() &&
        ins->opcode() != HloOpcode::kGetTupleElement) {
      // Use floating-point division so fractional GB sizes are not truncated
      // (consistent with the operand-size computation below).
      double size = ByteSizeOfShape(ins->shape()) / (1024.0 * 1024 * 1024);
      if ((!spmd::ShardingIsComplete(ins->sharding(), total_num_devices) ||
           ins->sharding().IsReplicated()) &&
          size > 1) {
        LOG(INFO) << "Instruction is not fully sharded: (" << size << " GB) "
                  << ins->ToString();
      }
      // NOTE: the loop guard above guarantees ins->has_sharding() here, so the
      // previously present "instruction does not have sharding" branch was
      // unreachable and has been removed.
      for (const auto& op : ins->operands()) {
        if (op->has_sharding()) {
          if (op->sharding().IsReplicated() || ins->sharding().IsReplicated()) {
            continue;
          }
          // Indices of tensor dimensions that are actually split (>1 tiles).
          const std::vector<int64_t> ins_sharded_dims =
              VectorGreaterThanOneElementIndices(
                  ins->sharding().tile_assignment().dimensions(),
                  ins->sharding().ReplicateOnLastTileDim());
          const std::vector<int64_t> op_sharded_dims =
              VectorGreaterThanOneElementIndices(
                  op->sharding().tile_assignment().dimensions(),
                  op->sharding().ReplicateOnLastTileDim());
          // Shardings are "consistent" when operand and instruction split the
          // same number of dimensions and each split dimension has the same
          // size on both sides.
          bool not_consistent = false;
          if (ins_sharded_dims.size() != op_sharded_dims.size()) {
            not_consistent = true;
          } else {
            for (size_t i = 0; i < ins_sharded_dims.size(); i++) {
              if (op->shape().dimensions().at(op_sharded_dims.at(i)) !=
                  ins->shape().dimensions().at(ins_sharded_dims.at(i))) {
                not_consistent = true;
              }
            }
          }
          if (not_consistent) {
            size_t op_size =
                ByteSizeOfShape(op->shape()) / (1024.0 * 1024 * 1024);
            std::string str = absl::StrCat("Shardings not consistent (op size ",
                                           op_size, " GB):", ins->ToString(),
                                           "\n Operand: ", op->ToString());
            size_string.push_back({op_size, std::move(str)});
          }
        } else {
          LOG(INFO) << "Instruction " << op->name()
                    << " does not have sharding.";
        }
      }
    }
  }
  // Report only the 10 largest inconsistent operands, largest first.
  struct {
    bool operator()(const std::pair<size_t, std::string>& a,
                    const std::pair<size_t, std::string>& b) const {
      return a.first > b.first;
    }
  } MemLarger;
  std::sort(size_string.begin(), size_string.end(), MemLarger);
  size_t k = 10;
  k = std::min(k, size_string.size());
  for (size_t t = 0; t < k; ++t) {
    LOG(INFO) << size_string.at(t).second;
  }
}
// Writes the solver-chosen sharding onto every instruction in `sequence` that
// has an entry in `strategy_map`. `s_val` holds the solver's chosen strategy
// index per node; `cost_graph` remapping is applied via GetShardingStrategy.
// When `last_iteration` is false, replicated shardings are deliberately left
// unset (for tuples, the whole tuple sharding is skipped if any leaf is
// replicated) so later iterations can still refine them.
void SetHloSharding(
    const HloInstructionSequence& sequence,
    const absl::flat_hash_set<const HloInstruction*>& instructions_to_shard,
    const StrategyMap& strategy_map, const CostGraph& cost_graph,
    absl::Span<const NodeStrategyIdx> s_val, bool last_iteration) {
  if (!last_iteration) {
    LOG(INFO) << "Skip setting shardings (since not the last iteration)";
  }
  const std::vector<HloInstruction*>& instructions = sequence.instructions();
  for (HloInstruction* inst : instructions) {
    if (!instructions_to_shard.contains(inst)) {
      continue;
    }
    // Side-effecting communication ops keep whatever sharding they already
    // have; they are handled by SetHloShardingPostProcessing instead.
    if (inst->opcode() == HloOpcode::kOutfeed ||
        inst->opcode() == HloOpcode::kRecv ||
        inst->opcode() == HloOpcode::kRecvDone ||
        inst->opcode() == HloOpcode::kSend ||
        inst->opcode() == HloOpcode::kSendDone) {
      continue;
    }
    auto iter = strategy_map.find(inst);
    if (iter == strategy_map.end()) {
      continue;
    }
    const StrategyGroup* strategy_group = iter->second.get();
    if (strategy_group->is_tuple) {
      const Shape& out_shape = inst->shape();
      ShapeTree<HloSharding> output_tuple_sharding(out_shape, Undefined());
      std::vector<HloSharding> output_flattened_shardings;
      std::function<void(const StrategyGroup*)> extract_tuple_shardings;
      bool set_tuple_sharding = true;
      // Depth-first flattening of the (possibly nested) strategy tree; the
      // visit order must match ShapeTree::leaves() iteration order below.
      extract_tuple_shardings = [&](const StrategyGroup* strategy_group) {
        if (strategy_group->is_tuple) {
          for (const auto& child_strategies : strategy_group->GetChildren()) {
            extract_tuple_shardings(child_strategies.get());
          }
        } else {
          NodeIdx node_idx = strategy_group->node_idx;
          NodeStrategyIdx stra_idx = s_val[node_idx];
          const auto& strategy = strategy_group->GetStrategies()[stra_idx];
          // Any replicated leaf blocks the whole tuple sharding in non-final
          // iterations.
          if (strategy.output_sharding.IsReplicated() && !last_iteration) {
            set_tuple_sharding = false;
          }
          output_flattened_shardings.push_back(strategy.output_sharding);
        }
      };
      extract_tuple_shardings(strategy_group);
      int i = 0;
      for (auto& leaf : output_tuple_sharding.leaves()) {
        leaf.second = output_flattened_shardings[i++];
      }
      if (set_tuple_sharding) {
        inst->set_sharding(HloSharding::Tuple(output_tuple_sharding));
      }
    } else {
      const HloSharding& sharding_spec =
          GetShardingStrategy(inst, strategy_map, cost_graph, s_val)
              .output_sharding;
      if (IsUndefined(sharding_spec)) {
        continue;
      }
      if (sharding_spec.IsReplicated() && !last_iteration) {
        VLOG(5) << "skip setting shardings for inst " << inst->name();
      } else {
        inst->set_sharding(sharding_spec);
      }
    }
  }
}
// Inserts resharding (collective/reshape) operations so that each operand's
// actual sharding matches the input sharding the chosen strategy expects.
// Dot/convolution operands are always fixed up; other ops are only processed
// when `insert_resharding_reshapes_for_non_dot_ops` is true. Returns an error
// if a tensor-dim-to-mesh-dim mapping cannot be derived for a dot/conv
// operand (or crashes, depending on `crash_at_error`).
absl::Status InsertReshardReshapes(
    const HloInstructionSequence& sequence,
    const absl::flat_hash_set<const HloInstruction*>& instructions_to_shard,
    const StrategyMap& strategy_map, const CostGraph& cost_graph,
    absl::Span<const NodeStrategyIdx> s_val,
    const ClusterEnvironment& cluster_env, bool crash_at_error,
    bool insert_resharding_reshapes_for_non_dot_ops,
    absl::flat_hash_map<std::string, std::vector<HloSharding>>&
        preserve_shardings) {
  const std::vector<HloInstruction*>& instructions = sequence.instructions();
  const DeviceMesh& device_mesh = cluster_env.device_mesh_;
  // Cache so identical reshard sequences are reused across instructions.
  ReshardingCache resharding_cache_entity;
  ReshardingCache* resharding_cache = &resharding_cache_entity;
  for (HloInstruction* inst : instructions) {
    if (!instructions_to_shard.contains(inst) ||
        spmd::IsSPMDShardToFullShapeCustomCall(inst)) {
      continue;
    }
    if (inst->opcode() == HloOpcode::kDot ||
        inst->opcode() == HloOpcode::kConvolution) {
      const HloInstruction* lhs = inst->operand(0);
      const HloInstruction* rhs = inst->operand(1);
      const HloSharding& lhs_sharding = lhs->sharding();
      const HloSharding& rhs_sharding = rhs->sharding();
      // Contracting (dot) / feature (conv) dimensions of each operand.
      std::vector<int64_t> lhs_con_dims;
      std::vector<int64_t> rhs_con_dims;
      if (inst->opcode() == HloOpcode::kDot) {
        const DotDimensionNumbers& dot_dnums = inst->dot_dimension_numbers();
        lhs_con_dims.push_back(dot_dnums.lhs_contracting_dimensions()[0]);
        rhs_con_dims.push_back(dot_dnums.rhs_contracting_dimensions()[0]);
      } else {
        const ConvolutionDimensionNumbers& conv_dnums =
            inst->convolution_dimension_numbers();
        lhs_con_dims.push_back(conv_dnums.input_feature_dimension());
        rhs_con_dims.push_back(conv_dnums.kernel_input_feature_dimension());
      }
      const std::vector<int64_t>& lhs_tensor_dim_to_mesh_dim =
          cluster_env.GetTensorDimToMeshDimWrapper(
              lhs->shape(), lhs_sharding,
              true, crash_at_error);
      const std::vector<int64_t>& rhs_tensor_dim_to_mesh_dim =
          cluster_env.GetTensorDimToMeshDimWrapper(
              rhs->shape(), rhs_sharding,
              true, crash_at_error);
      if (lhs_tensor_dim_to_mesh_dim.size() != lhs->shape().rank() ||
          rhs_tensor_dim_to_mesh_dim.size() != rhs->shape().rank()) {
        return absl::InvalidArgumentError(
            "Cannot generate tensor dim to mesh dim mapping");
      }
      const InputShardings& input_shardings =
          GetInputShardings(inst, strategy_map, cost_graph, s_val);
      // Skip the fix-up when the chosen strategy is an allreduce strategy and
      // neither operand's contracting dim is mapped to a mesh dim; otherwise
      // reshard both operands to the strategy's expected input shardings.
      if (absl::StrContains(input_shardings.name, "allreduce") &&
          std::any_of(lhs_con_dims.begin(), lhs_con_dims.end(),
                      [&lhs_tensor_dim_to_mesh_dim](int64_t dim) {
                        return lhs_tensor_dim_to_mesh_dim[dim] == -1;
                      }) &&
          std::any_of(rhs_con_dims.begin(), rhs_con_dims.end(),
                      [&rhs_tensor_dim_to_mesh_dim](int64_t dim) {
                        return rhs_tensor_dim_to_mesh_dim[dim] == -1;
                      })) {
      } else {
        CHECK(input_shardings.shardings.size() == 2)
            << "Dot op requires both operands to have input shardings, "
               "but get instruction: "
            << inst->ToString()
            << ", input shardings : " << input_shardings.ToString();
        if (input_shardings.shardings[0].has_value()) {
          TF_RETURN_IF_ERROR(FixMixedMeshShapeResharding(
              inst, 0, *input_shardings.shardings[0], device_mesh,
              resharding_cache));
        }
        if (input_shardings.shardings[1].has_value()) {
          TF_RETURN_IF_ERROR(FixMixedMeshShapeResharding(
              inst, 1, *input_shardings.shardings[1], device_mesh,
              resharding_cache));
        }
      }
    }
    if (!insert_resharding_reshapes_for_non_dot_ops) {
      continue;
    }
    // Communication/side-effecting ops are intentionally left untouched.
    if (inst->opcode() == HloOpcode::kOutfeed ||
        inst->opcode() == HloOpcode::kSendDone ||
        inst->opcode() == HloOpcode::kSend ||
        inst->opcode() == HloOpcode::kRecv ||
        inst->opcode() == HloOpcode::kRecvDone) {
    } else {
      if (inst->shape().IsTuple()) {
        // Nested tuples are not supported; see the kGetTupleElement case.
        if (absl::c_any_of(
                inst->shape().tuple_shapes(),
                [](const Shape& shape) { return shape.IsTuple(); })) {
          continue;
        }
        switch (inst->opcode()) {
          // Ops whose i-th operand corresponds to the i-th tuple element.
          case HloOpcode::kReduce:
          case HloOpcode::kCustomCall:
          case HloOpcode::kRngBitGenerator:
          case HloOpcode::kSort: {
            for (size_t i = 0; i < inst->shape().tuple_shapes_size(); ++i) {
              const InputShardings& input_shardings =
                  GetInputShardingsForTuple(inst, {static_cast<int64_t>(i)},
                                            strategy_map, cost_graph, s_val);
              if (input_shardings.shardings.size() > i &&
                  input_shardings.shardings[i].has_value()) {
                TF_RETURN_IF_ERROR(FixMixedMeshShapeResharding(
                    inst, i, *input_shardings.shardings[i], device_mesh,
                    resharding_cache));
              }
            }
            break;
          }
          case HloOpcode::kTuple: {
            // Each tuple element has exactly one input sharding.
            for (size_t i = 0; i < inst->shape().tuple_shapes_size(); ++i) {
              const InputShardings& input_shardings =
                  GetInputShardingsForTuple(inst, {static_cast<int64_t>(i)},
                                            strategy_map, cost_graph, s_val);
              CHECK_EQ(input_shardings.shardings.size(), 1);
              CHECK(input_shardings.shardings[0].has_value());
              TF_RETURN_IF_ERROR(FixMixedMeshShapeResharding(
                  inst, i, *input_shardings.shardings[0], device_mesh,
                  resharding_cache));
            }
            break;
          }
          case HloOpcode::kGetTupleElement: {
            // Collect the desired sharding per tuple element, then reshard
            // the GTE's tuple-producing operand in one shot.
            std::vector<std::optional<HloSharding>> dst_shardings(
                inst->shape().tuple_shapes_size(), std::nullopt);
            for (size_t i = 0; i < inst->shape().tuple_shapes_size(); ++i) {
              CHECK(!inst->shape().tuple_shapes(i).IsTuple())
                  << "We currently do not support ops with nested tuples as "
                     "output. See b/332951306.";
              const InputShardings& input_shardings =
                  GetInputShardingsForTuple(inst, {static_cast<int64_t>(i)},
                                            strategy_map, cost_graph, s_val);
              if (!input_shardings.shardings.empty() &&
                  input_shardings.shardings[0].has_value()) {
                dst_shardings[i] = *input_shardings.shardings[0];
              }
            }
            TF_RETURN_IF_ERROR(
                FixMixedMeshShapeReshardingGetTupleElementWithTupleOutput(
                    inst, dst_shardings, device_mesh));
            break;
          }
          // Ops that need no operand resharding.
          case HloOpcode::kWhile:
          case HloOpcode::kInfeed:
          case HloOpcode::kOptimizationBarrier:
          case HloOpcode::kConditional:
          case HloOpcode::kParameter: {
            break;
          }
          default:
            LOG(FATAL) << "Unhandled instruction: " + inst->ToString();
        }
      } else {
        // Non-tuple output: reshard each operand to its expected sharding.
        const InputShardings& input_shardings =
            GetInputShardings(inst, strategy_map, cost_graph, s_val);
        if (input_shardings.shardings.empty()) {
          continue;
        }
        if (inst->opcode() == HloOpcode::kGetTupleElement) {
          TF_RETURN_IF_ERROR(FixMixedMeshShapeReshardingGetTupleElement(
              inst, inst->sharding(), device_mesh, preserve_shardings));
          continue;
        }
        for (size_t i = 0; i < inst->operand_count(); ++i) {
          if (input_shardings.shardings.size() > i &&
              input_shardings.shardings[i].has_value()) {
            TF_RETURN_IF_ERROR(FixMixedMeshShapeResharding(
                inst, i, *input_shardings.shardings[i], device_mesh,
                resharding_cache));
          }
        }
      }
    }
  }
  return absl::OkStatus();
}
// Restores user-preserved shardings onto communication ops (Outfeed, Send,
// SendDone, Recv, RecvDone) after the solver has run. These ops were skipped
// by SetHloSharding, so their shardings come straight from
// `preserve_shardings` (keyed by instruction name). Outfeed/SendDone with
// multiple preserved shardings get a tuple sharding built over the operand
// tuple elements plus the token operand. Returns InternalError if an empty
// sharding vector was preserved (a bug in the preservation logic).
absl::Status SetHloShardingPostProcessing(
    const HloInstructionSequence& sequence,
    const absl::flat_hash_set<const HloInstruction*>& instructions_to_shard,
    absl::flat_hash_map<std::string, std::vector<HloSharding>>&
        preserve_shardings) {
  for (HloInstruction* inst : sequence.instructions()) {
    if (!instructions_to_shard.contains(inst) ||
        spmd::IsSPMDShardToFullShapeCustomCall(inst)) {
      continue;
    }
    auto preserved_sharding_iter = preserve_shardings.find(inst->name());
    if (preserved_sharding_iter == preserve_shardings.end()) {
      continue;
    }
    const std::vector<HloSharding>& preserved_sharding =
        preserved_sharding_iter->second;
    if (inst->opcode() == HloOpcode::kOutfeed ||
        inst->opcode() == HloOpcode::kSendDone) {
      // Consistency fix: report the (should-be-impossible) empty case as a
      // status, matching the kSend/kRecv/kRecvDone branch below, instead of
      // CHECK-crashing.
      if (preserved_sharding.empty()) {
        return absl::InternalError(
            absl::StrCat("An empty sharding was preserved for ", inst->name(),
                         ". This should be reported as a bug."));
      }
      if (preserved_sharding.size() == 1) {
        inst->set_sharding(preserved_sharding[0]);
        continue;
      }
      // Multiple preserved shardings: the sharding covers the operand tuple's
      // elements followed by the token operand, not the op's own shape.
      std::vector<Shape> tuple_elements_shape(
          inst->operand(0)->shape().tuple_shapes().begin(),
          inst->operand(0)->shape().tuple_shapes().end());
      tuple_elements_shape.push_back(inst->operand(1)->shape());
      Shape output_tuple_sharding_shape =
          ShapeUtil::MakeTupleShape(tuple_elements_shape);
      ShapeTree<HloSharding> output_tuple_sharding(output_tuple_sharding_shape,
                                                   Undefined());
      size_t i = 0;
      for (std::pair<ShapeIndex, HloSharding>& leaf :
           output_tuple_sharding.leaves()) {
        leaf.second = preserved_sharding.at(i++);
      }
      inst->set_sharding(HloSharding::Tuple(output_tuple_sharding));
    } else if (inst->opcode() == HloOpcode::kSend ||
               inst->opcode() == HloOpcode::kRecv ||
               inst->opcode() == HloOpcode::kRecvDone) {
      if (preserved_sharding.size() > 1) {
        inst->set_sharding(
            HloSharding::Tuple(inst->shape(), preserved_sharding));
        continue;
      }
      if (preserved_sharding.size() != 1) {
        return absl::InternalError(
            absl::StrCat("An empty sharding was preserved for ", inst->name(),
                         ". This should be reported as a bug."));
      }
      inst->set_sharding(preserved_sharding[0]);
    }
  }
  return absl::OkStatus();
}
std::string PrintLivenessSet(const LivenessSet& liveness_set) {
std::string str("Liveness Set\n");
for (LivenessIdx time_idx = 0; time_idx < liveness_set.size(); ++time_idx) {
std::vector<std::string> names;
names.reserve(liveness_set[time_idx].size());
for (const HloValue* value : liveness_set[time_idx]) {
names.push_back(absl::StrCat(value->instruction()->name(),
value->index().ToString()));
}
std::sort(names.begin(), names.end());
absl::StrAppend(&str, "Time ", time_idx, ": ", absl::StrJoin(names, ", "),
"\n");
}
return str;
}
std::string PrintInstructions(const HloInstructionSequence& sequence) {
std::string str;
const std::vector<HloInstruction*>& instructions = sequence.instructions();
for (size_t i = 0; i < instructions.size(); ++i) {
absl::StrAppend(&str, "Instruction ", i, ": ", instructions[i]->ToString(),
"\n");
}
return str;
}
std::string PrintStrategyMap(const StrategyMap& strategy_map,
const HloInstructionSequence& sequence) {
std::string str("Strategy Map\n");
const std::vector<HloInstruction*>& instructions = sequence.instructions();
for (size_t i = 0; i < instructions.size(); ++i) {
absl::StrAppend(&str, "Instruction ", i, ": ", instructions[i]->ToString(),
"\n", strategy_map.at(instructions[i])->ToString());
}
return str;
}
std::string PrintAutoShardingSolution(const HloInstructionSequence& sequence,
const LivenessSet& liveness_set,
const StrategyMap& strategy_map,
const StrategyGroups& strategy_groups,
const CostGraph& cost_graph,
absl::Span<const NodeStrategyIdx> s_val,
const double objective) {
std::string str("=== Auto sharding strategy ===\n");
const std::vector<HloInstruction*>& instructions = sequence.instructions();
size_t N = strategy_groups.size();
for (NodeIdx node_idx = 0; node_idx < N; ++node_idx) {
const StrategyGroup& strategy_group = *strategy_groups[node_idx];
absl::StrAppend(
&str, node_idx, " ",
ToAdaptiveString(instructions[strategy_group.instruction_id]), " ");
NodeStrategyIdx stra_idx = cost_graph.RemapIndex(node_idx, s_val[node_idx]);
const ShardingStrategy& strategy = strategy_group.GetStrategies()[stra_idx];
absl::StrAppend(&str, strategy.ToString());
if (cost_graph.follow_idx_[node_idx] >= 0) {
absl::StrAppend(&str, " follow ", cost_graph.follow_idx_[node_idx]);
}
absl::StrAppend(&str, "\n");
}
return str;
}
// Renders a memory-usage report for the chosen solution: per-time-step memory
// totals (from each live value's chosen strategy memory_cost), the peak, and
// the largest individual tensors (>100 MB) alive at the top-3 peak times.
std::string PrintSolutionMemoryUsage(const LivenessSet& liveness_set,
                                     const StrategyMap& strategy_map,
                                     const CostGraph& cost_graph,
                                     absl::Span<const NodeStrategyIdx> s_val) {
  std::string str("=== Memory ===\n");
  std::vector<std::pair<LivenessIdx, double>> time_memory_usage;
  // Recursively sums the chosen strategy's memory_cost over a (possibly
  // tuple-nested) strategy group.
  std::function<double(const StrategyGroup&)> calculate_memory_usage;
  calculate_memory_usage = [&](const StrategyGroup& strategy_group) {
    if (strategy_group.is_tuple) {
      double m = 0.0;
      for (const auto& child : strategy_group.GetChildren()) {
        m += calculate_memory_usage(*child);
      }
      return m;
    }
    NodeIdx ins_idx = strategy_group.node_idx;
    NodeStrategyIdx stra_idx = cost_graph.RemapIndex(ins_idx, s_val[ins_idx]);
    const auto& strategies = strategy_group.GetStrategies();
    const ShardingStrategy& strategy = strategies[stra_idx];
    return strategy.memory_cost;
  };
  // Accumulate per-time-step totals; VLOG(6) adds per-value detail for
  // values larger than 1 MB.
  for (LivenessIdx time_idx = 0; time_idx < liveness_set.size(); ++time_idx) {
    double mem = 0.0;
    for (const auto& val : liveness_set.at(time_idx)) {
      const HloInstruction* ins = val->instruction();
      auto tmp = calculate_memory_usage(*strategy_map.at(ins));
      mem += tmp;
      if (VLOG_IS_ON(6) && tmp / (1024 * 1024) > 1) {
        absl::StrAppend(&str, "  ", ins->name(),
                        ": mem += ", tmp / (1024 * 1024),
                        " MB; mem=", mem / (1024 * 1024), " MB\n");
      }
    }
    time_memory_usage.push_back({time_idx, mem});
    if (VLOG_IS_ON(6)) {
      absl::StrAppend(&str, "Time ", time_idx, ": ", mem / (1024 * 1024),
                      " MB\n");
    }
  }
  // Sort time steps by descending memory so the peak comes first.
  struct {
    bool operator()(std::pair<LivenessIdx, double> a,
                    std::pair<LivenessIdx, double> b) const {
      return a.second > b.second;
    }
  } TimeMemLarger;
  std::sort(time_memory_usage.begin(), time_memory_usage.end(), TimeMemLarger);
  absl::StrAppend(&str,
                  "Using memory costs from ShardingStrategy, the max memory "
                  "consumption is ",
                  time_memory_usage.front().second / (1024 * 1024 * 1024),
                  " GB at time ", time_memory_usage.front().first, "\n");
  // Collect individual tensors >100 MB alive at the top-3 peak time steps.
  size_t k = 3;
  k = std::min(k, time_memory_usage.size());
  std::vector<std::pair<std::string, double>> instruction_mem;
  for (LivenessIdx time_idx = 0; time_idx < k; time_idx++) {
    for (const auto& val : liveness_set[time_memory_usage.at(time_idx).first]) {
      const HloInstruction* ins = val->instruction();
      auto mem = calculate_memory_usage(*strategy_map.at(ins));
      if (mem > 100 * 1024 * 1024) {
        instruction_mem.push_back(
            {absl::StrCat(ins->name(), val->index().ToString()), mem});
      }
    }
  }
  // Report the 10 largest tensors, largest first.
  struct {
    bool operator()(std::pair<std::string, double> a,
                    std::pair<std::string, double> b) const {
      return a.second > b.second;
    }
  } NameMemLarger;
  std::sort(instruction_mem.begin(), instruction_mem.end(), NameMemLarger);
  size_t top_tensors = 10;
  top_tensors = std::min(top_tensors, instruction_mem.size());
  absl::StrAppend(&str, "Top ", top_tensors, " largest tensors:\n");
  for (size_t i = 0; i < top_tensors; i++) {
    absl::StrAppend(
        &str, "instruction name: ", instruction_mem.at(i).first,
        " memory usage: ", instruction_mem.at(i).second / (1024 * 1024 * 1024),
        "GB\n");
  }
  return str;
}
// Records `inst`'s sharding (flattened to a vector; tuple shardings become
// their tuple elements) into `preserve_shardings`, keyed by instruction name.
// When `save_for_copy_users` is set, the shardings of `inst`'s kCopy users
// are recorded as well. Returns UnimplementedError for shard_as/shard_like
// annotations, which auto-sharding does not support.
absl::Status SaveShardingForInstruction(
    const HloInstruction* inst, bool save_for_copy_users,
    absl::flat_hash_map<std::string, std::vector<HloSharding>>&
        preserve_shardings) {
  auto record =
      [&preserve_shardings](const HloInstruction* hlo) -> absl::Status {
    if (!hlo->has_sharding()) {
      return absl::OkStatus();  // Nothing to preserve.
    }
    const HloSharding& sharding = hlo->sharding();
    if (sharding.IsUnknown() &&
        (sharding.IsShardLike() || sharding.IsShardAs())) {
      return absl::UnimplementedError(
          "Auto-sharding currently does not support shard_as/shard_like "
          "sharding annotations");
    }
    // Tuple shardings are stored element-wise; scalar ones as a singleton.
    if (sharding.IsTuple()) {
      preserve_shardings[hlo->name()] = sharding.tuple_elements();
    } else {
      preserve_shardings[hlo->name()] = {sharding};
    }
    return absl::OkStatus();
  };
  TF_RETURN_IF_ERROR(record(inst));
  if (save_for_copy_users) {
    for (const HloInstruction* user : inst->users()) {
      if (user->opcode() == HloOpcode::kCopy) {
        TF_RETURN_IF_ERROR(record(user));
      }
    }
  }
  return absl::OkStatus();
}
// Verifies (LOG(FATAL) on violation) that every user-specified sharding in
// `preserve_shardings` survived the pass unchanged on the corresponding
// instruction. Unknown preserved shardings are treated as wildcards.
void CheckUserShardingPreservation(
    HloModule* module,
    const absl::flat_hash_map<std::string, std::vector<HloSharding>>&
        preserve_shardings) {
  for (const auto computation : module->computations()) {
    for (const auto inst : computation->instructions()) {
      if (preserve_shardings.find(inst->name()) == preserve_shardings.end()) {
        continue;
      }
      if (!inst->has_sharding()) {
        // Preserved sharding was dropped entirely.
        LOG(FATAL) << "User sharding is not preserved! Instruction with name "
                   << inst->name() << " should be: "
                   << preserve_shardings.at(inst->name())[0].ToString()
                   << "\nbut it's empty.";
      } else if (!inst->sharding().IsTuple() &&
                 !preserve_shardings.at(inst->name())[0].IsUnknown() &&
                 preserve_shardings.at(inst->name())[0] != inst->sharding()) {
        // Non-tuple sharding changed.
        LOG(FATAL) << "User sharding is not preserved! Instruction with name "
                   << inst->name() << " should be: "
                   << preserve_shardings.at(inst->name())[0].ToString()
                   << "\nbut it's: " << inst->sharding().ToString();
      } else if (inst->sharding().IsTuple()) {
        // Tuple sharding: compare element-wise against the preserved vector.
        const std::vector<HloSharding>* preserve_shardings_tuple =
            &preserve_shardings.at(inst->name());
        for (size_t i = 0; i < inst->shape().tuple_shapes_size(); i++) {
          if (!preserve_shardings_tuple->at(i).IsUnknown() &&
              preserve_shardings_tuple->at(i) !=
                  inst->sharding().tuple_elements().at(i)) {
            LOG(FATAL) << "Tuple sharding is not preserved! Instruction "
                          "with name "
                       << inst->name() << " " << i << "th tuple element "
                       << " should be: "
                       << preserve_shardings_tuple->at(i).ToString()
                       << "\nbut it's: "
                       << inst->sharding().tuple_elements().at(i).ToString();
          }
        }
      }
    }
  }
}
// Computes a lower bound on the per-device memory needed by the module: the
// maximum, over liveness time steps, of the sum of each live value's sharded
// byte size, assuming each value is divided across `num_devices` where its
// (aliased) sharding allows. Used to validate/derive the solver's memory
// budget.
int64_t MemoryBudgetLowerBound(
    const HloModule& module,
    const absl::flat_hash_set<const HloInstruction*>& instructions_to_shard,
    const LivenessSet& liveness_set, const HloAliasAnalysis& alias_analysis,
    const int64_t num_devices,
    const absl::flat_hash_map<std::string, std::vector<HloSharding>>&
        preserved_shardings) {
  // Extracts the sharding of a specific HloValue (sub-sharding for values
  // inside tuples).
  auto get_value_sharding = [](const HloValue* value) -> HloSharding {
    return !value->index().empty()
               ? value->instruction()->sharding().GetSubSharding(
                     value->instruction()->shape(), value->index())
               : value->instruction()->sharding();
  };
  // For each alias buffer, remember one sharded value; aliased values share
  // memory, so one sharding stands in for the whole buffer.
  absl::flat_hash_map<HloBuffer::Id, const HloValue*>
      buffer_to_sharded_value_mapping;
  bool vlog_is_on_5 = VLOG_IS_ON(5);
  for (const HloBuffer& buffer : alias_analysis.buffers()) {
    for (const HloValue* value : buffer.values()) {
      if (value->instruction()->has_sharding()) {
        if (vlog_is_on_5) {
          // Diagnostic only: warn when aliased values disagree on sharding,
          // which makes this estimate less accurate.
          const HloSharding& this_value_sharding = get_value_sharding(value);
          auto iter = buffer_to_sharded_value_mapping.find(buffer.id());
          if (iter != buffer_to_sharded_value_mapping.end()) {
            const HloSharding& buffer_value_sharding =
                get_value_sharding(iter->second);
            if (this_value_sharding != buffer_value_sharding) {
              VLOG(1)
                  << "We have a situation where two HloValues alias, but "
                     "they have different shardings. This can happen in the "
                     "presence of user-specified shardings, and is expected. "
                     "This, however, means that the memory budget estimate "
                     "is not very accurate. The aliasing HLOs are "
                  << value->ToShortString() << " and "
                  << iter->second->ToShortString();
            }
          }
        }
        buffer_to_sharded_value_mapping[buffer.id()] = value;
      }
    }
  }
  int64_t max_memory_usage = 0;
  // Cache per-value sharded sizes; a value may be live at many time steps.
  absl::flat_hash_map<const HloValue*, int64_t> value_to_memory_size_mapping;
  for (LivenessIdx time_idx = 0; time_idx < liveness_set.size(); ++time_idx) {
    int64_t memory_usage = 0;
    for (const HloValue* value : liveness_set[time_idx]) {
      // Skip top-level tuple values; their elements are counted separately.
      if (value->instruction()->shape().IsTuple() && value->index().empty()) {
        continue;
      }
      // Values outside the sharded set are counted at full (unsharded) size.
      if (!instructions_to_shard.contains(value->instruction())) {
        memory_usage += ShapeUtil::ByteSizeOf(value->shape());
        continue;
      }
      auto iter1 = value_to_memory_size_mapping.find(value);
      if (iter1 != value_to_memory_size_mapping.end()) {
        memory_usage += iter1->second;
        continue;
      }
      std::optional<HloSharding> optional_sharding = std::nullopt;
      const HloBuffer& buffer = alias_analysis.GetBufferContainingValue(*value);
      auto iter2 = buffer_to_sharded_value_mapping.find(buffer.id());
      if (iter2 != buffer_to_sharded_value_mapping.end()) {
        // User-preserved shardings are always honored; otherwise only use a
        // sharding that is untiled or fully covers all devices.
        if (preserved_shardings.find(value->instruction()->name()) !=
            preserved_shardings.end()) {
          optional_sharding = get_value_sharding(iter2->second);
        } else {
          const HloSharding& value_sharding = get_value_sharding(iter2->second);
          if (!value_sharding.IsTiled() ||
              value_sharding.TotalNumTiles() == num_devices) {
            optional_sharding = value_sharding;
          }
        }
      }
      const Shape& shape =
          ShapeUtil::GetSubshape(value->instruction()->shape(), value->index());
      int64_t value_memory_usage = ByteSizeOfShapeIfShardedAcrossDevices(
          shape, num_devices, optional_sharding);
      value_to_memory_size_mapping[value] = value_memory_usage;
      memory_usage += value_memory_usage;
    }
    max_memory_usage = std::max(max_memory_usage, memory_usage);
  }
  return max_memory_usage;
}
// Re-applies user-preserved shardings (keyed by instruction name) to the
// instructions of `sequence`, e.g. after a partial-mesh solve cleared them.
// Tuple-shaped instructions — and Outfeeds with more than one preserved
// sharding — get a tuple sharding rebuilt leaf-by-leaf; everything else gets
// the single preserved sharding.
void RecoverShardingsFromPartialMesh(
    const HloInstructionSequence& sequence,
    const absl::flat_hash_map<std::string, std::vector<HloSharding>>&
        preserve_shardings) {
  for (HloInstruction* ins : sequence.instructions()) {
    const auto saved_iter = preserve_shardings.find(ins->name());
    if (saved_iter == preserve_shardings.end()) {
      continue;
    }
    const std::vector<HloSharding>& saved = saved_iter->second;
    const bool multi_sharding_outfeed =
        ins->opcode() == HloOpcode::kOutfeed && saved.size() > 1;
    if (!ins->shape().IsTuple() && !multi_sharding_outfeed) {
      // Simple case: a single preserved sharding for a non-tuple result.
      ins->set_sharding(saved.at(0));
      continue;
    }
    // An Outfeed's sharding covers its operand tuple's elements plus the
    // token operand, not the op's own shape.
    Shape tuple_sharding_shape = ins->shape();
    if (ins->opcode() == HloOpcode::kOutfeed) {
      std::vector<Shape> element_shapes(
          ins->operand(0)->shape().tuple_shapes().begin(),
          ins->operand(0)->shape().tuple_shapes().end());
      element_shapes.push_back(ins->operand(1)->shape());
      tuple_sharding_shape = ShapeUtil::MakeTupleShape(element_shapes);
    }
    ShapeTree<HloSharding> tuple_sharding(tuple_sharding_shape, Undefined());
    size_t leaf_idx = 0;
    for (auto& leaf : tuple_sharding.leaves()) {
      leaf.second = saved.at(leaf_idx++);
    }
    ins->set_sharding(HloSharding::Tuple(tuple_sharding));
  }
}
// Depth-first search that grows the set of instructions sharing `strategy`'s
// output sharding around `cur` (used by GenerateReduceScatter). Populates:
//  * replicated_set — instructions whose users all keep the same sharding,
//  * boundary_set   — instructions where propagation stops (a user changes
//                     sharding, is a tuple, or changes dimensions),
//  * consumer_set   — users reached while expanding forward.
// `transpose_inst` tracks an encountered transpose so later shape checks can
// compare against the transposed shape; `visited` prevents revisiting.
void FindReplicateSet(
    HloInstruction* cur, const AliasMap& alias_map, const CostGraph& cost_graph,
    absl::Span<const NodeStrategyIdx> s_val, const StrategyMap& strategy_map,
    const ShardingStrategy& strategy, const HloInstruction* output,
    const bool do_all_gather_after_backward, HloInstruction*& transpose_inst,
    InstructionSet& replicated_set, InstructionSet& boundary_set,
    InstructionSet& consumer_set, ConstInstructionSet& visited) {
  visited.insert(cur);
  // Users, following parameter aliasing through `output`.
  InstructionSet users = UsersWithAlias(cur, alias_map, output);
  for (HloInstruction* consumer : users) {
    const HloInstruction* shape_inst = cur;
    // Allow a single transpose in the chain: compare shapes against it.
    if (consumer->opcode() == HloOpcode::kTranspose &&
        (transpose_inst == nullptr ||
         DimensionsEqual(transpose_inst->shape(), consumer->shape()))) {
      shape_inst = consumer;
      transpose_inst = consumer;
    }
    // Any incompatible user makes `cur` a boundary node; stop expanding here.
    if (consumer->opcode() == HloOpcode::kTuple ||
        (do_all_gather_after_backward && IsParameterConvert(consumer)) ||
        GetShardingStrategy(consumer, strategy_map, cost_graph, s_val)
                .output_sharding != strategy.output_sharding ||
        !DimensionsEqual(consumer->shape(), shape_inst->shape())) {
      boundary_set.insert(cur);
      return;
    }
  }
  // All users are compatible: `cur` joins the replicated set.
  replicated_set.insert(cur);
  // Expand forward through unvisited users ...
  for (HloInstruction* consumer : users) {
    if (!visited.contains(consumer)) {
      consumer_set.insert(consumer);
      FindReplicateSet(consumer, alias_map, cost_graph, s_val, strategy_map,
                       strategy, output, do_all_gather_after_backward,
                       transpose_inst, replicated_set, boundary_set,
                       consumer_set, visited);
    }
  }
  // ... and backward through operands that share the sharding and shape.
  for (size_t i = 0; i < cur->operand_count(); ++i) {
    HloInstruction* operand = cur->mutable_operand(i);
    if (!visited.contains(operand) && !IsAlwaysReplicated(operand) &&
        GetShardingStrategy(operand, strategy_map, cost_graph, s_val)
                .output_sharding == strategy.output_sharding &&
        DimensionsEqual(operand->shape(), cur->shape())) {
      FindReplicateSet(operand, alias_map, cost_graph, s_val, strategy_map,
                       strategy, output, do_all_gather_after_backward,
                       transpose_inst, replicated_set, boundary_set,
                       consumer_set, visited);
    }
  }
}
// Heuristically rewrites all-reduce patterns into reduce-scatter + all-gather
// where profitable: for each instruction whose chosen strategy involves an
// all-reduce, it grows a set of surrounding instructions that share the same
// sharding (FindReplicateSet), re-shards that set to the reduce-scatter
// output sharding, and inserts reshape-based all-gathers where the sharding
// must be undone.
absl::Status GenerateReduceScatter(
    const HloInstructionSequence& sequence, const AliasMap& alias_map,
    const InstructionDepthMap& depth_map, const StrategyMap& strategy_map,
    const CostGraph& cost_graph, absl::Span<const NodeStrategyIdx> s_val,
    const ClusterEnvironment& cluster_env, const AutoShardingOption& option) {
  const std::vector<HloInstruction*>& instructions = sequence.instructions();
  const HloInstruction* output = instructions.back();
  bool do_all_gather_after_backward = true;
  bool use_all_reduce_for_grad_acc = option.reduce_scatter_grad_acc_friendly;
  // Instructions that need an all-gather (inserted as reshapes at the end).
  std::vector<HloInstruction*> insert_all_gather;
  ConstInstructionSet modified;
  for (HloInstruction* inst : instructions) {
    if (!HasReduceScatterOpportunity(inst, strategy_map, cost_graph, s_val,
                                     modified)) {
      continue;
    }
    const ShardingStrategy& strategy =
        GetShardingStrategy(inst, strategy_map, cost_graph, s_val);
    const InputShardings& input_shardings =
        GetInputShardings(inst, strategy_map, cost_graph, s_val);
    // Only strategies that perform an all-reduce can be converted.
    if (!absl::StrContains(input_shardings.name, "allreduce")) {
      continue;
    }
    InstructionSet replicated_set;
    InstructionSet boundary_set;
    InstructionSet consumer_set;
    ConstInstructionSet visited;
    HloInstruction* transpose_inst = nullptr;
    // The root is never expanded into.
    visited.insert(output);
    FindReplicateSet(inst, alias_map, cost_graph, s_val, strategy_map, strategy,
                     output, do_all_gather_after_backward, transpose_inst,
                     replicated_set, boundary_set, consumer_set, visited);
    TryReduceWithCommonAncestor(replicated_set, boundary_set, consumer_set,
                                alias_map);
    // Boundary nodes that are also consumed inside the set either join the
    // set (if all their users are reduces) or require an all-gather.
    std::vector<HloInstruction*> need_all_gather;
    for (HloInstruction* node : boundary_set) {
      if (consumer_set.contains(node)) {
        if (AllUsersAreReduce(node)) {
          replicated_set.insert(node);
        } else {
          need_all_gather.push_back(node);
        }
      }
    }
    // If the single all-gather point chains back (through GTEs) to a
    // parameter, move the all-gather to the aliased instruction instead so it
    // happens after the backward pass.
    if (do_all_gather_after_backward && need_all_gather.size() == 1) {
      HloInstruction* point = need_all_gather.front();
      std::vector<HloInstruction*> path;
      HloInstruction* root = point;
      while (true) {
        path.push_back(root);
        if (root->opcode() == HloOpcode::kGetTupleElement) {
          root = root->mutable_operand(0);
        } else {
          break;
        }
      }
      if (root->opcode() == HloOpcode::kParameter) {
        for (auto x : path) {
          replicated_set.erase(x);
          boundary_set.erase(x);
        }
        need_all_gather.clear();
        for (auto x : replicated_set) {
          auto iter = alias_map.find(x);
          if (iter != alias_map.end() && iter->second == root) {
            boundary_set.insert(x);
            need_all_gather.push_back(x);
            break;
          }
        }
      }
    }
    // Count parameters touched — the conversion only pays off when weights
    // are involved.
    int num_replicated_parameters = 0;
    for (const HloInstruction* node : replicated_set) {
      if (node->opcode() == HloOpcode::kParameter) {
        num_replicated_parameters++;
      }
    }
    for (const HloInstruction* to_split : need_all_gather) {
      if (to_split->users().size() == 1 &&
          to_split->users().front() == output && alias_map.contains(to_split)) {
        num_replicated_parameters++;
      }
    }
    VLOG(10) << inst->ToString(HloPrintOptions::ShortParsable()) << "\n";
    VLOG(10) << "replicated set (#parameter: " << num_replicated_parameters
             << "):\n";
    for (auto x : replicated_set) {
      VLOG(10) << "  " << x->ToString(HloPrintOptions::ShortParsable()) << "\n";
    }
    VLOG(10) << "boundary set (#incompatible: " << need_all_gather.size()
             << "):\n";
    for (auto x : boundary_set) {
      VLOG(10) << "  " << x->ToString(HloPrintOptions::ShortParsable()) << " "
               << absl::c_linear_search(need_all_gather, x) << "\n";
    }
    // Profitability threshold: at least one parameter, at most one
    // all-gather, and a reasonably large replicated set.
    if (num_replicated_parameters >= 1 && need_all_gather.size() <= 1 &&
        replicated_set.size() >= 5) {
      HloSharding output_spec =
          GetReduceScatterOutput(inst, input_shardings, strategy, cluster_env);
      if (IsUndefined(output_spec)) {
        continue;
      }
      VLOG(10) << "SET:  " << output_spec.ToString();
      if (absl::StartsWith(input_shardings.name, "RR = RS x SR")) {
        replicated_set.erase(inst);
      }
      if (use_all_reduce_for_grad_acc) {
        UseAllReduceForGradAcc(replicated_set, inst);
      }
      // Apply the reduce-scatter output sharding to the whole set.
      for (HloInstruction* to_split : replicated_set) {
        SetSharding(to_split, output_spec, inst, transpose_inst, modified);
      }
      if (!option.reduce_scatter_aggressive_partition) {
        // Conservative: all-gather at each boundary (or its alias when the
        // all-gather should happen after the backward pass).
        for (HloInstruction* to_split : need_all_gather) {
          SetSharding(to_split, output_spec, inst, transpose_inst, modified);
          if (!do_all_gather_after_backward && to_split->users().size() == 1 &&
              to_split->users().front() == output &&
              alias_map.contains(to_split)) {
            SetSharding(alias_map.at(to_split), output_spec, inst,
                        transpose_inst, modified);
            insert_all_gather.push_back(alias_map.at(to_split));
          } else {
            insert_all_gather.push_back(to_split);
          }
        }
      } else {
        // Aggressive: also partition the aliased parameter's single-user
        // chain, inserting an identity custom-call before the first dot/conv
        // user so the partitioned value is materialized there.
        for (HloInstruction* to_split : need_all_gather) {
          SetSharding(to_split, output_spec, inst, transpose_inst, modified);
          if (to_split->users().size() == 1 &&
              to_split->users().front() == output &&
              alias_map.contains(to_split)) {
            HloInstruction* param = alias_map.at(to_split);
            HloInstruction* cur = param;
            while (cur->users().size() == 1) {
              CHECK(cur->shape().IsArray());
              SetSharding(cur, output_spec, inst, transpose_inst, modified);
              cur = cur->users().front();
            }
            SetSharding(cur, output_spec, inst, transpose_inst, modified);
            CHECK(!cur->users().empty());
            // Pick the shallowest dot/convolution user as insertion point.
            HloInstruction* first_user = nullptr;
            int64_t min_depth = ((int64_t)1) << 50;
            for (const auto& x : cur->users()) {
              auto iter = depth_map.find(x);
              if (iter == depth_map.end()) {
                LOG(FATAL) << "ERROR: " << x->ToString();
              }
              if (x->opcode() != HloOpcode::kConvolution &&
                  x->opcode() != HloOpcode::kDot) {
                continue;
              }
              if (iter->second < min_depth) {
                first_user = x;
                min_depth = iter->second;
              }
            }
            if (first_user != nullptr) {
              HloInstruction* identity = inst->parent()->AddInstruction(
                  HloInstruction::CreateCustomCall(cur->shape(), {cur},
                                                   kIdentityMarker));
              SetSharding(identity, output_spec, inst, transpose_inst,
                          modified);
              ReplaceOperand(first_user, cur, identity);
            }
          }
        }
      }
    }
    VLOG(10) << "-----------------------done\n";
  }
  // Materialize all-gathers as reshapes carrying the original sharding.
  for (HloInstruction* inst : insert_all_gather) {
    HloInstruction* replace_with = inst->parent()->AddInstruction(
        HloInstruction::CreateReshape(inst->shape(), inst));
    replace_with->set_sharding(
        GetShardingStrategy(inst, strategy_map, cost_graph, s_val)
            .output_sharding);
    TF_RETURN_IF_ERROR(inst->ReplaceAllUsesWith(replace_with));
  }
  return absl::OkStatus();
}
// Derives the output sharding `ins` should carry if its cross-device
// reduction is rewritten as a reduce-scatter. The decision is keyed off the
// textual pattern name in `input_shardings.name` (e.g. "SR = SS x SR").
// Returns Undefined() when the relevant output dimensions are not evenly
// divisible by the chosen mesh dimensions, or when no pattern matches.
HloSharding GetReduceScatterOutput(const HloInstruction* ins,
                                   const InputShardings& input_shardings,
                                   const ShardingStrategy& strategy,
                                   const ClusterEnvironment& cluster_env) {
  const DeviceMesh& device_mesh = cluster_env.device_mesh_;
  const DeviceMesh& device_mesh_1d = cluster_env.device_mesh_1d_;
  if (ins->opcode() == HloOpcode::kDot) {
    const DotDimensionNumbers& dot_dnums = ins->dot_dimension_numbers();
    // Index of the first non-batch ("space") output dimension of the dot.
    int64_t space_base_dim = dot_dnums.lhs_batch_dimensions_size();
    if (absl::StartsWith(input_shardings.name, "SR = SS x SR") ||
        absl::StartsWith(input_shardings.name, "RS = RS x SS")) {
      // 2D tiling over both space dimensions.
      int mesh_dim0, mesh_dim1;
      std::tie(mesh_dim0, mesh_dim1) = ParseMeshDims(input_shardings.name);
      if (!IsDivisible(ins, device_mesh, {space_base_dim, space_base_dim + 1},
                       {mesh_dim0, mesh_dim1})) {
        return Undefined();
      }
      return Tile(ins->shape(), {space_base_dim, space_base_dim + 1},
                  {mesh_dim0, mesh_dim1}, device_mesh);
    }
    if (absl::StartsWith(input_shardings.name, "SbR = SbSk x SbSk")) {
      // Tile the batch dimension (0) and the first space dimension.
      int mesh_dim0, mesh_dim1;
      std::tie(mesh_dim0, mesh_dim1) = ParseMeshDims(input_shardings.name);
      if (!IsDivisible(ins, device_mesh, {0, space_base_dim},
                       {mesh_dim0, mesh_dim1})) {
        return Undefined();
      }
      return Tile(ins->shape(), {0, space_base_dim}, {mesh_dim0, mesh_dim1},
                  device_mesh);
    }
    if (absl::StartsWith(input_shardings.name, "RR = RS x SR")) {
      // 1D tiling; the mesh dimension is recovered from the "{0}" marker in
      // the pattern name.
      int mesh_dim = absl::StrContains(input_shardings.name, "{0}") ? 0 : 1;
      if (!IsDivisible(ins, device_mesh, {space_base_dim}, {mesh_dim})) {
        return Undefined();
      }
      return Tile(ins->shape(), {space_base_dim}, {mesh_dim}, device_mesh);
    }
    if (absl::StartsWith(input_shardings.name, "R = Sk x Sk")) {
      // Fully-reduced pattern: tile on the flattened 1D mesh instead.
      int mesh_dim = 0;
      if (!IsDivisible(ins, device_mesh_1d, {space_base_dim}, {mesh_dim})) {
        return Undefined();
      }
      return Tile(ins->shape(), {space_base_dim}, {mesh_dim}, device_mesh_1d);
    }
  } else if (ins->opcode() == HloOpcode::kConvolution) {
    const ConvolutionDimensionNumbers& conv_dnums =
        ins->convolution_dimension_numbers();
    int out_batch_dim = conv_dnums.output_batch_dimension();
    int out_out_channel_dim = conv_dnums.output_feature_dimension();
    if (absl::StartsWith(input_shardings.name, "SR = SS x SR") ||
        absl::StartsWith(input_shardings.name, "RS = RS x SS")) {
      // 2D tiling over the output batch and output feature dimensions.
      int mesh_dim0, mesh_dim1;
      std::tie(mesh_dim0, mesh_dim1) = ParseMeshDims(input_shardings.name);
      if (!IsDivisible(ins, device_mesh, {out_batch_dim, out_out_channel_dim},
                       {mesh_dim0, mesh_dim1})) {
        return Undefined();
      }
      return Tile(ins->shape(), {out_batch_dim, out_out_channel_dim},
                  {mesh_dim0, mesh_dim1}, device_mesh);
    }
    if (absl::StartsWith(input_shardings.name, "R = Sk x Sk")) {
      // 1D tiling on the output batch dimension over the flattened mesh.
      int mesh_dim = 0;
      if (!IsDivisible(ins, device_mesh_1d, {out_batch_dim}, {mesh_dim})) {
        return Undefined();
      }
      return Tile(ins->shape(), {out_batch_dim}, {mesh_dim}, device_mesh_1d);
    }
  } else if (ins->opcode() == HloOpcode::kReduce) {
    // Only rank-1 reduce outputs are supported here.
    CHECK_EQ(ins->shape().rank(), 1);
    // The mesh dimension is inferred from the all-reduce annotation embedded
    // in the pattern name.
    int mesh_dim;
    if (absl::StrContains(input_shardings.name, "allreduce @ [0]")) {
      mesh_dim = 0;
    } else {
      mesh_dim = 1;
    }
    if (strategy.output_sharding.IsReplicated()) {
      if (absl::StrContains(input_shardings.name, "1d")) {
        if (!IsDivisible(ins, device_mesh_1d, {0}, {mesh_dim})) {
          return Undefined();
        }
        return Tile(ins->shape(), {0}, {mesh_dim}, device_mesh_1d);
      }
      if (!IsDivisible(ins, device_mesh, {0}, {mesh_dim})) {
        return Undefined();
      }
      return Tile(ins->shape(), {0}, {mesh_dim}, device_mesh);
    }
    if (!IsDivisible(ins, device_mesh_1d, {0}, {0})) {
      return Undefined();
    }
    // Already partially tiled: flatten the existing tile assignment over all
    // devices.
    auto tile_assignment = strategy.output_sharding.tile_assignment().Reshape(
        {cluster_env.total_devices_});
    return HloSharding::Tile(std::move(tile_assignment));
  } else {
    LOG(FATAL) << "Invalid instruction: " << ins->ToString();
  }
  return Undefined();
}
bool HasReduceScatterOpportunity(const HloInstruction* inst,
const StrategyMap& strategy_map,
const CostGraph& cost_graph,
absl::Span<const NodeStrategyIdx> s_val,
const ConstInstructionSet& modified) {
for (const HloInstruction* operand : inst->operands()) {
if (modified.contains(operand)) {
return false;
}
}
if (modified.contains(inst)) {
return false;
}
if (inst->opcode() == HloOpcode::kReduce && inst->shape().rank() == 1) {
return true;
}
if (inst->opcode() == HloOpcode::kDot) {
if (GetShardingStrategy(inst->operand(0), strategy_map, cost_graph, s_val)
.output_sharding.IsReplicated() &&
GetShardingStrategy(inst->operand(1), strategy_map, cost_graph, s_val)
.output_sharding.IsReplicated()) {
return false;
}
return true;
}
if (inst->opcode() == HloOpcode::kConvolution) {
return true;
}
return false;
}
}
// Records user-provided sharding annotations that must survive the pass
// (side-effecting ops, manual-partition boundaries, small replicated tensors,
// and — depending on `option_.preserve_shardings` — entry parameters/roots or
// everything), then clears all remaining shardings so the solver starts from
// a clean slate. Returns the saved shardings plus whether the module changed.
absl::StatusOr<AutoShardingImplementation::SaveShardingAnnotationsResult>
AutoShardingImplementation::SaveAndRemoveShardingAnnotation(
    HloModule* module,
    const absl::flat_hash_set<const HloInstruction*>& instructions_to_shard,
    const absl::flat_hash_set<std::string>& replicated_small_tensors,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  absl::flat_hash_map<std::string, std::vector<HloSharding>>
      preserved_shardings;
  // Instructions whose sharding must be kept even outside the "keep all"
  // mode (used below when deciding what to clear).
  absl::flat_hash_set<HloInstruction*> keep_inst;
  for (const HloComputation* computation :
       module->computations(execution_threads)) {
    for (const auto inst : computation->instructions()) {
      // Side-effecting communication ops keep their shardings unconditionally.
      if (inst->opcode() == HloOpcode::kOutfeed ||
          inst->opcode() == HloOpcode::kRecv ||
          inst->opcode() == HloOpcode::kRecvDone ||
          inst->opcode() == HloOpcode::kSend ||
          inst->opcode() == HloOpcode::kSendDone) {
        TF_RETURN_IF_ERROR(spmd::SaveShardingForInstruction(
            inst,
            false, preserved_shardings));
        continue;
      }
      // Manual-partition boundary markers also keep their shardings.
      if (spmd::IsInstructionBeforeSPMDFullToShardShapeCustomCall(inst) ||
          spmd::IsSPMDShardToFullShapeCustomCall(inst)) {
        TF_RETURN_IF_ERROR(spmd::SaveShardingForInstruction(
            inst,
            false, preserved_shardings));
      }
      // Warn (but do not fail) on misaligned user annotations outside the
      // sharded region.
      if (inst->has_sharding() &&
          spmd::IsShardingMisaligned(inst->sharding(), inst->shape()) &&
          !instructions_to_shard.contains(inst)) {
        LOG(WARNING)
            << "Instruction " << inst->name()
            << " has a user sharding annotation that is misaligned. Shape: "
            << inst->shape().ToString()
            << ". Sharding:" << inst->sharding().ToString();
      }
    }
  }
  // "Keep all" mode: save every sharding and leave the module untouched.
  if (option_.preserve_shardings ==
      AutoShardingOption::PreserveShardingsType::kKeepAllShardings) {
    for (const HloComputation* computation :
         module->computations(execution_threads)) {
      for (const auto inst : computation->instructions()) {
        TF_RETURN_IF_ERROR(spmd::SaveShardingForInstruction(
            inst,
            true, preserved_shardings));
      }
    }
    return SaveShardingAnnotationsResult{preserved_shardings, false};
  }
  bool module_is_changed = false;
  for (HloComputation* computation : module->computations(execution_threads)) {
    bool is_entry_computation = computation->IsEntryComputation();
    for (HloInstruction* ins : computation->instructions()) {
      // Small tensors forced to be replicated keep their sharding.
      if (replicated_small_tensors.count(ins->name())) {
        keep_inst.insert(ins);
        TF_RETURN_IF_ERROR(spmd::SaveShardingForInstruction(
            ins,
            false, preserved_shardings));
        continue;
      }
      // Input/output preservation mode: keep entry parameters and the root.
      if (option_.preserve_shardings ==
              AutoShardingOption::PreserveShardingsType::
                  kKeepInputOutputShardings &&
          is_entry_computation &&
          (ins->opcode() == HloOpcode::kParameter || ins->IsRoot())) {
        keep_inst.insert(ins);
        TF_RETURN_IF_ERROR(spmd::SaveShardingForInstruction(
            ins,
            ins->opcode() == HloOpcode::kParameter,
            preserved_shardings));
        continue;
      }
      // A copy of a kept instruction also keeps its sharding.
      if (ins->opcode() == HloOpcode::kCopy &&
          keep_inst.find(ins->operand(0)) != keep_inst.end()) {
        continue;
      }
      // Do not strip shardings from side-effecting ops, manual-partition
      // boundaries, or instructions outside the sharded region.
      if (ins->opcode() == HloOpcode::kOutfeed ||
          ins->opcode() == HloOpcode::kSend ||
          ins->opcode() == HloOpcode::kSendDone ||
          spmd::IsInstructionBeforeSPMDFullToShardShapeCustomCall(ins) ||
          spmd::IsSPMDShardToFullShapeCustomCall(ins) ||
          !instructions_to_shard.contains(ins)) {
        continue;
      }
      // Everything else: drop the annotation so the solver is unconstrained.
      if (ins->has_sharding()) {
        module_is_changed |= true;
        ins->clear_sharding();
      }
    }
  }
  return SaveShardingAnnotationsResult{preserved_shardings, module_is_changed};
}
// Invokes the module's registered layout-canonicalization callback (if any)
// and installs the canonical parameter and result layouts into the module's
// entry computation layout. No-op when no callback is registered.
absl::Status AutoShardingImplementation::CanonicalizeLayouts(
    HloModule* module) {
  if (!module->layout_canonicalization_callback()) {
    LOG(INFO) << "There is no registered layout_canonicalization_callback.";
    return absl::OkStatus();
  }
  TF_ASSIGN_OR_RETURN(auto canonical_layouts,
                      module->layout_canonicalization_callback()(*module));
  std::vector<Shape>& parameter_shapes = canonical_layouts.first;
  Shape& result_shape = canonical_layouts.second;
  // Update a local copy of the layout, then write it back atomically below.
  ComputationLayout computation_layout =
      module->config().entry_computation_layout();
  TF_RETURN_IF_ERROR(
      computation_layout.mutable_result_layout()->CopyLayoutFromShape(
          result_shape));
  CHECK_NE(computation_layout.parameter_count(), 0);
  CHECK_EQ(parameter_shapes.size(), computation_layout.parameter_count());
  for (int32_t param_idx = 0;
       param_idx < computation_layout.parameter_count(); param_idx++) {
    TF_RETURN_IF_ERROR(
        computation_layout.mutable_parameter_layout(param_idx)
            ->CopyLayoutFromShape(parameter_shapes.at(param_idx)));
  }
  *module->mutable_config().mutable_entry_computation_layout() =
      computation_layout;
  return absl::OkStatus();
}
// Computes the set of instructions this pass is allowed to shard. Starting
// from the users of SPMDFullToShardShape custom calls, a BFS marks every
// instruction reachable through users, operands, and called-computation
// parameters without crossing a manual-partition boundary marker; the marked
// instructions are inside manually partitioned regions and are excluded.
// Everything else in the schedule is returned as shardable.
absl::flat_hash_set<const HloInstruction*> ComputeInstructionsToShard(
    const HloModule& module, const HloInstructionSequence& sequence) {
  // Seed the BFS with consumers of full-to-shard markers (skipping direct
  // shard-to-full consumers, which end the manual region immediately).
  std::queue<const HloInstruction*> queue;
  for (HloInstruction* instruction : sequence.instructions()) {
    if (spmd::IsSPMDFullToShardShapeCustomCall(instruction)) {
      for (const HloInstruction* user : instruction->users()) {
        if (spmd::IsSPMDShardToFullShapeCustomCall(user)) {
          continue;
        }
        queue.push(user);
      }
    }
  }
  // BFS over users, operands, and called-computation parameters; boundary
  // markers are never enqueued, so traversal stays within manual regions.
  absl::flat_hash_set<const HloInstruction*> visited;
  while (!queue.empty()) {
    const HloInstruction* instruction = queue.front();
    queue.pop();
    if (visited.contains(instruction)) {
      continue;
    }
    visited.insert(instruction);
    for (const HloComputation* computation :
         instruction->called_computations()) {
      for (const HloInstruction* parameter :
           computation->parameter_instructions()) {
        if (spmd::IsSPMDShardToFullShapeCustomCall(parameter) ||
            spmd::IsSPMDFullToShardShapeCustomCall(parameter) ||
            parameter == instruction || visited.contains(parameter)) {
          continue;
        }
        queue.push(parameter);
      }
    }
    for (const HloInstruction* user : instruction->users()) {
      if (spmd::IsSPMDShardToFullShapeCustomCall(user) ||
          spmd::IsSPMDFullToShardShapeCustomCall(user) ||
          visited.contains(user)) {
        continue;
      }
      queue.push(user);
    }
    for (const HloInstruction* operand : instruction->operands()) {
      if (spmd::IsSPMDShardToFullShapeCustomCall(operand) ||
          spmd::IsSPMDFullToShardShapeCustomCall(operand) ||
          operand == instruction || visited.contains(operand)) {
        continue;
      }
      queue.push(operand);
    }
  }
  // Unvisited instructions (excluding full-to-shard markers themselves) are
  // outside manual regions and therefore shardable. Collectives outside a
  // manual region are unsupported.
  absl::flat_hash_set<const HloInstruction*> to_shard;
  for (HloInstruction* instruction : sequence.instructions()) {
    if (!visited.contains(instruction) &&
        !spmd::IsSPMDFullToShardShapeCustomCall(instruction)) {
      if (HloCollectiveInstruction::ClassOf(instruction)) {
        LOG(FATAL) << "The module contains collective ops not contained within "
                      "a graph surrounded by SPMDFullToShardShape and "
                      "SPMDShardToFullShape custom calls. This case is not yet "
                      "supported.";
      }
      to_shard.insert(instruction);
    }
  }
  return to_shard;
}
// Stores a private copy of the user-supplied options; later stages of the
// pass read (and may adjust, e.g. memory budget) this copy.
AutoShardingImplementation::AutoShardingImplementation(
    const AutoShardingOption& option)
    : option_(option) {}
// Runs the memory-term reducer over per-primitive liveness `intervals`,
// filling `reduced_intervals` and `reduced_groups` with the compressed
// representation. Returns the (before, after) term counts as reported by
// the reducer.
std::pair<int64_t, int64_t> ReduceMemoryTerms(
    int64_t num_primitives,
    const std::vector<std::pair<spmd::LivenessIdx, spmd::LivenessIdx>>&
        intervals,
    std::vector<std::pair<spmd::LivenessIdx, spmd::LivenessIdx>>&
        reduced_intervals,
    std::vector<absl::btree_set<int64_t>>& reduced_groups) {
  // Number of liveness points = one past the largest interval end; inverted
  // (never-live) intervals contribute nothing.
  int64_t num_lives = 0;
  for (const auto& interval : intervals) {
    if (interval.first > interval.second) continue;
    num_lives = std::max(num_lives, interval.second + 1);
  }
  // Capture `intervals` by reference: the parameter outlives both the
  // Reduce() call and the reducer itself. The previous by-value capture
  // copied the entire vector into the closure for no benefit.
  auto Intervals =
      [&intervals](int64_t prim_idx) -> std::pair<int64_t, int64_t> {
    return intervals.at(prim_idx);
  };
  spmd::MemoryTermReducer reducer;
  auto num_terms =
      reducer.Reduce(num_lives, num_primitives, std::move(Intervals));
  reduced_intervals = reducer.GetReducedIntervals();
  reduced_groups = reducer.GetReducedGroups();
  return num_terms;
}
// Core auto-sharding pipeline:
//   1. Normalize sharding custom-calls, schedule the module, and compute
//      buffer liveness and alias information.
//   2. For each (possibly partial) device mesh shape: build per-instruction
//      sharding strategies and costs, compress memory terms, and solve the
//      resulting optimization problem.
//   3. Apply the chosen shardings (optionally rewriting into reduce-scatter),
//      post-process, canonicalize layouts, and validate markers.
// Returns true iff the module was modified.
absl::StatusOr<bool> AutoShardingImplementation::RunAutoSharding(
    HloModule* module,
    const absl::flat_hash_set<std::string>& replicated_small_tensors,
    const absl::flat_hash_set<absl::string_view>& execution_threads,
    const absl::flat_hash_map<std::string, HloSharding>&
        sharding_propagation_solution) {
  if (!option_.enable) {
    return false;
  }
  bool module_is_changed = false;
  // A zero budget means "derive the budget from the memory lower bound".
  bool set_to_memory_lower_bound = (option_.memory_budget_per_device == 0);
  // Fold Sharding custom-calls into their operands' sharding annotations.
  absl::flat_hash_map<const HloInstruction*, std::vector<int64_t>>
      unspecified_dims;
  TF_ASSIGN_OR_RETURN(
      bool changed,
      ProcessShardingInstruction(
          module, execution_threads, true,
          &unspecified_dims, nullptr,
          nullptr,
          nullptr,
          nullptr,
          nullptr,
          nullptr,
          true));
  DumpHloModuleIfEnabled(*module, "after_spmd_calls");
  if (changed) {
    module_is_changed = true;
    VLOG(3) << "CustomCalls with custom_call_target=Sharding are removed and "
               "their shardings are moved to their input ops.";
  } else {
    VLOG(3) << "This workload does not have CustomCalls with "
               "custom_call_target=Sharding.";
  }
  // Schedule the module so liveness analysis has a total order.
  auto size_fn = [](const BufferValue& buffer) {
    return spmd::ByteSizeOfShape(buffer.shape());
  };
  TF_ASSIGN_OR_RETURN(
      HloSchedule schedule,
      ScheduleModule(module, size_fn,
                     ComputationSchedulerToModuleScheduler(DFSMemoryScheduler),
                     execution_threads));
  const HloComputation* entry_computation = module->entry_computation();
  std::unique_ptr<HloAliasAnalysis> alias_analysis =
      HloAliasAnalysis::Run(module).value();
  // Build the input/output alias map on a clone so the optimizer pass does
  // not mutate the module being sharded.
  std::unique_ptr<HloModule> module_clone = module->Clone("");
  TF_RETURN_IF_ERROR(
      spmd::EnsureEntryComputationLayoutHasShapeLayouts(module_clone.get()));
  OptimizeInputOutputBufferAlias input_output_buffer_alias_optimizer(
      true);
  CHECK_OK(input_output_buffer_alias_optimizer.Run(module_clone.get()));
  const HloInputOutputAliasConfig& input_output_alias_config =
      module_clone->input_output_alias_config();
  spmd::AliasMap alias_map =
      spmd::BuildAliasMap(module, input_output_alias_config);
  // Compute buffer live ranges and the per-time-step liveness sets.
  TF_ASSIGN_OR_RETURN(
      std::unique_ptr<HloLiveRange> hlo_live_range,
      HloLiveRange::Run(schedule, *alias_analysis, entry_computation));
  absl::flat_hash_map<const HloValue*, HloLiveRange::TimeBound>&
      buffer_live_ranges = hlo_live_range->buffer_live_ranges();
  spmd::LivenessSet liveness_set(hlo_live_range->schedule_end_time() + 1);
  for (const auto& [hlo_value, live_range] : buffer_live_ranges) {
    for (spmd::LivenessIdx i = live_range.start; i <= live_range.end; ++i) {
      liveness_set[i].push_back(hlo_value);
    }
  }
  VLOG(10) << hlo_live_range->ToString();
  XLA_VLOG_LINES(10, spmd::PrintLivenessSet(liveness_set));
  const HloInstructionSequence& sequence =
      hlo_live_range->flattened_instruction_sequence();
  // Exclude manually partitioned regions from sharding, then save and strip
  // the user annotations the solver must respect.
  const absl::flat_hash_set<const HloInstruction*>& instructions_to_shard =
      ComputeInstructionsToShard(*module, sequence);
  TF_ASSIGN_OR_RETURN(SaveShardingAnnotationsResult saved_sharding_result,
                      SaveAndRemoveShardingAnnotation(
                          module, instructions_to_shard,
                          replicated_small_tensors, execution_threads));
  absl::flat_hash_map<std::string, std::vector<HloSharding>>
      preserve_shardings = std::move(saved_sharding_result.preserved_shardings);
  module_is_changed |= saved_sharding_result.module_is_changed;
  absl::flat_hash_map<const HloInstruction*, int64_t>
      instruction_execution_counts = spmd::ComputeInstructionExecutionCounts(
          module, option_.loop_iteration_count_estimate);
  // Optionally solve ND sharding iteratively through a sequence of partial
  // mesh shapes, ending with the full mesh.
  spmd::DeviceMesh original_device_mesh(option_.device_mesh_shape);
  original_device_mesh.SetValues(option_.device_mesh_ids);
  const int64_t original_memory_budget = option_.memory_budget_per_device;
  std::vector<std::vector<int64_t>> partial_mesh_shapes;
  if (option_.solve_nd_sharding_iteratively) {
    partial_mesh_shapes = spmd::DecomposeMeshShapes(option_.device_mesh_shape,
                                                    option_.device_mesh_alpha,
                                                    option_.device_mesh_beta);
  } else {
    partial_mesh_shapes = {option_.device_mesh_shape};
  }
  std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
  HloCostAnalysis::Options hlo_cost_analysis_options{
      .shape_size = [](const Shape& shape) {
        return spmd::ByteSizeOfShape(shape);
      }};
  HloCostAnalysis hlo_cost_analysis(hlo_cost_analysis_options);
  CHECK_OK(module->entry_computation()->Accept(&hlo_cost_analysis));
  for (size_t mesh_idx = 0; mesh_idx < partial_mesh_shapes.size(); ++mesh_idx) {
    const std::vector<int64_t>& mesh_shape = partial_mesh_shapes[mesh_idx];
    LOG(INFO) << "Processing partial mesh shape: "
              << spmd::ToString(mesh_shape);
    spmd::DeviceMesh device_mesh(mesh_shape);
    // Only the final (full) mesh uses the real device ids; intermediate
    // partial meshes use an iota numbering and adjusted shardings.
    if (mesh_idx != partial_mesh_shapes.size() - 1) {
      device_mesh.FillIota(0);
      TF_ASSIGN_OR_RETURN(
          bool changed,
          spmd::AdjustShardingsWithPartialMeshShape(
              sequence.instructions(), instructions_to_shard, mesh_shape,
              original_device_mesh,
              !option_.try_multiple_mesh_shapes));
      LOG(INFO)
          << "Shardings are adjusted based on current partial mesh shape: "
          << changed;
    } else {
      device_mesh.SetValues(option_.device_mesh_ids);
    }
    spmd::ProfilingResult prof_result;
    spmd::ClusterEnvironment cluster_env(
        original_device_mesh, device_mesh, option_.device_mesh_alpha,
        option_.device_mesh_beta, prof_result, option_);
    XLA_VLOG_LINES(6, module->ToString());
    // Establish the memory budget (either scaled from the lower bound or
    // re-scaled from the user budget to the current partial mesh).
    const int64_t memory_lower_bound = spmd::MemoryBudgetLowerBound(
        *module, instructions_to_shard, liveness_set, *alias_analysis,
        device_mesh.num_elements(), preserve_shardings);
    const float memory_lower_bound_gb =
        static_cast<float>(memory_lower_bound) / (1024 * 1024 * 1024);
    LOG(INFO) << "Memory consumption lower bound is " << memory_lower_bound_gb
              << " GB.";
    if (set_to_memory_lower_bound) {
      LOG(INFO)
          << "--xla_tpu_auto_spmd_partitioning_memory_budget_gb is 0, and "
             "--xla_tpu_auto_spmd_partitioning_memory_budget_ratio is "
          << option_.memory_budget_ratio
          << ", so setting option.memory_budget_per_device to "
          << memory_lower_bound_gb << " x " << option_.memory_budget_ratio
          << " = " << memory_lower_bound_gb * option_.memory_budget_ratio
          << " GB";
      option_.memory_budget_per_device =
          memory_lower_bound * std::abs(option_.memory_budget_ratio);
      // A negative ratio additionally disables the overbudget penalty.
      if (option_.memory_budget_ratio < 0) {
        option_.memory_overbudget_coeff = -1.0;
      }
    } else if (option_.memory_budget_per_device > 0) {
      option_.memory_budget_per_device = original_memory_budget *
                                         original_device_mesh.num_elements() /
                                         device_mesh.num_elements();
      LOG(INFO) << "Setting option.memory_budget_per_device to "
                << option_.memory_budget_per_device;
    }
    // Build strategies, costs, and alias constraints for the solver.
    spmd::InstructionDepthMap ins_depth_map;
    ins_depth_map = spmd::BuildInstructionDepthMap(sequence);
    spmd::StrategyMap strategy_map;
    spmd::StrategyGroups strategy_groups;
    spmd::AssociativeDotPairs associative_dot_pairs;
    TF_ASSIGN_OR_RETURN(
        std::tie(strategy_map, strategy_groups, associative_dot_pairs),
        BuildStrategyAndCost(sequence, module, instructions_to_shard,
                             instruction_execution_counts, ins_depth_map,
                             alias_map, cluster_env, option_, *call_graph,
                             hlo_cost_analysis,
                             option_.try_multiple_mesh_shapes));
    spmd::AliasSet alias_set =
        spmd::BuildAliasSet(module, input_output_alias_config, strategy_map);
    TF_RETURN_IF_ERROR(RemoveFollowersIfMismatchedStrategies(
        alias_set, strategy_groups, sequence,
        !option_.try_multiple_mesh_shapes));
    XLA_VLOG_LINES(8, PrintStrategyMap(strategy_map, sequence));
    spmd::CostGraph cost_graph(strategy_groups, associative_dot_pairs);
    cost_graph.Simplify(option_.simplify_graph);
    // Map each node to the edges that end at it (used to propagate liveness
    // intervals from nodes to edges).
    std::vector<absl::flat_hash_set<spmd::EdgeIdx>> node_to_edges(
        strategy_groups.size());
    spmd::EdgeIdx edge_idx = 0;
    for (const auto& [edge, _] : cost_graph.edge_costs_) {
      node_to_edges[edge.second].insert(edge_idx);
      ++edge_idx;
    }
    // Derive per-node and per-edge liveness intervals from buffer live
    // ranges; nodes without a live range get an empty (inverted) interval.
    const absl::flat_hash_map<const HloValue*, HloLiveRange::TimeBound>&
        buffer_live_ranges = hlo_live_range->buffer_live_ranges();
    absl::flat_hash_map<spmd::NodeIdx, HloLiveRange::TimeBound>
        node_to_time_bound;
    absl::flat_hash_map<spmd::EdgeIdx, HloLiveRange::TimeBound>
        edge_to_time_bound;
    for (const auto& [value, time_bound] : buffer_live_ranges) {
      const HloInstruction* instruction = value->instruction();
      const ShapeIndex& index = value->index();
      if (instruction->shape().IsTuple() && index.empty()) continue;
      const spmd::StrategyGroup* strategy_group =
          strategy_map.at(instruction).get();
      const spmd::NodeIdx node_idx =
          strategy_group->GetSubStrategyGroup(index)->node_idx;
      if (node_idx < 0) continue;
      node_to_time_bound[node_idx] = time_bound;
      for (const spmd::EdgeIdx edge_idx : node_to_edges[node_idx]) {
        edge_to_time_bound[edge_idx] = time_bound;
      }
    }
    std::vector<std::pair<spmd::LivenessIdx, spmd::LivenessIdx>> node_intervals,
        edge_intervals;
    for (spmd::NodeIdx node_idx = 0; node_idx < strategy_groups.size();
         ++node_idx) {
      std::pair<spmd::LivenessIdx, spmd::LivenessIdx> interval;
      if (auto time_bound = node_to_time_bound.find(node_idx);
          time_bound != node_to_time_bound.end()) {
        interval.first = time_bound->second.start;
        interval.second = time_bound->second.end;
      } else {
        interval.first = std::numeric_limits<int64_t>::max();
        interval.second = 0;
      }
      node_intervals.push_back(std::move(interval));
    }
    for (spmd::EdgeIdx edge_idx = 0; edge_idx < cost_graph.edge_costs_.size();
         ++edge_idx) {
      std::pair<spmd::LivenessIdx, spmd::LivenessIdx> interval;
      if (auto time_bound = edge_to_time_bound.find(edge_idx);
          time_bound != edge_to_time_bound.end()) {
        interval.first = time_bound->second.start;
        interval.second = time_bound->second.end;
      } else {
        interval.first = std::numeric_limits<int64_t>::max();
        interval.second = 0;
      }
      edge_intervals.push_back(std::move(interval));
    }
    // Compress memory terms to keep the solver problem tractable.
    const absl::Time term_reduction_start_time = absl::Now();
    std::vector<std::pair<spmd::LivenessIdx, spmd::LivenessIdx>>
        reduced_node_intervals, reduced_edge_intervals;
    std::vector<absl::btree_set<int64_t>> reduced_node_groups,
        reduced_edge_groups;
    auto num_node_terms =
        ReduceMemoryTerms(strategy_groups.size(), node_intervals,
                          reduced_node_intervals, reduced_node_groups);
    auto num_edge_terms =
        ReduceMemoryTerms(cost_graph.edge_costs_.size(), edge_intervals,
                          reduced_edge_intervals, reduced_edge_groups);
    const absl::Time term_reduction_end_time = absl::Now();
    const auto term_reduction_duration =
        term_reduction_end_time - term_reduction_start_time;
    LOG(INFO) << "Memory Term Reducer took "
              << absl::ToInt64Milliseconds(term_reduction_duration)
              << " ms and reduced the number of terms from "
              << num_node_terms.first + num_edge_terms.first << " to "
              << num_node_terms.second + num_edge_terms.second;
    // Solve for a strategy per node.
    std::string request_name = absl::StrCat("mesh_idx_", mesh_idx);
    TF_ASSIGN_OR_RETURN(
        spmd::AutoShardingSolverOutput output,
        Solve(*module, *hlo_live_range, strategy_map, strategy_groups,
              cost_graph, alias_set, reduced_node_intervals,
              reduced_edge_intervals, reduced_node_groups, reduced_edge_groups,
              option_, request_name, sharding_propagation_solution));
    if (mesh_idx == partial_mesh_shapes.size() - 1) {
      this->solver_optimal_objective_value_ = output.cost;
    } else {
      TF_RET_CHECK(output.is_optimal)
          << "The solver did not find an optimal solution for a partial mesh "
          << "shape.";
    }
    XLA_VLOG_LINES(5, PrintAutoShardingSolution(
                          sequence, liveness_set, strategy_map, strategy_groups,
                          cost_graph, output.s_val, output.cost));
    XLA_VLOG_LINES(6, PrintSolutionMemoryUsage(liveness_set, strategy_map,
                                               cost_graph, output.s_val));
    // Apply the solution to the HLO graph.
    if (option_.prefer_reduce_scatter) {
      TF_RETURN_IF_ERROR(GenerateReduceScatter(
          sequence, alias_map, ins_depth_map, strategy_map, cost_graph,
          output.s_val, cluster_env, option_));
    }
    SetHloSharding(sequence, instructions_to_shard, strategy_map, cost_graph,
                   output.s_val, (mesh_idx == partial_mesh_shapes.size() - 1));
    if (mesh_idx == partial_mesh_shapes.size() - 1) {
      TF_RETURN_IF_ERROR(spmd::SetHloShardingPostProcessing(
          sequence, instructions_to_shard, preserve_shardings));
      TF_RETURN_IF_ERROR(InsertReshardReshapes(
          sequence, instructions_to_shard, strategy_map, cost_graph,
          output.s_val, cluster_env,
          !option_.try_multiple_mesh_shapes,
          option_.insert_resharding_reshapes_for_non_dot_ops,
          preserve_shardings));
    } else {
      spmd::RecoverShardingsFromPartialMesh(sequence, preserve_shardings);
    }
  }
  // Final validation and cleanup.
  if (VLOG_IS_ON(1)) {
    spmd::CheckHloSharding(sequence, instructions_to_shard,
                           original_device_mesh.num_elements());
  }
  module_is_changed = true;
  if (VLOG_IS_ON(1)) {
    spmd::CheckUserShardingPreservation(module, preserve_shardings);
  }
  TF_RETURN_IF_ERROR(CanonicalizeLayouts(module));
  // Instructions inside manual regions are marked kManual so SPMD
  // partitioning leaves them alone.
  for (HloInstruction* instruction : sequence.instructions()) {
    if (!instructions_to_shard.contains(instruction)) {
      instruction->set_sharding(
          HloSharding::Single(instruction->shape(), HloSharding::Manual()));
    }
  }
  // Sanity-check the manual/automatic sharding invariants at region borders.
  for (HloInstruction* instruction : sequence.instructions()) {
    if (spmd::IsSPMDFullToShardShapeCustomCall(instruction)) {
      CHECK(instruction->has_sharding());
      CHECK(instruction->sharding().IsManual());
      CHECK(instruction->operand(0)->has_sharding());
      CHECK(!instruction->operand(0)->sharding().IsManual());
    } else if (spmd::IsSPMDShardToFullShapeCustomCall(instruction)) {
      CHECK(instruction->has_sharding());
      CHECK(!instruction->sharding().IsManual());
      CHECK(instruction->operand(0)->has_sharding());
      CHECK(instruction->operand(0)->sharding().IsManual())
          << instruction->ToString();
    }
  }
  return module_is_changed;
}
// Returns true iff the module contains at least one SPMD manual-partitioning
// boundary marker (a full-to-shard or shard-to-full custom call).
bool ModuleIsManuallyPartitioned(const HloModule* module) {
  for (const HloComputation* comp : module->computations()) {
    for (const HloInstruction* ins : comp->instructions()) {
      const bool is_boundary_marker =
          spmd::IsSPMDFullToShardShapeCustomCall(ins) ||
          spmd::IsSPMDShardToFullShapeCustomCall(ins);
      if (is_boundary_marker) {
        return true;
      }
    }
  }
  return false;
}
// Returns true iff the instruction's output is at most
// `option.small_tensor_byte_size` bytes (such tensors are replicated rather
// than sharded).
bool IsSmallTensor(const HloInstruction* ins,
                   const AutoShardingOption& option) {
  return spmd::ByteSizeOfShape(ins->shape()) <= option.small_tensor_byte_size;
}
// Returns true iff the module contains a kConditional whose operand has a
// nested-tuple shape — a case this pass does not currently support.
bool HasUnsupportedNestedTuples(const HloModule& module) {
  for (const auto* comp : module.computations()) {
    for (const auto* ins : comp->instructions()) {
      if (ins->opcode() != HloOpcode::kConditional) {
        continue;  // Only conditionals are affected.
      }
      for (const HloInstruction* arg : ins->operands()) {
        if (ShapeUtil::IsNestedTuple(arg->shape())) {
          return true;
        }
      }
    }
  }
  return false;
}
std::unique_ptr<HloModule> CloneModule(const HloModule* module) {
auto module_clone = module->Clone("");
module_clone->set_layout_canonicalization_callback(
module->layout_canonicalization_callback());
return module_clone;
}
// Replaces the computations of `to_module` with those of `from_module`,
// matching computations positionally after sorting both modules' computation
// lists. Also copies the entry computation layout, input/output alias config,
// and buffer donor config. Fails if the computation counts differ.
absl::Status MoveComputationsFromModuleToModule(HloModule* from_module,
                                                HloModule* to_module) {
  TF_RETURN_IF_ERROR(from_module->RemoveUnusedComputations());
  // Sorting both lists establishes the positional correspondence used for
  // the replacement mapping below.
  const std::vector<HloComputation*>& original_module_computations =
      to_module->MakeComputationSorted();
  const std::vector<HloComputation*>& clone_module_computations =
      from_module->MakeComputationSorted();
  if (original_module_computations.size() != clone_module_computations.size()) {
    return absl::InternalError(
        "The cloned and the original modules do not have the same number "
        "of computations. This is a bug and should be reported.");
  }
  absl::flat_hash_map<HloComputation*, HloComputation*>
      computation_replacements;
  for (size_t i = 0; i < original_module_computations.size(); ++i) {
    HloComputation* original_computation = original_module_computations[i];
    HloComputation* new_computation = clone_module_computations[i];
    computation_replacements[original_computation] = new_computation;
  }
  to_module->ReplaceComputations(computation_replacements);
  to_module->MoveComputationsFrom(from_module);
  // Carry over module-level configuration tied to the moved computations.
  *to_module->mutable_config().mutable_entry_computation_layout() =
      from_module->entry_computation_layout();
  to_module->input_output_alias_config() =
      from_module->input_output_alias_config();
  to_module->buffer_donor_config() = from_module->buffer_donor_config();
  return absl::OkStatus();
}
// Stores a private copy of the user-supplied options for the pass wrapper.
AutoSharding::AutoSharding(const AutoShardingOption& option)
    : option_(option) {}
// Dumps the pre-pass module, bumps the invocation metric, and returns the
// pass start time (taken after dumping so dump time is excluded).
absl::Time DumpModuleAndRecordPassStart(const HloModule* module) {
  XLA_VLOG_LINES(6,
                 absl::StrCat("Before auto sharding:\n", module->ToString()));
  DumpHloModuleIfEnabled(*module, "before_auto_spmd_sharding");
  // Metrics are not available on Apple builds.
#if !defined(__APPLE__)
  metrics::RecordAutoShardingInvocations();
#endif
  return absl::Now();
}
// Logs and records the elapsed pass time and dumps the post-pass module.
void RecordPassEndAndDumpModule(absl::Time start_time,
                                const HloModule* module) {
  const absl::Duration elapsed = absl::Now() - start_time;
  LOG(INFO) << "Auto Sharding took " << absl::ToInt64Seconds(elapsed)
            << " seconds";
  // Metrics are not available on Apple builds.
#if !defined(__APPLE__)
  metrics::RecordAutoShardingCompilationTime(
      absl::ToInt64Microseconds(elapsed));
#endif
  XLA_VLOG_LINES(6, absl::StrCat("After auto sharding:\n", module->ToString()));
  DumpHloModuleIfEnabled(*module, "after_auto_spmd_sharding");
}
absl::StatusOr<bool> AutoSharding::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
if (!option_.enable) {
return false;
}
LOG(INFO) << "Starting the auto sharding pass";
if (HasUnsupportedNestedTuples(*module)) {
LOG(FATAL) << "The input module contains nested tuples "
"which we do not currently support well. See b/332951306 to "
"track progress on this.";
return false;
}
absl::Time start_time = DumpModuleAndRecordPassStart(module);
TF_RETURN_IF_ERROR(module->RemoveUnusedComputations());
TF_RETURN_IF_ERROR(option_.CheckAndSetup());
LOG(INFO) << "AutoShardingOptions:\n" << option_.ToString();
absl::flat_hash_set<std::string> replicated_small_tensors;
if (option_.small_tensor_byte_size > 0) {
for (auto computation : module->computations()) {
for (auto instruction : computation->instructions()) {
if (!instruction->has_sharding() &&
IsSmallTensor(instruction, option_)) {
VLOG(1) << "Replicated small tensor: " << instruction->name();
instruction->set_sharding(
instruction->shape().IsTuple()
? HloSharding::SingleTuple(instruction->shape(),
HloSharding::Replicate())
: HloSharding::Replicate());
replicated_small_tensors.insert(std::string(instruction->name()));
}
}
}
}
bool module_is_manually_partitioned = ModuleIsManuallyPartitioned(module);
if (module_is_manually_partitioned) {
HloConstantSplitter constant_splitter(
option_.enable_expression_constant_splitter,
spmd::OpEncountersShardToFull);
CHECK_OK(constant_splitter.Run(module, execution_threads));
CHECK_OK(HloDCE().Run(module, execution_threads));
}
std::vector<std::vector<int64_t>> mesh_shapes;
if (option_.try_multiple_mesh_shapes || module_is_manually_partitioned) {
mesh_shapes = spmd::InferOrEnumerateMeshShapesToTry(
*module, Product(option_.device_mesh_shape),
option_.device_mesh_shape.size(),
false);
} else {
mesh_shapes.push_back(option_.device_mesh_shape);
}
CHECK(option_.try_multiple_mesh_shapes || mesh_shapes.size() == 1)
<< "Auto-sharding cannot infer a single appropriate mesh shape for this "
"HLO, and AutoShardingption::try_multiple_mesh_shapes is set to "
"false. Please re-run with the option set to true.";
if (module->entry_computation()->num_parameters() > 0) {
HloInstruction* parameter_instruction =
module->entry_computation()->parameter_instruction(0);
if (parameter_instruction->shape().IsTuple() &&
parameter_instruction->has_sharding()) {
CHECK_EQ(module->entry_computation()->num_parameters(), 1);
parameter_instruction->set_sharding(
spmd::ReplaceGivenShardingsWithUnknownForTuple(
parameter_instruction->sharding(), parameter_instruction->shape(),
module->config()
.allow_spmd_sharding_propagation_to_parameters()));
}
}
HloInstruction* root_instruction =
module->entry_computation()->root_instruction();
if (root_instruction->shape().IsTuple() && root_instruction->has_sharding()) {
root_instruction->set_sharding(
spmd::ReplaceGivenShardingsWithUnknownForTuple(
root_instruction->sharding(), root_instruction->shape(),
module->config().allow_spmd_sharding_propagation_to_output()));
}
absl::flat_hash_map<std::string, HloSharding> sharding_propagation_solution;
if (option_.use_sharding_propagation_for_default_shardings) {
std::unique_ptr<HloModule> module_with_default_solution =
CloneModule(module);
ShardingPropagation sharding_propagation(
true, false,
module->config().allow_spmd_sharding_propagation_to_output(),
module->config().allow_spmd_sharding_propagation_to_parameters(),
false,
nullptr);
CHECK_OK(sharding_propagation.Run(module_with_default_solution.get(),
execution_threads));
VLOG(6) << module_with_default_solution->ToString();
for (const auto computation :
module_with_default_solution->computations()) {
for (const auto instruction : computation->instructions()) {
if (instruction->has_sharding()) {
sharding_propagation_solution.insert(
{std::string(instruction->name()), instruction->sharding()});
}
}
}
}
bool module_is_changed = false;
VLOG(1) << "Original mesh shape "
<< spmd::ToString(option_.device_mesh_shape);
double min_objective_value = std::numeric_limits<double>::max();
int min_mesh_shape_index = -1;
std::unique_ptr<HloModule> min_mesh_shape_module;
for (size_t i = 0; i < mesh_shapes.size(); ++i) {
VLOG(1) << "Trying mesh shape " << spmd::ToString(mesh_shapes[i]);
AutoShardingOption this_option = option_;
this_option.device_mesh_shape = mesh_shapes[i];
if (this_option.device_mesh_shape.size() !=
this_option.device_mesh_alpha.size()) {
this_option.device_mesh_alpha.clear();
this_option.device_mesh_beta.clear();
TF_RETURN_IF_ERROR(this_option.CheckAndSetup());
}
auto pass = std::make_unique<AutoShardingImplementation>(this_option);
std::unique_ptr<HloModule> module_clone = CloneModule(module);
absl::StatusOr<bool> pass_result =
pass->RunAutoSharding(module_clone.get(), replicated_small_tensors,
execution_threads, sharding_propagation_solution);
if (!pass_result.ok()) {
VLOG(1) << "Mesh shape " << spmd::ToString(mesh_shapes[i])
<< " led to the following error: "
<< pass_result.status().message();
continue;
}
double this_mesh_objective_value = pass->GetSolverOptimalObjectiveValue();
VLOG(1) << "Mesh shape " << spmd::ToString(mesh_shapes[i])
<< " has objective value " << this_mesh_objective_value;
if (this_mesh_objective_value >= 0 &&
min_objective_value > this_mesh_objective_value) {
min_mesh_shape_index = i;
min_mesh_shape_module = std::move(module_clone);
min_objective_value = this_mesh_objective_value;
CHECK_OK(pass_result);
module_is_changed = *pass_result;
}
}
std::string trying_to_find =
option_.try_multiple_mesh_shapes
? "a device mesh (and the corresponding shardings)"
: "shardings";
CHECK_GE(min_mesh_shape_index, 0)
<< "The auto-sharding pass could not find " << trying_to_find
<< " that works for this input. This could be the result of a low memory "
"budget (please refer to the "
"`--xla_tpu_auto_spmd_partitioning_memory_budget_ratio` flag to set a "
"higher budget). If you think you have set a reasonably large memory "
"budget, please report this as a bug.";
solver_optimal_objective_value_ = min_objective_value;
if (module_is_changed) {
VLOG(1) << "Choosing mesh shape "
<< spmd::ToString(mesh_shapes[min_mesh_shape_index])
<< " which had the minimal solver objective value of "
<< min_objective_value;
chosen_mesh_shape_ = mesh_shapes[min_mesh_shape_index];
TF_RETURN_IF_ERROR(MoveComputationsFromModuleToModule(
min_mesh_shape_module.get(), module));
}
RecordPassEndAndDumpModule(start_time, module);
return module_is_changed;
}
} | #include "xla/hlo/experimental/auto_sharding/auto_sharding.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <numeric>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_cost_graph.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_device_mesh.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_option.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_strategy.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_util.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/buffer_value.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_value.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace spmd {
namespace {
using ::testing::Contains;
using ::testing::Each;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::Eq;
using ::testing::FieldsAre;
using ::testing::IsEmpty;
using ::testing::IsFalse;
using ::testing::IsTrue;
using ::testing::Not;
using ::testing::Pair;
using ::testing::ResultOf;
using ::testing::UnorderedElementsAre;
// A 2D mesh filled via FillIota(0) must be flagged as an iota mesh and
// report the original dimensions and element count.
TEST(DeviceMeshTest, IotaDeviceMesh2DStartsWith0) {
  DeviceMesh mesh({2, 4});
  mesh.FillIota(0);
  EXPECT_TRUE(mesh.is_iota);
  EXPECT_THAT(mesh.dimensions(), ElementsAre(2, 4));
  EXPECT_EQ(mesh.num_elements(), 8);
}
// FillIota with a non-zero starting id (55) on a 3D mesh is still an iota
// mesh — the detection does not require the sequence to start at 0.
TEST(DeviceMeshTest, IotaDeviceMesh3DStartsWithNonZero) {
  DeviceMesh mesh({2, 4, 8});
  mesh.FillIota(55);
  EXPECT_TRUE(mesh.is_iota);
  EXPECT_THAT(mesh.dimensions(), ElementsAre(2, 4, 8));
  EXPECT_EQ(mesh.num_elements(), 64);
}
// SetValues() must detect that an explicitly supplied sequence is an iota
// (here 34, 35, ..., 97) and set is_iota accordingly.
TEST(DeviceMeshTest, ExplicitSetValuesInferIotaIotaValues) {
  DeviceMesh mesh({2, 4, 8});
  std::vector<int64_t> ids(64);
  absl::c_iota(ids, 34);
  mesh.SetValues(ids);
  EXPECT_TRUE(mesh.is_iota);
  EXPECT_THAT(mesh.dimensions(), ElementsAre(2, 4, 8));
  EXPECT_EQ(mesh.num_elements(), 64);
}
// A single out-of-sequence element must make SetValues() classify the mesh
// as non-iota while leaving shape and size untouched.
TEST(DeviceMeshTest, ExplicitSetValuesInferIotaNonIotaValues) {
  DeviceMesh mesh({2, 4, 8});
  std::vector<int64_t> ids(64);
  absl::c_iota(ids, 34);
  ids[54] = 54;  // Breaks the 34-based iota pattern.
  mesh.SetValues(ids);
  EXPECT_FALSE(mesh.is_iota);
  EXPECT_THAT(mesh.dimensions(), ElementsAre(2, 4, 8));
  EXPECT_EQ(mesh.num_elements(), 64);
}
// Reshaping a non-iota mesh keeps it non-iota: only the dimensions change,
// the element count and classification are preserved.
TEST(DeviceMeshTest, ReshapeTestWithoutIota) {
  DeviceMesh mesh({2, 4, 8});
  std::vector<int64_t> ids(64);
  absl::c_iota(ids, 34);
  ids[54] = 54;  // Breaks the iota pattern.
  mesh.SetValues(ids);
  EXPECT_FALSE(mesh.is_iota);
  EXPECT_THAT(mesh.dimensions(), ElementsAre(2, 4, 8));
  EXPECT_EQ(mesh.num_elements(), 64);
  mesh.Reshape({2, 32});
  EXPECT_FALSE(mesh.is_iota);
  EXPECT_THAT(mesh.dimensions(), ElementsAre(2, 32));
  EXPECT_EQ(mesh.num_elements(), 64);
}
// Reshaping an iota mesh preserves the iota classification along with the
// element count; only the dimensions change.
TEST(DeviceMeshTest, ReshapeTestWithIota) {
  DeviceMesh mesh({2, 4, 8});
  std::vector<int64_t> ids(64);
  absl::c_iota(ids, 34);
  mesh.SetValues(ids);
  EXPECT_TRUE(mesh.is_iota);
  EXPECT_THAT(mesh.dimensions(), ElementsAre(2, 4, 8));
  EXPECT_EQ(mesh.num_elements(), 64);
  mesh.Reshape({2, 32});
  EXPECT_TRUE(mesh.is_iota);
  EXPECT_THAT(mesh.dimensions(), ElementsAre(2, 32));
  EXPECT_EQ(mesh.num_elements(), 64);
}
// Test fixture holding two tiny HLO modules (a matmul and an elementwise
// add) plus helpers that run the AutoSharding pass with a given option set
// and verify the sharding the solver picks for the "root" instruction.
class AutoShardingTest : public HloTestBase {
 protected:
  // 32x64 * 64x128 dot; the ROOT is named "root" so helpers can find it.
  const absl::string_view kDotHloString = R"(
HloModule module
ENTRY matmul {
parameter.1 = f32[32,64]{1,0} parameter(0)
parameter.2 = f32[64,128]{1,0} parameter(1)
ROOT root = f32[32,128]{1,0} dot(parameter.1, parameter.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
  // 16x32x64 elementwise add; the ROOT is again named "root".
  const absl::string_view kAddHloString = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[16,32,64]{2,1,0} parameter(0)
%param1 = f32[16,32,64]{2,1,0} parameter(1)
ROOT root = f32[16,32,64]{2,1,0} add(%param0, %param1)
})";
  // Runs auto-sharding on the matmul module and checks the root's tiling.
  void RunMatMulAutoShardingWithOptions(
      AutoShardingOption option, size_t expected_num_tiles,
      size_t expected_sharded_dimensions = 1) {
    TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                            ParseAndReturnVerifiedModule(kDotHloString));
    RunAutoShardingWithOptions(module.get(), option, expected_num_tiles,
                               expected_sharded_dimensions);
  }
  // Runs auto-sharding on the add module and checks the root's tiling.
  void RunAddAutoShardingWithOptions(AutoShardingOption option,
                                     size_t expected_num_tiles,
                                     size_t expected_sharded_dimensions = 1) {
    TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                            ParseAndReturnVerifiedModule(kAddHloString));
    RunAutoShardingWithOptions(module.get(), option, expected_num_tiles,
                               expected_sharded_dimensions);
  }
  // Asserts that the pass changes `module` and that "root" ends up split
  // into `expected_num_tiles` tiles spread over
  // `expected_sharded_dimensions` tensor dimensions.
  void RunAutoShardingWithOptions(HloModule* module, AutoShardingOption option,
                                  size_t expected_num_tiles,
                                  size_t expected_sharded_dimensions = 1) {
    TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module));
    EXPECT_TRUE(changed);
    auto* root = FindInstruction(module, "root");
    ASSERT_NE(root, nullptr);
    EXPECT_EQ(root->sharding().NumTiles(), expected_num_tiles);
    // Count tile-assignment dims larger than one, ignoring a trailing
    // replication dimension if present.
    EXPECT_EQ(VectorGreaterThanOneElementCount(
                  root->sharding().tile_assignment().dimensions(),
                  root->sharding().ReplicateOnLastTileDim()),
              expected_sharded_dimensions);
  }
  // Expects the pass itself to fail on the matmul module (e.g. when the
  // options are invalid).
  void RunMatMulAutoShardingWithOptionsExpectFail(AutoShardingOption option) {
    TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                            ParseAndReturnVerifiedModule(kDotHloString));
    RunAutoShardingWithOptionsExpectFail(module.get(), option);
  }
  // Expects AutoSharding::Run to return a non-OK status.
  void RunAutoShardingWithOptionsExpectFail(HloModule* module,
                                            AutoShardingOption option) {
    EXPECT_FALSE(AutoSharding(option).Run(module).ok());
  }
  // Variant that checks the exact tile-assignment dimensions (and the
  // trailing-replication flag) rather than tile counts, so no particular
  // device ordering is assumed.
  void RunMatMulAutoShardingWithOptionsNoDeviceIds(
      AutoShardingOption option, std::vector<int64_t> expected_tile,
      bool expected_last_dim_replicate = false) {
    TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                            ParseAndReturnVerifiedModule(kDotHloString));
    RunAutoShardingWithOptionsNoDeviceIds(module.get(), option, expected_tile,
                                          expected_last_dim_replicate);
  }
  void RunAutoShardingWithOptionsNoDeviceIds(HloModule* module,
                                             AutoShardingOption option,
                                             std::vector<int64_t> expected_tile,
                                             bool expected_last_dim_replicate) {
    TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module));
    EXPECT_TRUE(changed);
    HloInstruction* root = FindInstruction(module, "root");
    ASSERT_NE(root, nullptr);
    EXPECT_EQ(root->sharding().ReplicateOnLastTileDim(),
              expected_last_dim_replicate);
    EXPECT_THAT(root->sharding().tile_assignment().dimensions(),
                ElementsAreArray(expected_tile));
  }
};
// 1D mesh: the matmul root should be tiled across every device in the mesh.
TEST_F(AutoShardingTest, MatmulMeshShape1DMeshShape) {
  AutoShardingOption option;
  option.enable = true;
  option.device_mesh_shape = {4};
  RunMatMulAutoShardingWithOptions(option, 4);
  option.device_mesh_shape = {8};
  RunMatMulAutoShardingWithOptions(option, 8);
}
// Same 1D meshes, with the device ids supplied explicitly.
TEST_F(AutoShardingTest, MatmulMeshShape1DMeshShapeIds) {
  AutoShardingOption option;
  option.enable = true;
  option.device_mesh_shape = {4};
  option.device_mesh_ids = {0, 1, 2, 3};
  RunMatMulAutoShardingWithOptions(option, 4);
  option.device_mesh_shape = {8};
  option.device_mesh_ids = {0, 1, 2, 3, 4, 5, 6, 7};
  RunMatMulAutoShardingWithOptions(option, 8);
}
// Same 1D meshes, also supplying the alpha/beta communication-cost
// coefficients (one per mesh dimension).
TEST_F(AutoShardingTest, MatmulMeshShape1DAllOptions) {
  AutoShardingOption option;
  option.enable = true;
  option.device_mesh_shape = {4};
  option.device_mesh_ids = {0, 1, 2, 3};
  option.device_mesh_alpha = {1.0};
  option.device_mesh_beta = {1.0};
  RunMatMulAutoShardingWithOptions(option, 4);
  option.device_mesh_shape = {8};
  option.device_mesh_ids = {0, 1, 2, 3, 4, 5, 6, 7};
  option.device_mesh_alpha = {1.0};
  option.device_mesh_beta = {1.0};
  RunMatMulAutoShardingWithOptions(option, 8);
}
// 2x2 mesh with ids and alpha/beta: 4 tiles spread over both tensor
// dimensions; degenerate 1x4 / 4x1 meshes shard one dimension 4 ways.
TEST_F(AutoShardingTest, MatmulMeshShape2DAllOptions) {
  AutoShardingOption option;
  option.enable = true;
  option.device_mesh_shape = {2, 2};
  option.device_mesh_ids = {0, 1, 2, 3};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 1.0};
  option.allow_mixed_mesh_shape = false;
  RunMatMulAutoShardingWithOptions(option, 4, 2);
  option.enable = true;
  option.device_mesh_shape = {1, 4};
  RunMatMulAutoShardingWithOptions(option, 4);
  option.enable = true;
  option.device_mesh_shape = {4, 1};
  RunMatMulAutoShardingWithOptions(option, 4);
}
// Same 2D meshes, relying on default alpha/beta.
TEST_F(AutoShardingTest, MatmulMeshShape2DNoAlphaBeta) {
  AutoShardingOption option;
  option.enable = true;
  option.device_mesh_shape = {2, 2};
  option.device_mesh_ids = {0, 1, 2, 3};
  option.allow_mixed_mesh_shape = false;
  RunMatMulAutoShardingWithOptions(option, 4, 2);
  option.enable = true;
  option.device_mesh_shape = {1, 4};
  RunMatMulAutoShardingWithOptions(option, 4);
  option.enable = true;
  option.device_mesh_shape = {4, 1};
  RunMatMulAutoShardingWithOptions(option, 4);
}
// Same 2D meshes, with neither alpha/beta nor device ids specified.
TEST_F(AutoShardingTest, MatmulMeshShape2DNoAlphaBetaMeshIds) {
  AutoShardingOption option;
  option.enable = true;
  option.device_mesh_shape = {2, 2};
  option.allow_mixed_mesh_shape = false;
  RunMatMulAutoShardingWithOptions(option, 4, 2);
  option.enable = true;
  option.device_mesh_shape = {1, 4};
  RunMatMulAutoShardingWithOptions(option, 4);
  option.enable = true;
  option.device_mesh_shape = {4, 1};
  RunMatMulAutoShardingWithOptions(option, 4);
}
// Same 2D meshes, with alpha/beta but no device ids.
TEST_F(AutoShardingTest, MatmulMeshShape2DNoMeshIds) {
  AutoShardingOption option;
  option.enable = true;
  option.device_mesh_shape = {2, 2};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 1.0};
  option.allow_mixed_mesh_shape = false;
  RunMatMulAutoShardingWithOptions(option, 4, 2);
  option.enable = true;
  option.device_mesh_shape = {1, 4};
  RunMatMulAutoShardingWithOptions(option, 4);
  option.enable = true;
  option.device_mesh_shape = {4, 1};
  RunMatMulAutoShardingWithOptions(option, 4);
}
// Full 2x2x2 mesh: expect a [2,2,2] tile assignment with the last tile
// dimension replicated.
TEST_F(AutoShardingTest, MatmulMeshShape3DAllOptions) {
  AutoShardingOption option;
  option.enable = true;
  option.allow_mixed_mesh_shape = false;
  option.allow_recompute_heavy_op = false;
  option.device_mesh_shape = {2, 2, 2};
  option.device_mesh_ids = {0, 1, 2, 3, 4, 5, 6, 7};
  option.device_mesh_alpha = {1.0, 1.0, 1.0};
  option.device_mesh_beta = {0.01, 0.5, 1.0};
  RunMatMulAutoShardingWithOptionsNoDeviceIds(option, {2, 2, 2}, true);
}
// 3D meshes with one degenerate (size-1) dimension behave like 2x2 meshes:
// 4 tiles over 2 sharded tensor dimensions, regardless of which mesh
// dimension is trivial.
TEST_F(AutoShardingTest, Matmul3DMeshShape2DSharding) {
  AutoShardingOption option;
  option.enable = true;
  option.allow_mixed_mesh_shape = false;
  option.device_mesh_shape = {1, 2, 2};
  RunMatMulAutoShardingWithOptions(option, 4, 2);
  option.device_mesh_shape = {2, 1, 2};
  RunMatMulAutoShardingWithOptions(option, 4, 2);
  option.device_mesh_shape = {2, 2, 1};
  RunMatMulAutoShardingWithOptions(option, 4, 2);
}
// 3D meshes with one trivial dimension: the add is split into 8 tiles over
// the two non-trivial mesh dimensions, in every axis permutation.
TEST_F(AutoShardingTest, AddMeshShape3DAllOptions) {
  AutoShardingOption option;
  option.enable = true;
  option.allow_mixed_mesh_shape = false;
  option.device_mesh_shape = {1, 2, 4};
  option.device_mesh_ids = {0, 1, 2, 3, 4, 5, 6, 7};
  option.device_mesh_alpha = {1.0, 1.0, 1.0};
  option.device_mesh_beta = {0.01, 0.5, 1.0};
  RunAddAutoShardingWithOptions(option, 8, 2);
  option.device_mesh_shape = {4, 1, 2};
  RunAddAutoShardingWithOptions(option, 8, 2);
  option.device_mesh_shape = {1, 4, 2};
  RunAddAutoShardingWithOptions(option, 8, 2);
}
// As above, without alpha/beta.
TEST_F(AutoShardingTest, AddMeshShape3DNoAlphaBeta) {
  AutoShardingOption option;
  option.enable = true;
  option.allow_mixed_mesh_shape = false;
  option.device_mesh_shape = {1, 2, 4};
  option.device_mesh_ids = {0, 1, 2, 3, 4, 5, 6, 7};
  RunAddAutoShardingWithOptions(option, 8, 2);
  option.device_mesh_shape = {4, 1, 2};
  RunAddAutoShardingWithOptions(option, 8, 2);
  option.device_mesh_shape = {1, 4, 2};
  RunAddAutoShardingWithOptions(option, 8, 2);
}
// As above, without alpha/beta or device ids.
TEST_F(AutoShardingTest, AddMeshShape3DNoAlphaBetaMeshIds) {
  AutoShardingOption option;
  option.allow_mixed_mesh_shape = false;
  option.enable = true;
  option.device_mesh_shape = {1, 2, 4};
  RunAddAutoShardingWithOptions(option, 8, 2);
  option.device_mesh_shape = {4, 1, 2};
  RunAddAutoShardingWithOptions(option, 8, 2);
  option.device_mesh_shape = {1, 4, 2};
  RunAddAutoShardingWithOptions(option, 8, 2);
}
// As above, with alpha/beta but no device ids.
TEST_F(AutoShardingTest, AddMeshShape3DNoMeshIds) {
  AutoShardingOption option;
  option.allow_mixed_mesh_shape = false;
  option.enable = true;
  option.device_mesh_shape = {1, 2, 4};
  option.device_mesh_alpha = {1.0, 1.0, 1.0};
  option.device_mesh_beta = {0.01, 0.5, 1.0};
  RunAddAutoShardingWithOptions(option, 8, 2);
  option.device_mesh_shape = {4, 1, 2};
  RunAddAutoShardingWithOptions(option, 8, 2);
  option.device_mesh_shape = {1, 4, 2};
  RunAddAutoShardingWithOptions(option, 8, 2);
}
// Plain 2x2 mesh matmul: 4 tiles over both tensor dimensions.
TEST_F(AutoShardingTest, MatMulMeshShape2D) {
  AutoShardingOption option;
  option.allow_mixed_mesh_shape = false;
  option.enable = true;
  option.device_mesh_shape = {2, 2};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 1.0};
  RunMatMulAutoShardingWithOptions(option, 4, 2);
}
// Plain 2x2 mesh add: 4 tiles over two tensor dimensions.
TEST_F(AutoShardingTest, AddMeshShape2D) {
  AutoShardingOption option;
  option.allow_mixed_mesh_shape = false;
  option.enable = true;
  option.device_mesh_shape = {2, 2};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 1.0};
  RunAddAutoShardingWithOptions(option, 4, 2);
}
// Full 2x2x2 mesh add: the solver settles on a 2-way split along a single
// dimension (the default expected_sharded_dimensions of 1 applies).
TEST_F(AutoShardingTest, AddMeshShape3D) {
  AutoShardingOption option;
  option.enable = true;
  option.allow_mixed_mesh_shape = false;
  option.device_mesh_shape = {2, 2, 2};
  option.device_mesh_alpha = {1.0, 1.0, 1.0};
  option.device_mesh_beta = {0.01, 0.5, 1.0};
  RunAddAutoShardingWithOptions(option, 2);
}
// 56-device (1x2x4x7) mesh with a tight per-device memory budget: expect a
// full 56-way split along one tensor dimension.
TEST_F(AutoShardingTest, LargeSize) {
  AutoShardingOption option;
  option.enable = true;
  option.device_mesh_shape = {1, 2, 4, 7};
  option.device_mesh_alpha = {1.0, 1.0, 1.0, 1.0};
  option.device_mesh_beta = {1.0, 1.0, 1.0, 1.0};
  option.memory_budget_per_device = (8192 + 8192 * 2 + 8192 * 4 / 8);
  RunMatMulAutoShardingWithOptions(option, 56, 1);
}
// Malformed option sets must be rejected by CheckAndSetup() and must also
// cause the pass itself to fail.
TEST_F(AutoShardingTest, InvalidOptions) {
  // alpha/beta have 2 entries but the mesh has 3 dimensions.
  AutoShardingOption option;
  option.enable = true;
  option.device_mesh_shape = {1, 2, 4};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 0.5};
  EXPECT_FALSE(option.CheckAndSetup().ok());
  RunMatMulAutoShardingWithOptionsExpectFail(option);
  // No mesh shape supplied at all.
  AutoShardingOption empty_option;
  empty_option.enable = true;
  EXPECT_FALSE(empty_option.CheckAndSetup().ok());
  RunMatMulAutoShardingWithOptionsExpectFail(empty_option);
  // Mesh dimensions must be strictly positive (neither 0 nor negative).
  AutoShardingOption option_with_non_positive_mesh;
  option_with_non_positive_mesh.enable = true;
  option_with_non_positive_mesh.device_mesh_shape = {0, 4};
  EXPECT_FALSE(option_with_non_positive_mesh.CheckAndSetup().ok());
  RunMatMulAutoShardingWithOptionsExpectFail(option_with_non_positive_mesh);
  option_with_non_positive_mesh.device_mesh_shape = {-1, 4};
  EXPECT_FALSE(option_with_non_positive_mesh.CheckAndSetup().ok());
  RunMatMulAutoShardingWithOptionsExpectFail(option_with_non_positive_mesh);
  // 4 device ids cannot cover a 4x8 (32-device) mesh.
  AutoShardingOption option_not_compatible;
  option_not_compatible.enable = true;
  option_not_compatible.device_mesh_shape = {4, 8};
  option_not_compatible.device_mesh_ids = {1, 2, 3, 4};
  EXPECT_FALSE(option_not_compatible.CheckAndSetup().ok());
  RunMatMulAutoShardingWithOptionsExpectFail(option_not_compatible);
}
// Verifies that the per-device memory lower bound shrinks as the number of
// devices grows: shardings over more devices leave less data per device.
TEST_F(AutoShardingTest, MemoryBudgetTest) {
  // Schedules the module, computes buffer live ranges, and feeds the
  // resulting liveness set to spmd::MemoryBudgetLowerBound.
  auto compute_memory_budget_lower_bound =
      [](const HloModule& module, int64_t num_devices,
         const absl::flat_hash_map<std::string, std::vector<HloSharding>>&
             preserved_shardings = {}) -> absl::StatusOr<int64_t> {
    auto size_fn = [](const BufferValue& buffer) {
      return spmd::ByteSizeOfShape(buffer.shape());
    };
    TF_ASSIGN_OR_RETURN(HloSchedule schedule,
                        ScheduleModule(&module, size_fn,
                                       ComputationSchedulerToModuleScheduler(
                                           DFSMemoryScheduler),
                                       {}));
    const HloComputation* entry_computation = module.entry_computation();
    std::unique_ptr<HloAliasAnalysis> alias_analysis =
        HloAliasAnalysis::Run(&module).value();
    TF_ASSIGN_OR_RETURN(
        std::unique_ptr<HloLiveRange> hlo_live_range,
        HloLiveRange::Run(schedule, *alias_analysis, entry_computation));
    absl::flat_hash_map<const HloValue*, HloLiveRange::TimeBound>&
        buffer_live_ranges = hlo_live_range->buffer_live_ranges();
    // For each schedule time step, collect the set of values live at it.
    spmd::LivenessSet liveness_set(hlo_live_range->schedule_end_time() + 1);
    for (const auto& [hlo_value, live_range] : buffer_live_ranges) {
      for (spmd::LivenessIdx i = live_range.start; i <= live_range.end; ++i) {
        liveness_set[i].push_back(hlo_value);
      }
    }
    // Every entry-computation instruction participates in sharding.
    absl::flat_hash_set<const HloInstruction*> instructions_to_shard(
        module.entry_computation()->instructions().begin(),
        module.entry_computation()->instructions().end());
    return spmd::MemoryBudgetLowerBound(module, instructions_to_shard,
                                        liveness_set, *alias_analysis,
                                        num_devices, preserved_shardings);
  };
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[16384,16384]{0,1} parameter(0)
%param1 = f32[16384,16384]{0,1} parameter(1)
%add = f32[16384,16384]{0,1} add(%param0, %param1)
ROOT %copy = f32[16384,16384]{0,1} copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kHloString));
  TF_ASSERT_OK_AND_ASSIGN(HloSharding partial_sharding,
                          ParseSharding("{devices=[64,1]<=[64]}"));
  // Bound with no shardings attached, computed for 64 devices.
  TF_ASSERT_OK_AND_ASSIGN(
      int64_t partial_mesh_64x1_budget_lower_bound,
      compute_memory_budget_lower_bound(*module, 64));
  for (HloInstruction* ins : module->entry_computation()->instructions()) {
    ins->set_sharding(partial_sharding);
  }
  // Bound with every instruction sharded 64 ways, computed for 512 devices.
  TF_ASSERT_OK_AND_ASSIGN(
      int64_t full_mesh_64x8_budget_lower_bound,
      compute_memory_budget_lower_bound(*module, 512));
  CHECK_LT(full_mesh_64x8_budget_lower_bound,
           partial_mesh_64x1_budget_lower_bound)
      << "The memory budget lower bound per device should be lower with a "
         "larger number of devices. Instead, the bound was "
      << partial_mesh_64x1_budget_lower_bound << " bytes for 64 devices and "
      << full_mesh_64x8_budget_lower_bound << " bytes for 512 devices.";
}
// Elementwise add on a 2x2 mesh; expects param0 to get a [2,2] tiling.
// Currently disabled (DISABLED_ prefix), so gtest skips it by default.
TEST_F(AutoShardingTest, DISABLED_ElementWiseOperator) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[128,128]{0,1} parameter(0)
%param1 = f32[128,128]{0,1} parameter(1)
%add = f32[128,128]{0,1} add(%param0, %param1)
ROOT %copy = f32[128,128]{0,1} copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  AutoShardingOption option;
  option.enable = true;
  option.device_mesh_shape = {2, 2};
  option.device_mesh_ids = {0, 1, 2, 3};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 1.0};
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  VLOG(10) << module->ToString();
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "param0");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,2,1,3}"));
}
// With iterative ND sharding solving and user shardings preserved, the
// 256-way sharding annotated on the custom-call should end up on the slice.
TEST_F(AutoShardingTest, NDIterativeSolveTest) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
param = s32[512,3084]{1,0} parameter(0), sharding={devices=[256,1]<=[16,16]T(1,0)}
sharding_call = s32[512,3084]{1,0} custom-call(param), custom_call_target="Sharding", sharding={devices=[256,1]<=[256]}
ROOT slice = s32[512,2048]{1,0} slice(sharding_call), slice={[0:512], [0:2048]}
})";
  AutoShardingOption option;
  option.enable = true;
  option.solve_nd_sharding_iteratively = true;
  option.preserve_shardings =
      AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
  option.device_mesh_shape = {16, 16};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 1.0};
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  VLOG(10) << module->ToString();
  EXPECT_TRUE(changed);
  HloInstruction* slice = FindInstruction(module.get(), "slice");
  EXPECT_NE(slice, nullptr);
  EXPECT_THAT(slice, op::Sharding("{devices=[256,1]<=[256]}"));
}
// On an unannotated slice module with a 2x2 mesh, the slice should come out
// either fully 4-way sharded on its first dimension or 2-way sharded with
// the remaining devices replicating.
TEST_F(AutoShardingTest, SliceDeviceMeshTest) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
param = s32[512,3084]{1,0} parameter(0)
slice = s32[512,2048]{1,0} slice(param), slice={[0:512], [0:2048]}
ROOT copy = s32[512,2048]{1,0} copy(slice)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  AutoShardingOption option;
  option.enable = true;
  option.device_mesh_shape = {2, 2};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 1.0};
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  VLOG(10) << module->ToString();
  EXPECT_TRUE(changed);
  const HloInstruction* slice = FindInstruction(module.get(), "slice");
  ASSERT_NE(slice, nullptr);
  EXPECT_THAT(
      slice,
      AnyOf(op::Sharding("{devices=[4,1]0,1,2,3}"),
            op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}"),
            op::Sharding("{devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}")));
}
// When following the producer would give the slice an invalid strategy (a
// 64-way split of a 32-row result), the slice falls back to replication.
TEST_F(AutoShardingTest, SliceInvalidStrategyFollowingTest) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
param = s32[512,2084]{1,0} parameter(0)
slice = s32[32,2048]{1,0} slice(param), slice={[0:32], [0:2048]}
ROOT copy = s32[32,2048]{1,0} copy(slice)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  AutoShardingOption option;
  option.enable = true;
  option.device_mesh_shape = {64, 1};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 1.0};
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  VLOG(10) << module->ToString();
  EXPECT_TRUE(changed);
  const HloInstruction* slice = FindInstruction(module.get(), "slice");
  ASSERT_NE(slice, nullptr);
  EXPECT_THAT(slice, op::Sharding("{replicated}"));
}
// Here the parameter carries an explicit 64-way sharding; the slice is then
// expected to keep the 64-way split instead of being replicated.
TEST_F(AutoShardingTest, SliceForcedInvalidStrategyFollowingTest) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
param = s32[512,2084]{1,0} parameter(0), sharding={devices=[64,1]<=[64]}
slice = s32[32,2048]{1,0} slice(param), slice={[0:32], [0:2048]}
ROOT copy = s32[32,2048]{1,0} copy(slice)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, AutoSharding({.enable = true,
                                  .device_mesh_shape = {64, 1},
                                  .device_mesh_alpha = {1.0, 1.0},
                                  .device_mesh_beta = {0.01, 1.0}})
                        .Run(module.get()));
  VLOG(10) << module->ToString();
  EXPECT_TRUE(changed);
  const HloInstruction* slice = FindInstruction(module.get(), "slice");
  ASSERT_NE(slice, nullptr);
  EXPECT_THAT(slice, op::Sharding("{devices=[64,1]<=[64]}"));
}
// The unannotated iota should pick up the same partially replicated
// sharding as the parameter it is (indirectly) added to.
TEST_F(AutoShardingTest, IotaPartiallyReplicatedShardingTest) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
iota1 = s32[11,1026]{1,0} iota(), iota_dimension=1
param1 = s32[11,1026]{1,0} parameter(0), sharding={devices=[1,16,16]<=[16,16]T(1,0) last_tile_dim_replicate}
copy1 = s32[11,1026]{1,0} copy(iota1)
ROOT add1 = s32[11,1026]{1,0} add(copy1, param1)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      AutoSharding(
          {
              .enable = true,
              .preserve_shardings =
                  AutoShardingOption::PreserveShardingsType::kKeepAllShardings,
              .allow_mixed_mesh_shape = false,
              .only_allow_divisible_input_output = false,
              .device_mesh_shape = {16, 16},
              .device_mesh_alpha = {1.0, 1.0},
              .device_mesh_beta = {0.01, 1.0}})
          .Run(module.get()));
  VLOG(10) << module->ToString();
  EXPECT_TRUE(changed);
  const HloInstruction* iota = FindInstruction(module.get(), "iota1");
  ASSERT_NE(iota, nullptr);
  EXPECT_THAT(
      iota, op::Sharding(
                "{devices=[1,16,16]<=[16,16]T(1,0) last_tile_dim_replicate}"));
}
// A user-specified 4-way sharding on the parameter should propagate to
// every instruction in the computation (note the permuted mesh ids).
TEST_F(AutoShardingTest, SliceMixedUserShardingTest) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
param = s32[512,3084]{1,0} parameter(0), sharding={devices=[4,1]0,2,1,3}
slice = s32[512,2048]{1,0} slice(param), slice={[0:512], [0:2048]}
ROOT copy = s32[512,2048]{1,0} copy(slice)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      AutoSharding(
          {
              .enable = true,
              .preserve_shardings =
                  AutoShardingOption::PreserveShardingsType::kKeepAllShardings,
              .solve_nd_sharding_iteratively = true,
              .device_mesh_shape = {2, 2},
              .device_mesh_ids = {0, 2, 1, 3},
              .device_mesh_alpha = {1.0, 1.0},
              .device_mesh_beta = {0.01, 1.0}})
          .Run(module.get()));
  VLOG(10) << module->ToString();
  EXPECT_TRUE(changed);
  std::vector<HloInstruction*> instructions =
      module->entry_computation()->MakeInstructionPostOrder();
  // Every instruction must be sharded, and with the user's exact sharding.
  EXPECT_THAT(instructions,
              Each(ResultOf(
                  [](const HloInstruction* ins) { return ins->has_sharding(); },
                  IsTrue())));
  EXPECT_THAT(instructions, Each(op::Sharding("{devices=[4,1]0,2,1,3}")));
}
// When the dimension being sliced through is sharded identically on the
// operand and on the slice, the pass must not introduce any reshape ops.
TEST_F(AutoShardingTest, SlicedTensorDimensionShardedTest) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %slicemodule {
param = s32[512,3084]{1,0} parameter(0), sharding={devices=[1,4]0,2,1,3}
slice = s32[512,2048]{1,0} slice(param), slice={[0:512], [0:2048]}, sharding={devices=[1,4]0,2,1,3}
ROOT copy = s32[512,2048]{1,0} copy(slice)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      AutoSharding(
          {
              .enable = true,
              .preserve_shardings =
                  AutoShardingOption::PreserveShardingsType::kKeepAllShardings,
              .solve_nd_sharding_iteratively = true,
              .device_mesh_shape = {2, 2},
              .device_mesh_ids = {0, 2, 1, 3},
              .device_mesh_alpha = {1.0, 1.0},
              .device_mesh_beta = {0.01, 1.0}})
          .Run(module.get()));
  VLOG(10) << module->ToString();
  EXPECT_TRUE(changed);
  std::vector<HloInstruction*> instructions =
      module->entry_computation()->MakeInstructionPostOrder();
  // No instruction in the resulting module may be a reshape.
  EXPECT_THAT(instructions,
              Not(Contains(ResultOf(
                  [](const HloInstruction* ins) { return ins->opcode(); },
                  Eq(HloOpcode::kReshape)))));
}
// A pad carrying a user-specified 128-way sharding: the pass must accept
// the annotation (kKeepAllShardings) and still change the module.
TEST_F(AutoShardingTest, UserShardingTest) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
concatenate.76306 = bf16[1,4096,8,256]{3,2,1,0} parameter(0)
constant.15158 = bf16[] constant(0)
pad.70 = bf16[1,4352,8,256]{3,2,1,0} pad(concatenate.76306, constant.15158), padding=0_0x0_256x0_0x0_0, sharding={devices=[1,1,128,1]<=[128]}
ROOT copy.45 = bf16[1,4352,8,256]{3,2,1,0} copy(pad.70)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      AutoSharding(
          AutoShardingOption{
              .enable = true,
              .preserve_shardings =
                  AutoShardingOption::PreserveShardingsType::kKeepAllShardings,
              .device_mesh_shape = {128, 1},
              .device_mesh_alpha = {1.0, 1.0},
              .device_mesh_beta = {0.01, 1.0}})
          .Run(module.get()));
  VLOG(10) << module->ToString();
  EXPECT_TRUE(changed);
}
// Exercises allow_shardings_small_dims_across_many_devices for follower
// strategies: add.1 follows the 16x16-sharded parameter, whose first (size
// 8) dim is smaller than the 16-way mesh dimension that splits it.
TEST_F(AutoShardingTest,
       AllowShardingsSmallDimsAcrossManyDevicesForFollowersTest) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
parameter.1 = bf16[8,1024]{1,0} parameter(0), sharding={devices=[16,16]<=[256]}
add.1 = bf16[8,1024]{1,0} add(parameter.1, parameter.1)
ROOT copy.45 = bf16[8,1024]{1,0} copy(add.1)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      AutoSharding(
          AutoShardingOption{
              .enable = true,
              .preserve_shardings =
                  AutoShardingOption::PreserveShardingsType::kKeepAllShardings,
              .solve_nd_sharding_iteratively = false,
              .only_allow_divisible_input_output = false,
              .device_mesh_shape = {16, 16},
              .device_mesh_alpha = {1.0, 1.0},
              .device_mesh_beta = {0.01, 1.0},
              .allow_shardings_small_dims_across_many_devices = true})
          .Run(module.get()));
  VLOG(10) << module->ToString();
  EXPECT_TRUE(changed);
  // Flag on: add.1 may adopt the full 16x16 sharding of its operand.
  const HloInstruction* add1 = FindInstruction(module.get(), "add.1");
  EXPECT_THAT(add1, op::Sharding("{devices=[16,16]<=[256]}"));
  // Re-run on a fresh module with the flag off: the small dimension must
  // not be sharded across more devices than it has elements.
  TF_ASSERT_OK_AND_ASSIGN(module, ParseAndReturnVerifiedModule(kHloString));
  TF_ASSERT_OK_AND_ASSIGN(
      changed,
      AutoSharding(
          AutoShardingOption{
              .enable = true,
              .preserve_shardings =
                  AutoShardingOption::PreserveShardingsType::kKeepAllShardings,
              .solve_nd_sharding_iteratively = false,
              .only_allow_divisible_input_output = false,
              .device_mesh_shape = {16, 16},
              .device_mesh_alpha = {1.0, 1.0},
              .device_mesh_beta = {0.01, 1.0},
              .allow_shardings_small_dims_across_many_devices = false})
          .Run(module.get()));
  VLOG(10) << module->ToString();
  EXPECT_TRUE(changed);
  add1 = FindInstruction(module.get(), "add.1");
  EXPECT_THAT(add1, Not(op::Sharding("{devices=[16,16]<=[256]}")));
}
// Exercises allow_shardings_small_dims_across_many_devices for source
// strategies: parameter.1 has no annotation, replicated parameters are
// disallowed, and add.1 carries a 16-way split of the size-8 dimension.
TEST_F(AutoShardingTest,
       AllowShardingsSmallDimsAcrossManyDevicesForSourcesTest) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
parameter.1 = bf16[8,1024]{1,0} parameter(0)
add.1 = bf16[8,1024]{1,0} add(parameter.1, parameter.1), sharding={devices=[16,1,16]<=[256] last_tile_dim_replicate}
ROOT copy.45 = bf16[8,1024]{1,0} copy(add.1)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      AutoSharding(
          AutoShardingOption{
              .enable = true,
              .preserve_shardings =
                  AutoShardingOption::PreserveShardingsType::kKeepAllShardings,
              .allow_replicated_parameters = false,
              .allow_mixed_mesh_shape = false,
              .solve_nd_sharding_iteratively = false,
              .only_allow_divisible_input_output = false,
              .device_mesh_shape = {16, 16},
              .device_mesh_alpha = {1.0, 1.0},
              .device_mesh_beta = {0.01, 1.0},
              .allow_shardings_small_dims_across_many_devices = true})
          .Run(module.get()));
  VLOG(10) << module->ToString();
  EXPECT_TRUE(changed);
  // Flag on: the parameter may adopt the 16-way split of its size-8 dim.
  const HloInstruction* parameter1 =
      FindInstruction(module.get(), "parameter.1");
  EXPECT_THAT(
      parameter1,
      op::Sharding("{devices=[16,1,16]<=[256] last_tile_dim_replicate}"));
  // Re-run on a fresh module with the flag off.
  TF_ASSERT_OK_AND_ASSIGN(module, ParseAndReturnVerifiedModule(kHloString));
  TF_ASSERT_OK_AND_ASSIGN(
      changed,
      AutoSharding(
          AutoShardingOption{
              .enable = true,
              .preserve_shardings =
                  AutoShardingOption::PreserveShardingsType::kKeepAllShardings,
              .allow_replicated_parameters = false,
              .allow_mixed_mesh_shape = false,
              .solve_nd_sharding_iteratively = false,
              .only_allow_divisible_input_output = false,
              .device_mesh_shape = {16, 16},
              .device_mesh_alpha = {1.0, 1.0},
              .device_mesh_beta = {0.01, 1.0},
              .allow_shardings_small_dims_across_many_devices = false})
          .Run(module.get()));
  VLOG(10) << module->ToString();
  EXPECT_TRUE(changed);
  // Flag off: the small dimension must not be sharded across 16 devices.
  parameter1 = FindInstruction(module.get(), "parameter.1");
  EXPECT_THAT(
      parameter1,
      Not(op::Sharding("{devices=[16,1,16]<=[256] last_tile_dim_replicate}")));
}
// Checks that an rng-bit-generator with a plain array state input compiles
// under auto-sharding on a 2x2 mesh, and that the small u64[2] state
// parameter ends up replicated rather than tiled.
TEST_F(AutoShardingTest, RngBitGeneratorArrayInput) {
  constexpr absl::string_view kHloString = R"(
HloModule rng_bit_generator
ENTRY %RngBitGenerator (p0: u64[2]) -> (u64[2], u32[16,16]) {
  %p0 = u64[2]{0} parameter(0)
  ROOT %rand = (u64[2]{0}, u32[16,16]{1,0}) rng-bit-generator(u64[2]{0} %p0), algorithm=rng_three_fry
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  AutoShardingOption option;
  option.enable = true;
  option.device_mesh_shape = {2, 2};
  option.device_mesh_ids = {0, 1, 2, 3};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {1.0, 1.0};
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  VLOG(10) << module->ToString();
  EXPECT_TRUE(changed);
  const HloInstruction* instruction = FindInstruction(module.get(), "p0");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{replicated}"));
}
// Exercises a manual-sharding region (SPMDFullToShardShape ->
// SPMDShardToFullShape) that also consumes broadcasted constants both inside
// and outside the region. Even with kRemoveAllShardings, the shardings on
// the custom-calls delimiting the manual region must be preserved, and the
// constant must not be deduplicated across the manual/auto boundary (two
// kConstant instructions expected after the pass).
TEST_F(AutoShardingTest, SPMDShardToFullShapeWithConstantTest) {
  constexpr absl::string_view kHloString = R"(
HloModule rng_bit_generator
add.6.clone {
  y.13 = bf16[]{:T(256)} parameter(1)
  x.13 = bf16[]{:T(256)} parameter(0)
  ROOT add.9011 = bf16[]{:T(256)} add(x.13, y.13)
}
ENTRY main {
  input.1 = bf16[512,512]{1,0} parameter(0)
  constant.1 = bf16[] constant(16.7)
  broadcast.1 = bf16[128,128]{1,0} broadcast(constant.1), dimensions={}
  broadcast.2 = bf16[512,512]{1,0} broadcast(constant.1), dimensions={}
  custom-call.1 = bf16[512,512]{1,0} custom-call(input.1), custom_call_target="Sharding", sharding={devices=[4,4]<=[16]}
  custom-call.2 = bf16[128,128]{1,0} custom-call(custom-call.1), custom_call_target="SPMDFullToShardShape", sharding={manual}
  all-reduce.1 = bf16[128,128]{1,0} all-reduce(custom-call.2), channel_id=621, replica_groups={{0,1,2,3},{4,5,6,7},{8,9,10,11},{12,13,14,15}}, use_global_device_ids=true, to_apply=add.6.clone, frontend_attributes={from-cross-replica-sharding="true"}, backend_config={"flag_configs":[],"barrier_config":{"barrier_type":"CUSTOM","id":"9"},"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_DEFAULT","device_type":"DEVICE_TYPE_INVALID","used_scoped_memory_configs":[]}
  add.1 = bf16[128,128]{1,0} add(bf16[128,128]{1,0} all-reduce.1, bf16[128,128]{1,0} broadcast.1)
  custom-call.3 = bf16[512,512]{1,0} custom-call(add.1), custom_call_target="SPMDShardToFullShape", sharding={devices=[4,1,4]<=[16]last_tile_dim_replicate}
  add.2 = bf16[512,512]{1,0} add(bf16[512,512]{1,0} custom-call.3, bf16[512,512]{1,0} broadcast.2)
  ROOT copy.1 = bf16[512,512]{1,0} copy(add.2)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  AutoShardingOption option;
  option.preserve_shardings =
      AutoShardingOption::PreserveShardingsType::kRemoveAllShardings;
  option.enable = true;
  option.device_mesh_shape = {4, 4};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {1.0, 1.0};
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  VLOG(10) << module->ToString();
  EXPECT_TRUE(changed);
  // The manual-region boundary custom-calls keep their original shardings.
  const HloInstruction* custom_call2 =
      FindInstruction(module.get(), "custom-call.2");
  ASSERT_NE(custom_call2, nullptr);
  EXPECT_THAT(custom_call2, op::Sharding("{manual}"));
  const HloInstruction* custom_call3 =
      FindInstruction(module.get(), "custom-call.3");
  ASSERT_NE(custom_call3, nullptr);
  EXPECT_THAT(custom_call3,
              op::Sharding("{devices=[4,1,4]<=[16]last_tile_dim_replicate}"));
  const HloInstruction* custom_call1 = custom_call2->operand(0);
  ASSERT_NE(custom_call1, nullptr);
  EXPECT_THAT(custom_call1, op::Sharding("{devices=[4,4]<=[16]}"));
  std::vector<const HloInstruction*> instructions(
      module->entry_computation()->instructions().begin(),
      module->entry_computation()->instructions().end());
  // Expect exactly two constants: the one feeding the manual region must stay
  // separate from the one used outside it.
  EXPECT_THAT(
      module->entry_computation()->instructions(),
      Contains(ResultOf(
                   "opcode",
                   [](const HloInstruction* ins) { return ins->opcode(); },
                   Eq(HloOpcode::kConstant)))
          .Times(2));
}
// A module whose manual region reshapes 128x128 -> 64x256 implies two
// conflicting mesh shapes ({4,4} vs {8,2}). With try_multiple_mesh_shapes
// disabled, auto-sharding is expected to abort (CHECK-fail) with a message
// telling the user to enable that option.
TEST_F(AutoShardingTest, SPMDShardToFullShapeMultipleValidMeshShapeTest) {
  constexpr absl::string_view kHloString = R"(
HloModule rng_bit_generator
add.6.clone {
  y.13 = bf16[]{:T(256)} parameter(1)
  x.13 = bf16[]{:T(256)} parameter(0)
  ROOT add.9011 = bf16[]{:T(256)} add(x.13, y.13)
}
ENTRY main {
  input.1 = bf16[512,512]{1,0} parameter(0)
  custom-call.1 = bf16[512,512]{1,0} custom-call(input.1), custom_call_target="Sharding", sharding={devices=[4,4]<=[16]}
  custom-call.2 = bf16[128,128]{1,0} custom-call(custom-call.1), custom_call_target="SPMDFullToShardShape", sharding={manual}
  all-reduce.1 = bf16[128,128]{1,0} all-reduce(custom-call.2), channel_id=621, replica_groups={{0,1,2,3},{4,5,6,7},{8,9,10,11},{12,13,14,15}}, use_global_device_ids=true, to_apply=add.6.clone, frontend_attributes={from-cross-replica-sharding="true"}, backend_config={"flag_configs":[],"barrier_config":{"barrier_type":"CUSTOM","id":"9"},"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_DEFAULT","device_type":"DEVICE_TYPE_INVALID","used_scoped_memory_configs":[]}
  reshape.1 = bf16[64,2,128]{2,1,0} reshape(bf16[128,128]{1,0} all-reduce.1)
  reshape.2 = bf16[64,256]{1,0} reshape(bf16[64,2,128]{2,1,0} reshape.1)
  custom-call.3 = bf16[512,512]{1,0} custom-call(reshape.2), custom_call_target="SPMDShardToFullShape", sharding={devices=[8,2]<=[16]}
  ROOT copy.1 = copy(custom-call.3)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  AutoShardingOption option;
  option.preserve_shardings =
      AutoShardingOption::PreserveShardingsType::kRemoveAllShardings;
  option.enable = true;
  option.try_multiple_mesh_shapes = false;
  option.device_mesh_shape = {4, 4};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {1.0, 1.0};
  // NOTE(review): "AutoShardingption" looks like a typo, but EXPECT_DEATH
  // matches the production error string — presumably the typo exists in the
  // pass itself; confirm against auto_sharding.cc before "fixing" either side.
  EXPECT_DEATH(auto status = AutoSharding(option).Run(module.get()),
               "Auto-sharding cannot infer a single appropriate mesh shape for "
               "this HLO, and AutoShardingption::try_multiple_mesh_shapes is "
               "set to false. Please re-run with the option set to true.");
}
// Checks that an rng-bit-generator whose state input is a tuple of two u32[2]
// arrays compiles under auto-sharding on a 2x2 mesh, and that both small
// state parameters end up replicated.
TEST_F(AutoShardingTest, RngBitGeneratorTupleInput) {
  constexpr absl::string_view kHloString = R"(
HloModule rng_bit_generator
ENTRY %RngBitGenerator {
  param.0 = u32[2]{0:T(128)} parameter(0)
  param.1 = u32[2]{0:T(128)} parameter(1)
  tuple.3 = (u32[2]{0:T(128)}, u32[2]{0:T(128)}) tuple(param.0, param.1)
  ROOT rng-bit-generator = u32[100,100]{1,0:T(8,128)} rng-bit-generator(tuple.3), algorithm=rng_default
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  AutoShardingOption option;
  option.enable = true;
  option.device_mesh_shape = {2, 2};
  option.device_mesh_ids = {0, 1, 2, 3};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 1.0};
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  VLOG(10) << module->ToString();
  EXPECT_TRUE(changed);
  const HloInstruction* param0 = FindInstruction(module.get(), "param.0");
  const HloInstruction* param1 = FindInstruction(module.get(), "param.1");
  ASSERT_NE(param0, nullptr);
  // Fixed: the second null-check previously re-checked param0, leaving the
  // EXPECT_THAT on param1 below unguarded against a null dereference.
  ASSERT_NE(param1, nullptr);
  EXPECT_THAT(param0, op::Sharding("{replicated}"));
  EXPECT_THAT(param1, op::Sharding("{replicated}"));
}
// A dot whose LHS carries a user 1-D sharding ({devices=[4,1]...}) on a 2-D
// {2,2} mesh: the solver must keep the user annotation on param0 and may
// place the dot on either the 1-D or the 2-D mesh layout (mixed-mesh
// strategies), hence the AnyOf on the dot's sharding.
TEST_F(AutoShardingTest, DotMixedMeshStrategies) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[8192,23]{1,0} parameter(0), sharding={devices=[4,1]0,1,2,3}
  %param1 = f32[23,23]{1,0} parameter(1)
  %dot = f32[8192,23]{1,0} dot(%param0, %param1), lhs_contracting_dims={1}, rhs_contracting_dims={1}
  ROOT %copy = f32[8192,23]{1,0} copy(%dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  AutoShardingOption option;
  option.enable = true;
  option.device_mesh_shape = {2, 2};
  option.device_mesh_ids = {0, 1, 2, 3};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 1.0};
  option.solve_nd_sharding_iteratively = false;
  option.preserve_shardings =
      AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  VLOG(2) << module->ToString();
  EXPECT_TRUE(changed);
  const HloInstruction* param0 = FindInstruction(module.get(), "param0");
  const HloInstruction* param1 = FindInstruction(module.get(), "param1");
  const HloInstruction* dot = FindInstruction(module.get(), "dot");
  ASSERT_NE(param0, nullptr);
  ASSERT_NE(param1, nullptr);
  ASSERT_NE(dot, nullptr);
  EXPECT_THAT(param0, op::Sharding("{devices=[4,1]0,1,2,3}"));
  EXPECT_THAT(param1, op::Sharding("{replicated}"));
  EXPECT_THAT(dot, AnyOf(op::Sharding("{devices=[4,1]0,1,2,3}"),
                         op::Sharding("{devices=[2,2]<=[4]}")));
}
// When user shardings on a dot's operands conflict with the dot's own
// sharding, auto-sharding must insert a resharding between param1 and the
// dot (so dot->operand(1) != param1) while param0 remains directly connected.
TEST_F(AutoShardingTest, DotInsertReshardingReshapes) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[256,256]{1,0} parameter(0), sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}
  %param1 = f32[256,256]{1,0} parameter(1), sharding={devices=[2,2]0,1,2,3}
  %dot = f32[256,256]{1,0} dot(%param0, %param1), lhs_contracting_dims={1}, rhs_contracting_dims={1}, sharding={devices=[2,2]0,1,2,3}
  ROOT %copy = f32[256,256]{1,0} copy(%dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  AutoShardingOption option;
  option.enable = true;
  option.device_mesh_shape = {2, 2};
  option.device_mesh_ids = {0, 1, 2, 3};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 1.0};
  option.preserve_shardings =
      AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  VLOG(2) << module->ToString();
  EXPECT_TRUE(changed);
  const HloInstruction* param0 = FindInstruction(module.get(), "param0");
  const HloInstruction* param1 = FindInstruction(module.get(), "param1");
  const HloInstruction* dot = FindInstruction(module.get(), "dot");
  ASSERT_NE(param0, nullptr);
  ASSERT_NE(param1, nullptr);
  ASSERT_NE(dot, nullptr);
  // LHS stays direct; RHS gains an inserted resharding op in between.
  EXPECT_EQ(dot->operand(0), param0);
  EXPECT_NE(dot->operand(1), param1);
}
// Dot with two non-contracting dims on the LHS (f32[4,256,64] x f32[64,32]):
// on a 2x2 mesh the solver has several symmetric optimal placements, so the
// expected (param0, param1, dot) sharding triple is checked against the full
// set of equivalent solutions via AnyOf/FieldsAre.
TEST_F(AutoShardingTest, DotLHSTwoNonContractingDims) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[4,256,64]{2,1,0} parameter(0)
  %param1 = f32[64,32]{0,1} parameter(1)
  %dot = f32[4,256,32]{2,1,0} dot(f32[4,256,64]{2,1,0} %param0, f32[64,32]{0,1} %param1), lhs_contracting_dims={2}, rhs_contracting_dims={0}
  ROOT %copy = f32[4,256,32]{2,1,0} copy(%dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  AutoShardingOption option;
  option.enable = true;
  option.device_mesh_shape = {2, 2};
  option.device_mesh_ids = {0, 1, 2, 3};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 1.0};
  option.allow_mixed_mesh_shape = false;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  VLOG(2) << module->ToString();
  EXPECT_TRUE(changed);
  const HloInstruction* param0 = FindInstruction(module.get(), "param0");
  const HloInstruction* param1 = FindInstruction(module.get(), "param1");
  const HloInstruction* dot = FindInstruction(module.get(), "dot");
  ASSERT_NE(param0, nullptr);
  ASSERT_NE(param1, nullptr);
  ASSERT_NE(dot, nullptr);
  // Each FieldsAre row is one consistent assignment of (lhs, rhs, dot)
  // shardings; they differ only by which mesh axis maps to which tensor dim.
  EXPECT_THAT(
      std::make_tuple(param0, param1, dot),
      AnyOf(
          FieldsAre(
              op::Sharding(
                  "{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}"),
              op::Sharding("{devices=[1,2,2]0,2,1,3 last_tile_dim_replicate}"),
              op::Sharding("{devices=[1,2,2]0,1,2,3}")),
          FieldsAre(
              op::Sharding(
                  "{devices=[1,2,1,2]0,2,1,3 last_tile_dim_replicate}"),
              op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"),
              op::Sharding("{devices=[1,2,2]0,2,1,3}")),
          FieldsAre(
              op::Sharding(
                  "{devices=[2,1,1,2]0,1,2,3 last_tile_dim_replicate}"),
              op::Sharding("{devices=[1,2,2]0,2,1,3 last_tile_dim_replicate}"),
              op::Sharding("{devices=[2,1,2]0,1,2,3}")),
          FieldsAre(
              op::Sharding(
                  "{devices=[2,1,1,2]0,2,1,3 last_tile_dim_replicate}"),
              op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"),
              op::Sharding("{devices=[2,1,2]0,2,1,3}"))));
}
// Mirror of DotLHSTwoNonContractingDims: here the RHS (f32[4,256,4,8]) has
// two non-contracting dims. The solver again has multiple symmetric optimal
// placements on the 2x2 mesh, enumerated via AnyOf/FieldsAre.
TEST_F(AutoShardingTest, DotRHSTwoNonContractingDims) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[4,256,32]{2,1,0} parameter(0)
  %param1 = f32[4,256,4,8]{1,3,2,0} parameter(1)
  %dot = f32[32,4,8]{2,1,0} dot(f32[4,256,32]{2,1,0} %param0, f32[4,256,4,8]{1,3,2,0} %param1), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}
  ROOT %copy = f32[32,4,8]{2,1,0} copy(%dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  AutoShardingOption option;
  option.enable = true;
  option.device_mesh_shape = {2, 2};
  option.device_mesh_ids = {0, 1, 2, 3};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 1.0};
  option.allow_mixed_mesh_shape = false;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  VLOG(2) << module->ToString();
  EXPECT_TRUE(changed);
  const HloInstruction* param0 = FindInstruction(module.get(), "param0");
  const HloInstruction* param1 = FindInstruction(module.get(), "param1");
  const HloInstruction* dot = FindInstruction(module.get(), "dot");
  ASSERT_NE(param0, nullptr);
  ASSERT_NE(param1, nullptr);
  ASSERT_NE(dot, nullptr);
  // Equivalent optimal assignments differing only in axis-to-dim mapping.
  EXPECT_THAT(
      std::make_tuple(param0, param1, dot),
      AnyOf(
          FieldsAre(op::Sharding(
                        "{devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}"),
                    op::Sharding(
                        "{devices=[1,1,2,1,2]0,2,1,3 last_tile_dim_replicate}"),
                    op::Sharding("{devices=[2,2,1]0,1,2,3}")),
          FieldsAre(op::Sharding(
                        "{devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}"),
                    op::Sharding(
                        "{devices=[1,1,1,2,2]0,2,1,3 last_tile_dim_replicate}"),
                    op::Sharding("{devices=[2,1,2]0,1,2,3}")),
          FieldsAre(op::Sharding(
                        "{devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}"),
                    op::Sharding(
                        "{devices=[1,1,1,2,2]0,1,2,3 last_tile_dim_replicate}"),
                    op::Sharding("{devices=[2,1,2]0,2,1,3}")),
          FieldsAre(op::Sharding(
                        "{devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}"),
                    op::Sharding(
                        "{devices=[1,1,2,1,2]0,1,2,3 last_tile_dim_replicate}"),
                    op::Sharding("{devices=[2,2,1]0,2,1,3}"))));
}
// Dot contracting over two dims ({0,1}) on both operands, leaving one free
// dim per side. Two symmetric 2x2-mesh solutions are possible (mesh axes
// swapped), so the expected triple is checked via AnyOf/FieldsAre.
TEST_F(AutoShardingTest, DotTwoContractingDims) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[4,256,64]{2,1,0} parameter(0)
  %param1 = f32[4,256,32]{2,1,0} parameter(1)
  %dot = f32[64,32]{1,0} dot(f32[4,256,64]{2,1,0} %param0, f32[4,256,32]{2,1,0} %param1), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}
  ROOT %copy = f32[64,32]{1,0} copy(%dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  AutoShardingOption option;
  option.enable = true;
  option.device_mesh_shape = {2, 2};
  option.device_mesh_ids = {0, 1, 2, 3};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 1.0};
  option.allow_mixed_mesh_shape = false;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  VLOG(2) << module->ToString();
  EXPECT_TRUE(changed);
  const HloInstruction* param0 = FindInstruction(module.get(), "param0");
  const HloInstruction* param1 = FindInstruction(module.get(), "param1");
  const HloInstruction* dot = FindInstruction(module.get(), "dot");
  ASSERT_NE(param0, nullptr);
  ASSERT_NE(param1, nullptr);
  ASSERT_NE(dot, nullptr);
  EXPECT_THAT(
      std::make_tuple(param0, param1, dot),
      AnyOf(FieldsAre(op::Sharding(
                          "{devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}"),
                      op::Sharding(
                          "{devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}"),
                      op::Sharding("{devices=[2,2]0,2,1,3}")),
            FieldsAre(op::Sharding(
                          "{devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}"),
                      op::Sharding(
                          "{devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}"),
                      op::Sharding("{devices=[2,2]0,1,2,3}"))));
}
// Chained matmuls with allow_recompute_heavy_op = false: the solver may not
// replicate the heavy dot ops, so a unique fully-determined sharding per
// instruction is expected and asserted exactly.
TEST_F(AutoShardingTest, TwoMatmulWithoutDotReplicationEnabled) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY twomatmul {
  parameter.1 = f32[64,64]{1,0} parameter(0)
  parameter.2 = f32[64,128]{1,0} parameter(1)
  dot.4 = f32[64,128]{1,0} dot(parameter.1, parameter.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  parameter.3 = f32[128,64]{1,0} parameter(2)
  ROOT dot.5 = f32[64,64]{1,0} dot(dot.4, parameter.3), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  AutoShardingOption option;
  option.enable = true;
  option.allow_recompute_heavy_op = false;
  option.allow_mixed_mesh_shape = false;
  option.device_mesh_shape = {2, 2};
  option.device_mesh_ids = {0, 1, 2, 3};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 1.0};
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  VLOG(10) << module->ToString();
  EXPECT_TRUE(changed);
  const HloInstruction* param1 = FindInstruction(module.get(), "parameter.1");
  ASSERT_NE(param1, nullptr);
  EXPECT_THAT(param1,
              op::Sharding("{devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}"));
  const HloInstruction* param2 = FindInstruction(module.get(), "parameter.2");
  ASSERT_NE(param2, nullptr);
  EXPECT_THAT(param2,
              op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
  const HloInstruction* param3 = FindInstruction(module.get(), "parameter.3");
  ASSERT_NE(param3, nullptr);
  EXPECT_THAT(param3,
              op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}"));
  const HloInstruction* dot4 = FindInstruction(module.get(), "dot.4");
  ASSERT_NE(dot4, nullptr);
  EXPECT_THAT(dot4, op::Sharding("{devices=[2,2]0,2,1,3}"));
  const HloInstruction* dot5 = FindInstruction(module.get(), "dot.5");
  ASSERT_NE(dot5, nullptr);
  EXPECT_THAT(dot5,
              op::Sharding("{devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}"));
}
// Same chained-matmul module as above, but with allow_recompute_heavy_op =
// true: replicating dot.4 (and its inputs) becomes a legal strategy, so the
// expected solution replicates everything except the final dot, with two
// symmetric variants for the last dot's mesh-axis assignment.
TEST_F(AutoShardingTest, TwoMatmulWithDotReplicationEnabled) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY twomatmul {
  parameter.1 = f32[64,64]{1,0} parameter(0)
  parameter.2 = f32[64,128]{1,0} parameter(1)
  dot.4 = f32[64,128]{1,0} dot(parameter.1, parameter.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  parameter.3 = f32[128,64]{1,0} parameter(2)
  ROOT dot.5 = f32[64,64]{1,0} dot(dot.4, parameter.3), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  AutoShardingOption option;
  option.enable = true;
  option.allow_recompute_heavy_op = true;
  option.allow_mixed_mesh_shape = false;
  option.device_mesh_shape = {2, 2};
  option.device_mesh_ids = {0, 1, 2, 3};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 1.0};
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  VLOG(10) << module->ToString();
  EXPECT_TRUE(changed);
  const HloInstruction* param1 = FindInstruction(module.get(), "parameter.1");
  const HloInstruction* param2 = FindInstruction(module.get(), "parameter.2");
  const HloInstruction* param3 = FindInstruction(module.get(), "parameter.3");
  const HloInstruction* dot4 = FindInstruction(module.get(), "dot.4");
  const HloInstruction* dot5 = FindInstruction(module.get(), "dot.5");
  ASSERT_NE(param1, nullptr);
  ASSERT_NE(param2, nullptr);
  ASSERT_NE(param3, nullptr);
  ASSERT_NE(dot4, nullptr);
  ASSERT_NE(dot5, nullptr);
  EXPECT_THAT(
      std::make_tuple(param1, param2, param3, dot4, dot5),
      AnyOf(
          FieldsAre(
              op::Sharding("{replicated}"), op::Sharding("{replicated}"),
              op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"),
              op::Sharding("{replicated}"),
              op::Sharding("{devices=[2,2]0,2,1,3}")),
          FieldsAre(
              op::Sharding("{replicated}"), op::Sharding("{replicated}"),
              op::Sharding("{devices=[1,2,2]0,2,1,3 last_tile_dim_replicate}"),
              op::Sharding("{replicated}"),
              op::Sharding("{devices=[2,2]0,1,2,3}"))));
}
// A "Sharding" custom-call annotation (with unspecified_dims in its backend
// config) must propagate its sharding onto the annotated operand: after the
// pass, the copy feeding the annotation carries the same sharding.
TEST_F(AutoShardingTest, ProcessCustomCallShardings) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[6,3] parameter(0)
  %copy = f32[6,3] copy(%param0)
  %annotate = f32[6,3] custom-call(%copy), custom_call_target="Sharding",
    backend_config="unspecified_dims=[1]",
    sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}
  %copy.2 = f32[6,3] copy(%annotate)
  ROOT %copy.3 = f32[6,3] copy(%copy.2)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  AutoShardingOption option;
  option.enable = true;
  option.device_mesh_shape = {2, 2};
  option.preserve_shardings =
      AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  EXPECT_TRUE(changed);
  auto* copy = FindInstruction(module.get(), "copy");
  ASSERT_NE(copy, nullptr);
  EXPECT_TRUE(copy->has_sharding());
  EXPECT_THAT(copy,
              op::Sharding("{devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}"));
}
// Unit test of SaveAndRemoveShardingAnnotation with kKeepAllShardings:
// every user sharding must be saved into preserved_shardings, nothing is
// stripped from the module, and module_is_changed is false.
TEST_F(AutoShardingTest, SaveAndRemoveShardingAnnotationKeepAll) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry (param0: f32[4,256,64], param1: f32[4,256,32]) -> f32[64,32] {
  %param0 = f32[4,256,64]{2,1,0} parameter(0), sharding={devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}
  %param1 = f32[4,256,32]{2,1,0} parameter(1), sharding={devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}
  %dot = f32[64,32]{1,0} dot(f32[4,256,64]{2,1,0} %param0, f32[4,256,32]{2,1,0} %param1), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}, sharding={devices=[2,2]0,1,2,3}
  ROOT %copy = f32[64,32]{1,0} copy(f32[64,32]{1,0} %dot), sharding={devices=[2,2]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  AutoShardingOption option;
  option.preserve_shardings =
      AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
  absl::flat_hash_set<const HloInstruction*> instructions_to_shard(
      module->entry_computation()->instructions().begin(),
      module->entry_computation()->instructions().end());
  TF_ASSERT_OK_AND_ASSIGN(
      AutoShardingImplementation::SaveShardingAnnotationsResult
          saved_shardings_result,
      AutoShardingImplementation(option).SaveAndRemoveShardingAnnotation(
          module.get(), instructions_to_shard,
          /* replicated_small_tensors */ {},
          /* execution_threads */ {}));
  absl::flat_hash_map<std::string, std::vector<HloSharding>> saved_shardings =
      saved_shardings_result.preserved_shardings;
  EXPECT_FALSE(saved_shardings_result.module_is_changed);
  std::vector<HloInstruction*> instructions =
      module->entry_computation()->MakeInstructionPostOrder();
  // All instructions keep their shardings in the module itself.
  EXPECT_THAT(instructions,
              Each(ResultOf(
                  [](const HloInstruction* ins) { return ins->has_sharding(); },
                  IsTrue())));
  // Helper: parse a sharding string, CHECK-failing on malformed test input.
  auto verified_parse_sharding = [](const absl::string_view sharding_str) {
    absl::StatusOr<HloSharding> sharding = ParseSharding(sharding_str);
    CHECK_OK(sharding);
    return *sharding;
  };
  EXPECT_THAT(
      saved_shardings,
      UnorderedElementsAre(
          Pair("param0",
               ElementsAre(verified_parse_sharding(
                   "{devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}"))),
          Pair("param1",
               ElementsAre(verified_parse_sharding(
                   "{devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}"))),
          Pair("dot",
               ElementsAre(verified_parse_sharding("{devices=[2,2]0,1,2,3}"))),
          Pair("copy", ElementsAre(verified_parse_sharding(
                           "{devices=[2,2]0,1,2,3}")))));
}
// kKeepInputOutputShardings + "dot" listed as a replicated small tensor:
// parameter/output shardings and the small tensor's replicated sharding are
// all saved, the module keeps every annotation, and it is reported unchanged.
TEST_F(AutoShardingTest,
       SaveAndRemoveShardingAnnotationKeepInputOutputSmallTensor) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry (param0: f32[4,256,64], param1: f32[4,256,32]) -> f32[64,32] {
  %param0 = f32[4,256,64]{2,1,0} parameter(0), sharding={devices=[2,2,1]0,1,2,3}
  %param1 = f32[4,256,32]{2,1,0} parameter(1), sharding={devices=[2,2,1]0,1,2,3}
  %dot = f32[64,32]{1,0} dot(f32[4,256,64]{2,1,0} %param0, f32[4,256,32]{2,1,0} %param1), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}, sharding={replicated}
  ROOT %copy = f32[64,32]{1,0} copy(f32[64,32]{1,0} %dot), sharding={devices=[2,2]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  AutoShardingOption option;
  option.preserve_shardings =
      AutoShardingOption::PreserveShardingsType::kKeepInputOutputShardings;
  absl::flat_hash_set<const HloInstruction*> instructions_to_shard(
      module->entry_computation()->instructions().begin(),
      module->entry_computation()->instructions().end());
  TF_ASSERT_OK_AND_ASSIGN(
      AutoShardingImplementation::SaveShardingAnnotationsResult
          saved_shardings_result,
      AutoShardingImplementation(option).SaveAndRemoveShardingAnnotation(
          module.get(), instructions_to_shard,
          /* replicated_small_tensors */ {"dot"},
          /* execution_threads */ {}));
  absl::flat_hash_map<std::string, std::vector<HloSharding>> saved_shardings =
      saved_shardings_result.preserved_shardings;
  EXPECT_FALSE(saved_shardings_result.module_is_changed);
  std::vector<HloInstruction*> instructions =
      module->entry_computation()->MakeInstructionPostOrder();
  // No annotation was stripped from the module.
  EXPECT_THAT(instructions,
              Each(ResultOf(
                  [](const HloInstruction* ins) { return ins->has_sharding(); },
                  IsTrue())))
  ;
  // Helper: parse a sharding string, CHECK-failing on malformed test input.
  auto verified_parse_sharding = [](const absl::string_view sharding_str) {
    absl::StatusOr<HloSharding> sharding = ParseSharding(sharding_str);
    CHECK_OK(sharding);
    return *sharding;
  };
  EXPECT_THAT(
      saved_shardings,
      UnorderedElementsAre(
          Pair("param0", ElementsAre(verified_parse_sharding(
                             "{devices=[2,2,1]0,1,2,3}"))),
          Pair("param1", ElementsAre(verified_parse_sharding(
                             "{devices=[2,2,1]0,1,2,3}"))),
          Pair("dot", ElementsAre(verified_parse_sharding("{replicated}"))),
          Pair("copy", ElementsAre(verified_parse_sharding(
                           "{devices=[2,2]0,1,2,3}")))));
}
// kKeepInputOutputShardings without any small-tensor exceptions: shardings on
// parameters (and their pass-through copies) and on the root are kept and
// saved; the interior dot's sharding is stripped, so module_is_changed is
// true.
TEST_F(AutoShardingTest, SaveAndRemoveShardingAnnotationKeepInputOutput) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry (param0: f32[4,256,64], param1: f32[4,256,32]) -> f32[64,32] {
  %param0 = f32[4,256,64]{2,1,0} parameter(0), sharding={devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}
  %param1 = f32[4,256,32]{2,1,0} parameter(1), sharding={devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}
  %param0_copy = f32[4,256,64]{2,1,0} copy(param0), sharding={devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}
  %param1_copy = f32[4,256,32]{2,1,0} copy(param1), sharding={devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}
  %dot = f32[64,32]{1,0} dot(f32[4,256,64]{2,1,0} %param0_copy, f32[4,256,32]{2,1,0} %param1_copy), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}, sharding={devices=[2,2]0,1,2,3}
  ROOT %copy = f32[64,32]{1,0} copy(f32[64,32]{1,0} %dot), sharding={devices=[2,2]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  AutoShardingOption option;
  option.preserve_shardings =
      AutoShardingOption::PreserveShardingsType::kKeepInputOutputShardings;
  absl::flat_hash_set<const HloInstruction*> instructions_to_shard(
      module->entry_computation()->instructions().begin(),
      module->entry_computation()->instructions().end());
  TF_ASSERT_OK_AND_ASSIGN(
      AutoShardingImplementation::SaveShardingAnnotationsResult
          saved_shardings_result,
      AutoShardingImplementation(option).SaveAndRemoveShardingAnnotation(
          module.get(), instructions_to_shard,
          /* replicated_small_tensors */ {},
          /* execution_threads */ {}));
  absl::flat_hash_map<std::string, std::vector<HloSharding>> saved_shardings =
      saved_shardings_result.preserved_shardings;
  EXPECT_TRUE(saved_shardings_result.module_is_changed);
  // The interior dot loses its annotation...
  const HloInstruction* dot = FindInstruction(module.get(), "dot");
  ASSERT_NE(dot, nullptr);
  EXPECT_FALSE(dot->has_sharding());
  // ...while parameters, their copies, and the root keep theirs.
  const HloInstruction* param0 = FindInstruction(module.get(), "param0");
  ASSERT_NE(param0, nullptr);
  EXPECT_TRUE(param0->has_sharding());
  EXPECT_THAT(
      param0,
      op::Sharding("{devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}"));
  const HloInstruction* param0_copy =
      FindInstruction(module.get(), "param0_copy");
  ASSERT_NE(param0_copy, nullptr);
  EXPECT_TRUE(param0_copy->has_sharding());
  EXPECT_THAT(
      param0_copy,
      op::Sharding("{devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}"));
  const HloInstruction* param1 = FindInstruction(module.get(), "param1");
  ASSERT_NE(param1, nullptr);
  EXPECT_TRUE(param1->has_sharding());
  EXPECT_THAT(
      param1,
      op::Sharding("{devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}"));
  const HloInstruction* param1_copy =
      FindInstruction(module.get(), "param1_copy");
  ASSERT_NE(param1_copy, nullptr);
  EXPECT_TRUE(param1_copy->has_sharding());
  EXPECT_THAT(
      param1_copy,
      op::Sharding("{devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}"));
  const HloInstruction* copy = FindInstruction(module.get(), "copy");
  ASSERT_NE(copy, nullptr);
  EXPECT_TRUE(copy->has_sharding());
  EXPECT_THAT(copy, op::Sharding("{devices=[2,2]0,1,2,3}"));
  // Saved map contains exactly the kept input/output shardings (not the dot).
  EXPECT_THAT(
      saved_shardings,
      UnorderedElementsAre(Pair("param0", ElementsAre(param0->sharding())),
                           Pair("param0_copy", ElementsAre(param0->sharding())),
                           Pair("param1", ElementsAre(param1->sharding())),
                           Pair("param1_copy", ElementsAre(param1->sharding())),
                           Pair("copy", ElementsAre(copy->sharding()))));
}
// kRemoveAllShardings: nothing is saved, every annotation is stripped from
// the module, and the module is reported as changed. (The HLO raw string
// below is oddly line-wrapped but parses fine — HLO text is
// whitespace-insensitive.)
TEST_F(AutoShardingTest, SaveAndRemoveShardingAnnotationRemoveAll) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry (param0: f32[4,256,64], param1: f32[4,256,32]) -> f32[64,32] {
  %param0 = f32[4,256,64]{2,1,0} parameter(0),
  sharding={devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate} %param1 =
  f32[4,256,32]{2,1,0} parameter(1), sharding={devices=[1,1,2,2]0,2,1,3
  last_tile_dim_replicate} %dot = f32[64,32]{1,0} dot(f32[4,256,64]{2,1,0}
  %param0, f32[4,256,32]{2,1,0} %param1), lhs_contracting_dims={0,1},
  rhs_contracting_dims={0,1}, sharding={devices=[2,2]0,1,2,3} ROOT %copy =
  f32[64,32]{1,0} copy(f32[64,32]{1,0} %dot), sharding={devices=[2,2]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  AutoShardingOption option;
  option.preserve_shardings =
      AutoShardingOption::PreserveShardingsType::kRemoveAllShardings;
  absl::flat_hash_set<const HloInstruction*> instructions_to_shard(
      module->entry_computation()->instructions().begin(),
      module->entry_computation()->instructions().end());
  TF_ASSERT_OK_AND_ASSIGN(
      AutoShardingImplementation::SaveShardingAnnotationsResult
          saved_shardings_result,
      AutoShardingImplementation(option).SaveAndRemoveShardingAnnotation(
          module.get(), instructions_to_shard,
          /* replicated_small_tensors */ {},
          /* execution_threads */ {}));
  absl::flat_hash_map<std::string, std::vector<HloSharding>> saved_shardings =
      saved_shardings_result.preserved_shardings;
  EXPECT_TRUE(saved_shardings_result.module_is_changed);
  EXPECT_THAT(saved_shardings, IsEmpty());
  std::vector<HloInstruction*> instructions =
      module->entry_computation()->MakeInstructionPostOrder();
  // Every annotation has been stripped.
  EXPECT_THAT(instructions,
              Each(ResultOf(
                  [](const HloInstruction* ins) { return ins->has_sharding(); },
                  IsFalse())));
}
// kRemoveAllShardings, but "dot" and "copy" are listed as replicated small
// tensors: their replicated shardings are kept (in both module and saved
// map) while the parameter shardings are stripped.
TEST_F(AutoShardingTest, SaveAndRemoveShardingAnnotationRemoveAllSmallTensor) {
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry (param0: f32[4,256,64], param1: f32[4,256,32]) -> f32[64,32] {
  %param0 = f32[4,256,64]{2,1,0} parameter(0), sharding={devices=[2,2,1]0,1,2,3}
  %param1 = f32[4,256,32]{2,1,0} parameter(1), sharding={devices=[2,2,1]0,1,2,3}
  %dot = f32[64,32]{1,0} dot(f32[4,256,64]{2,1,0} %param0, f32[4,256,32]{2,1,0} %param1), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}, sharding={replicated}
  ROOT %copy = f32[64,32]{1,0} copy(f32[64,32]{1,0} %dot), sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  AutoShardingOption option;
  option.preserve_shardings =
      AutoShardingOption::PreserveShardingsType::kRemoveAllShardings;
  absl::flat_hash_set<const HloInstruction*> instructions_to_shard(
      module->entry_computation()->instructions().begin(),
      module->entry_computation()->instructions().end());
  TF_ASSERT_OK_AND_ASSIGN(
      AutoShardingImplementation::SaveShardingAnnotationsResult
          saved_shardings_result,
      AutoShardingImplementation(option).SaveAndRemoveShardingAnnotation(
          module.get(), instructions_to_shard,
          /* replicated_small_tensors */ {"dot", "copy"},
          /* execution_threads */ {}));
  absl::flat_hash_map<std::string, std::vector<HloSharding>> saved_shardings =
      saved_shardings_result.preserved_shardings;
  EXPECT_TRUE(saved_shardings_result.module_is_changed);
  // Parameter annotations are removed...
  const HloInstruction* param0 = FindInstruction(module.get(), "param0");
  ASSERT_NE(param0, nullptr);
  EXPECT_FALSE(param0->has_sharding());
  const HloInstruction* param1 = FindInstruction(module.get(), "param1");
  ASSERT_NE(param1, nullptr);
  EXPECT_FALSE(param1->has_sharding());
  // ...but the listed small tensors stay replicated.
  const HloInstruction* dot = FindInstruction(module.get(), "dot");
  ASSERT_NE(dot, nullptr);
  EXPECT_TRUE(dot->has_sharding());
  EXPECT_TRUE(dot->sharding().IsReplicated());
  const HloInstruction* copy = FindInstruction(module.get(), "copy");
  ASSERT_NE(copy, nullptr);
  EXPECT_TRUE(copy->has_sharding());
  EXPECT_TRUE(copy->sharding().IsReplicated());
  EXPECT_THAT(
      saved_shardings,
      UnorderedElementsAre(Pair("dot", ElementsAre(dot->sharding())),
                           Pair("copy", ElementsAre(copy->sharding()))));
}
TEST_F(AutoShardingTest, TupleReduceTest) {
// Variadic (argmax-style) reduce producing a (value, index) tuple; the
// solver must assign matching shardings to both tuple elements.
constexpr absl::string_view kHloString = R"(
HloModule module
%func (lhs_value: f32[], lhs_index: s32[], rhs_value: f32[], rhs_index: s32[]) -> (f32[], s32[]) {
%lhs_value = f32[] parameter(0)
%rhs_value = f32[] parameter(2)
%compare.a = pred[] compare(f32[] %lhs_value, f32[] %rhs_value), direction=GE
%select.a = f32[] select(pred[] %compare.a, f32[] %lhs_value, f32[] %rhs_value)
%compare.b = pred[] compare(f32[] %lhs_value, f32[] %rhs_value), direction=EQ
%lhs_index = s32[] parameter(1)
%rhs_index = s32[] parameter(3)
%minimum = s32[] minimum(s32[] %lhs_index, s32[] %rhs_index)
%select.b = s32[] select(pred[] %compare.a, s32[] %lhs_index, s32[] %rhs_index)
%select.c = s32[] select(pred[] %compare.b, s32[] %minimum, s32[] %select.b)
ROOT %tuple = (f32[], s32[]) tuple(f32[] %select.a, s32[] %select.c)
}
ENTRY %entry {
%param0 = f32[1,16,40]{2,1,0} parameter(0)
%iota = s32[1,16,40]{2,1,0} iota(), iota_dimension=2
%constant.a = f32[] constant(-inf)
%constant.b = s32[] constant(0)
%reduce = (f32[1,16]{1,0}, s32[1,16]{1,0}) reduce(f32[1,16,40]{2,1,0} %param0, s32[1,16,40]{2,1,0} %iota, f32[] %constant.a, s32[] %constant.b), dimensions={2}, to_apply=%func
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                        ParseAndReturnVerifiedModule(kHloString));
// Run the pass on a 2x2 device mesh.
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
EXPECT_TRUE(changed);
const HloInstruction* reduce = FindInstruction(module.get(), "reduce");
ASSERT_NE(reduce, nullptr);
// Any of these tilings is acceptable; in each alternative both tuple
// elements carry the same sharding.
EXPECT_THAT(
    reduce,
    AnyOf(op::Sharding("{{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}, "
                       "{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}}"),
          op::Sharding("{{devices=[1,2,2]0,2,1,3 last_tile_dim_replicate}, "
                       "{devices=[1,2,2]0,2,1,3 last_tile_dim_replicate}}"),
          op::Sharding("{{devices=[1,4]0,1,2,3}, "
                       "{devices=[1,4]0,1,2,3}}")));
const HloSharding& sharding = reduce->sharding();
// The chosen sharding must be structurally valid for the tuple shape.
TF_EXPECT_OK(sharding.Validate(reduce->shape(), 4));
}
TEST_F(AutoShardingTest, ReduceTest) {
// Reduce over the last dimension on a 2x2 mesh; the operand's and the
// reduce output's shardings must be chosen as a consistent pair.
constexpr absl::string_view kHloString = R"(
HloModule module
%func (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
ENTRY %entry {
%param0 = f32[1,16,128]{2,1,0} parameter(0)
%param1 = f32[] parameter(1)
%reduce = f32[1,16]{1,0} reduce(f32[1,16,128]{2,1,0} %param0, f32[] %param1), dimensions={2}, to_apply=%func
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                        ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
EXPECT_TRUE(changed);
const HloInstruction* reduce = FindInstruction(module.get(), "reduce");
const HloInstruction* param0 = FindInstruction(module.get(), "param0");
ASSERT_NE(reduce, nullptr);
// Three acceptable (param0, reduce) sharding combinations; each matcher
// pair ties a param0 tiling to the corresponding reduce tiling.
auto reduce_matcher1 =
    op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}");
auto param0_matcher1 =
    op::Sharding("{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}");
auto reduce_matcher2 =
    op::Sharding("{devices=[1,2,2]0,2,1,3 last_tile_dim_replicate}");
auto param0_matcher2 =
    op::Sharding("{devices=[1,2,1,2]0,2,1,3 last_tile_dim_replicate}");
auto reduce_matcher3 = op::Sharding("{devices=[1,4]0,1,2,3}");
auto param0_matcher3 = op::Sharding("{devices=[1,4,1]0,1,2,3}");
// The pair must match as a unit: mixing alternatives is not allowed.
EXPECT_TRUE(
    (Matches(param0_matcher1)(param0) && Matches(reduce_matcher1)(reduce)) ||
    (Matches(param0_matcher2)(param0) && Matches(reduce_matcher2)(reduce)) ||
    (Matches(param0_matcher3)(param0) && Matches(reduce_matcher3)(reduce)));
const HloSharding& sharding = reduce->sharding();
TF_EXPECT_OK(sharding.Validate(reduce->shape(), 4));
}
TEST_F(AutoShardingTest, ScatterTest2D) {
  // A 2D scatter on a 2x2 mesh: the chosen sharding must use all four
  // devices and validate against the scatter's shape.
  constexpr absl::string_view kHlo = R"(
HloModule module
region {
Arg_0 = s32[] parameter(0)
ROOT Arg_1 = s32[] parameter(1)
}
ENTRY %Scatter {
call = s32[4,128]{1,0} parameter(0)
clamp = s32[4,2]{1,0} parameter(1)
broadcast = s32[4,8]{1,0} parameter(2)
ROOT scatter = s32[4,128]{1,0} scatter(call, clamp, broadcast), update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=1, indices_are_sorted=true, unique_indices=true, to_apply=region
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHlo));
  AutoShardingOption option;
  option.enable = true;
  // A tight budget so a fully replicated solution is not an option.
  option.memory_budget_per_device = 1185;
  option.device_mesh_shape = {2, 2};
  option.device_mesh_ids = {0, 1, 2, 3};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 1.0};
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  VLOG(10) << module->ToString();
  EXPECT_TRUE(changed);
  const HloInstruction* scatter_instr = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter_instr, nullptr);
  EXPECT_EQ(scatter_instr->sharding().NumTiles(), 4);
  TF_EXPECT_OK(scatter_instr->sharding().Validate(scatter_instr->shape(), 4));
}
TEST_F(AutoShardingTest, ScatterTest3D) {
  // A 3D scatter on a 2x2 mesh; the memory budget expression below is sized
  // (presumably to admit sharded but not replicated operands — see the
  // arithmetic) so the solver must tile the scatter over all four devices.
  constexpr absl::string_view kHlo = R"(
HloModule module
region {
Arg_0 = f32[] parameter(0)
ROOT Arg_1 = f32[] parameter(1)
}
ENTRY %Scatter {
call = f32[4,128,128]{2,1,0} parameter(0)
clamp = s32[4,3]{1,0} parameter(1)
multiply = f32[4,8,8]{2,1,0} parameter(2)
ROOT scatter = f32[4,128,128]{2,1,0} scatter(call, clamp, multiply), update_window_dims={1,2}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0,1,2}, index_vector_dim=1, indices_are_sorted=true, unique_indices=true, to_apply=region
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHlo));
  AutoShardingOption option;
  option.enable = true;
  option.memory_budget_per_device = 4 * 2 * (4 * 128 * 128 / 4) + 48 + 1024 + 1;
  option.device_mesh_shape = {2, 2};
  option.device_mesh_ids = {0, 1, 2, 3};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 1.0};
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  VLOG(10) << module->ToString();
  EXPECT_TRUE(changed);
  const HloInstruction* scatter_instr = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter_instr, nullptr);
  EXPECT_EQ(scatter_instr->sharding().NumTiles(), 4);
  TF_EXPECT_OK(scatter_instr->sharding().Validate(scatter_instr->shape(), 4));
}
TEST_F(AutoShardingTest, GatherTest) {
const char* const hlo_string = R"(
HloModule module
ENTRY %module {
parameter.0 = s32[262144,2]{1,0} parameter(0), sharding={devices=[16,1,16]<=[256] last_tile_dim_replicate}
parameter.1 = f32[512,712,4096]{2,1,0} parameter(1), sharding={devices=[16,1,16]<=[256]}
ROOT gather = f32[262144,4096]{1,0} gather(parameter.1, parameter.0), offset_dims={1}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=1, slice_sizes={1,1,4096}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {16, 16};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(0) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(gather, op::Sharding("{devices=[16,16]<=[256]}"));
}
TEST_F(AutoShardingTest, GatherTest2) {
  // Replicated data gathered with indices tiled 256 ways: the gather output
  // is expected to inherit the indices' tiling.
  const char* const kHloText = R"(
HloModule module
ENTRY %module {
data = f32[1000]{0} parameter(0), sharding={replicated}
indices = s32[512,1280,8,1]{3,2,1,0} parameter(1), sharding={devices=[256,1,1,1]<=[256]}
ROOT gather = f32[512,1280,8,1]{3,2,1,0} gather(data, indices), offset_dims={3}, collapsed_slice_dims={}, start_index_map={0}, index_vector_dim=3, slice_sizes={1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHloText));
  AutoShardingOption option;
  option.enable = true;
  option.preserve_shardings =
      AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
  option.device_mesh_shape = {256, 1};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 1.0};
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  VLOG(0) << module->ToString();
  EXPECT_TRUE(changed);
  const HloInstruction* gather_instr = FindInstruction(module.get(), "gather");
  ASSERT_NE(gather_instr, nullptr);
  EXPECT_THAT(gather_instr, op::Sharding("{devices=[256,1,1,1]<=[256]}"));
}
TEST_F(AutoShardingTest, GatherTestNoReshard) {
// Gather on an 8-way mesh where the data operand can be sharded so that it
// lines up with the gather output (the accepted sharding pairs below), i.e.
// no resharding of the data is required.
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
data = s8[1000,128]{1,0} parameter(0)
indices = s32[8,1,1]{2,1,0} parameter(1)
gather = s8[8,1,128]{2,1,0} gather(data, indices), offset_dims={2}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=2, slice_sizes={1,128}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                        ParseAndReturnVerifiedModule(kHloString));
// 1x1x8 mesh: only the last mesh dimension offers parallelism.
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {1, 1, 8};
option.device_mesh_ids = {0, 1, 2, 3, 4, 5, 6, 7};
option.device_mesh_alpha = {1.0, 1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* gather = FindInstruction(module.get(), "gather");
const HloInstruction* data = FindInstruction(module.get(), "data");
ASSERT_NE(gather, nullptr);
ASSERT_NE(data, nullptr);
// Either 8-way tiling is acceptable for each instruction.
EXPECT_THAT(gather, AnyOf(op::Sharding("{devices=[1,1,8]<=[8]}"),
                          op::Sharding("{devices=[8,1,1]<=[8]}")));
EXPECT_THAT(data, AnyOf(op::Sharding("{devices=[1,8]<=[8]}"),
                        op::Sharding("{devices=[8,1]<=[8]}")));
TF_EXPECT_OK(gather->sharding().Validate(gather->shape(), 8));
// Sanity check: "data" really is the gather's data operand.
EXPECT_EQ(data, gather->operand(0));
}
TEST_F(AutoShardingTest, GatherConvTest) {
  // Gather feeding a convolution on a 4x1 mesh: both the gather and the
  // convolution must end up tiled over all four devices with valid shardings.
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
%param0 = f32[1024,1024]{0,1} parameter(0)
%param1 = s32[128,1024,1]{2,1,0} parameter(1)
%gather = f32[128,1024,1024]{2,1,0} gather(f32[1024,1024]{0,1} %param0, s32[128,1024,1]{2,1,0} %param1), offset_dims={2}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=2, slice_sizes={1,1024}
%param2 = f32[1024,1024]{1,0} parameter(2), sharding={replicated}
%reshape = f32[1024,1024,1]{2,1,0} reshape(param2)
ROOT convolution = f32[128,1024,1024]{2,1,0} convolution(gather, reshape), window={size=1}, dim_labels=b0f_io0->b0f
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  AutoShardingOption option;
  option.enable = true;
  option.preserve_shardings =
      AutoShardingOption::PreserveShardingsType::kKeepInputOutputShardings;
  option.device_mesh_shape = {4, 1};
  option.device_mesh_ids = {0, 1, 2, 3};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 1.0};
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  EXPECT_TRUE(changed);
  const HloInstruction* gather = FindInstruction(module.get(), "gather");
  const HloInstruction* conv = FindInstruction(module.get(), "convolution");
  ASSERT_NE(gather, nullptr);
  ASSERT_NE(conv, nullptr);
  const HloSharding& gather_sharding = gather->sharding();
  EXPECT_EQ(gather_sharding.NumTiles(), 4);
  // TF_EXPECT_OK for consistency with the other tests in this file (the
  // original mixed in bare EXPECT_OK here).
  TF_EXPECT_OK(gather_sharding.Validate(gather->shape(), 4));
  const HloSharding& conv_sharding = conv->sharding();
  EXPECT_EQ(conv_sharding.NumTiles(), 4);
  TF_EXPECT_OK(conv_sharding.Validate(conv->shape(), 4));
}
TEST_F(AutoShardingTest, AutoShardingKeepUserShardingInputOutput) {
  // With kKeepInputOutputShardings, the input/output annotations are
  // preserved while the cleared intermediate "dot" must be re-annotated by
  // the pass (here back to its original {devices=[2,2]} tiling).
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry (param0: f32[4,256,64], param1: f32[4,256,32]) -> f32[64,32] {
%param0 = f32[4,256,64]{2,1,0} parameter(0), sharding={devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}
%param1 = f32[4,256,32]{2,1,0} parameter(1), sharding={devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}
%dot = f32[64,32]{1,0} dot(f32[4,256,64]{2,1,0} %param0, f32[4,256,32]{2,1,0} %param1), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}, sharding={devices=[2,2]0,1,2,3}
ROOT %copy = f32[64,32]{1,0} copy(f32[64,32]{1,0} %dot), sharding={devices=[2,2]0,1,2,3}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  // Strip the dot's user annotation so the pass must infer it. Guard the
  // lookup so a renamed instruction fails the test instead of crashing it
  // (the original dereferenced FindInstruction() without a null check).
  auto* dot = FindInstruction(module.get(), "dot");
  ASSERT_NE(dot, nullptr);
  dot->clear_sharding();
  EXPECT_FALSE(dot->has_sharding());
  AutoShardingOption option;
  option.enable = true;
  option.device_mesh_shape = {2, 2};
  option.preserve_shardings =
      AutoShardingOption::PreserveShardingsType::kKeepInputOutputShardings;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  EXPECT_TRUE(changed);
  auto* dot_after = FindInstruction(module.get(), "dot");
  ASSERT_NE(dot_after, nullptr);
  EXPECT_THAT(dot_after, op::Sharding("{devices=[2,2]0,1,2,3}"));
  auto sharding = dot_after->sharding();
  TF_EXPECT_OK(sharding.Validate(dot_after->shape(), 4));
}
TEST_F(AutoShardingTest, AutoShardingKeepUserShardingAdd) {
  // The user sharding on %add must be kept under kKeepAllShardings and be
  // propagated to both (unannotated) parameters.
  constexpr absl::string_view kHlo = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[128,128]{0,1} parameter(0)
%param1 = f32[128,128]{0,1} parameter(1)
%add = f32[128,128]{0,1} add(%param0, %param1), sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}
ROOT %copy = f32[128,128]{0,1} copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHlo));
  AutoShardingOption option;
  option.enable = true;
  option.allow_mixed_mesh_shape = false;
  option.device_mesh_shape = {2, 2};
  option.preserve_shardings =
      AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  EXPECT_TRUE(changed);
  LOG(INFO) << module->ToString();
  // Both parameters and the add itself carry the user's original sharding.
  for (absl::string_view name : {"param0", "param1", "add"}) {
    const HloInstruction* instr = FindInstruction(module.get(), name);
    ASSERT_NE(instr, nullptr);
    EXPECT_THAT(
        instr,
        op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}"));
  }
}
TEST_F(AutoShardingTest, AutoShardingKeepUserShardingDot) {
  // With kKeepAllShardings, the only remaining user annotation (on %dot)
  // must be kept, and the cleared parameter/copy shardings re-derived so
  // they return to their original values.
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry (param0: f32[4,256,64], param1: f32[4,256,32]) -> f32[64,32] {
%param0 = f32[4,256,64]{2,1,0} parameter(0), sharding={devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}
%param1 = f32[4,256,32]{2,1,0} parameter(1), sharding={devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}
%dot = f32[64,32]{1,0} dot(f32[4,256,64]{2,1,0} %param0, f32[4,256,32]{2,1,0} %param1), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}, sharding={devices=[2,2]0,1,2,3}
ROOT %copy = f32[64,32]{1,0} copy(f32[64,32]{1,0} %dot), sharding={devices=[2,2]0,1,2,3}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  // Strip the user annotations everywhere except on %dot. Each lookup is
  // guarded with ASSERT_NE before dereferencing so a renamed instruction
  // fails the test instead of crashing it (the original dereferenced
  // FindInstruction() results without null checks).
  HloInstruction* param0 = FindInstruction(module.get(), "param0");
  ASSERT_NE(param0, nullptr);
  param0->clear_sharding();
  EXPECT_FALSE(param0->has_sharding());
  HloInstruction* param1 = FindInstruction(module.get(), "param1");
  ASSERT_NE(param1, nullptr);
  param1->clear_sharding();
  EXPECT_FALSE(param1->has_sharding());
  HloInstruction* copy = FindInstruction(module.get(), "copy");
  ASSERT_NE(copy, nullptr);
  copy->clear_sharding();
  EXPECT_FALSE(copy->has_sharding());
  AutoShardingOption option;
  option.enable = true;
  option.allow_mixed_mesh_shape = false;
  option.device_mesh_shape = {2, 2};
  option.preserve_shardings =
      AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  EXPECT_TRUE(changed);
  const HloInstruction* param0_after = FindInstruction(module.get(), "param0");
  ASSERT_NE(param0_after, nullptr);
  EXPECT_THAT(
      param0_after,
      op::Sharding("{devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}"));
  const HloInstruction* param1_after = FindInstruction(module.get(), "param1");
  ASSERT_NE(param1_after, nullptr);
  EXPECT_THAT(
      param1_after,
      op::Sharding("{devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}"));
  const HloInstruction* copy_after = FindInstruction(module.get(), "copy");
  ASSERT_NE(copy_after, nullptr);
  EXPECT_THAT(copy_after, op::Sharding("{devices=[2,2]0,1,2,3}"));
}
TEST_F(AutoShardingTest, ENABLEDAutoShardingKeepUserShardingTupleReduce) {
// A user-provided tuple sharding on the variadic reduce must survive
// kKeepAllShardings, and the reduce operand must end up sharded (not
// replicated) to feed it.
constexpr absl::string_view kHloString = R"(
HloModule module
%func (lhs_value: f32[], lhs_index: s32[], rhs_value: f32[], rhs_index: s32[]) -> (f32[], s32[]) {
%lhs_value = f32[] parameter(0)
%rhs_value = f32[] parameter(2)
%compare.a = pred[] compare(f32[] %lhs_value, f32[] %rhs_value), direction=GE
%select.a = f32[] select(pred[] %compare.a, f32[] %lhs_value, f32[] %rhs_value)
%compare.b = pred[] compare(f32[] %lhs_value, f32[] %rhs_value), direction=EQ
%lhs_index = s32[] parameter(1)
%rhs_index = s32[] parameter(3)
%minimum = s32[] minimum(s32[] %lhs_index, s32[] %rhs_index)
%select.b = s32[] select(pred[] %compare.a, s32[] %lhs_index, s32[] %rhs_index)
%select.c = s32[] select(pred[] %compare.b, s32[] %minimum, s32[] %select.b)
ROOT %tuple = (f32[], s32[]) tuple(f32[] %select.a, s32[] %select.c)
}
ENTRY %entry {
%param0 = f32[1,16,40]{2,1,0} parameter(0)
%iota = s32[1,16,40]{2,1,0} iota(), iota_dimension=2
%constant.a = f32[] constant(-inf)
%constant.b = s32[] constant(0)
%reduce = (f32[1,16]{1,0}, s32[1,16]{1,0}) reduce(f32[1,16,40]{2,1,0} %param0, s32[1,16,40]{2,1,0} %iota, f32[] %constant.a, s32[] %constant.b), dimensions={2}, to_apply=%func,
  sharding={{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}, {devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                        ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.preserve_shardings =
    AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
EXPECT_TRUE(changed);
auto* reduce = FindInstruction(module.get(), "reduce");
ASSERT_NE(reduce, nullptr);
// The reduce keeps exactly the user-specified tuple sharding.
EXPECT_THAT(reduce, op::Sharding(
                        "{{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}, "
                        "{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}}"));
auto sharding = reduce->sharding();
TF_EXPECT_OK(sharding.Validate(reduce->shape(), 4));
auto* param0 = FindInstruction(module.get(), "param0");
ASSERT_NE(param0, nullptr);
// The operand must actually be sharded to satisfy the reduce's sharding.
EXPECT_FALSE(param0->sharding().IsReplicated());
}
TEST_F(AutoShardingTest, GetTupleElementUserShardingsParameter) {
  // The user shardings on %param1 and on the %second get-tuple-element must
  // be kept; the root add is expected to carry the GTE's {devices=[4,1]}
  // sharding.
  constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %tupleparameter {
%param0 = f32[32,64]{1,0} parameter(0)
%param1 = f32[32,64]{1,0} parameter(1), sharding={devices=[2,2]<=[4]}
%tuple1 = (f32[32,64]{1,0}, f32[32,64]{1,0}) tuple(f32[32,64]{1,0} %param0, f32[32,64]{1,0} %param1)
%first = f32[32,64]{1,0} get-tuple-element((f32[32,64]{1,0}, f32[32,64]{1,0}) %tuple1), index=0
%second = f32[32,64]{1,0} get-tuple-element((f32[32,64]{1,0}, f32[32,64]{1,0}) %tuple1), index=1, sharding={devices=[4,1]<=[4]}
ROOT root = f32[32,64]{1,0} add(%first, %second)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  AutoShardingOption option;
  option.enable = true;
  option.preserve_shardings =
      AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
  option.device_mesh_shape = {2, 2};
  option.device_mesh_ids = {0, 1, 2, 3};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 1.0};
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  VLOG(10) << module->ToString();
  EXPECT_TRUE(changed);
  const HloInstruction* param1 = FindInstruction(module.get(), "param1");
  ASSERT_NE(param1, nullptr);
  EXPECT_THAT(param1, op::Sharding("{devices=[2,2]<=[4]}"));
  // Renamed from the misleading `second`: this local looks up the "root"
  // instruction, not the %second get-tuple-element. Behavior is unchanged.
  const HloInstruction* root = FindInstruction(module.get(), "root");
  ASSERT_NE(root, nullptr);
  EXPECT_THAT(root, op::Sharding("{devices=[4,1]<=[4]}"));
}
TEST_F(AutoShardingTest, TupleParameter) {
  // A tuple-shaped parameter: its tuple sharding must decompose into the
  // per-element shardings of its two get-tuple-element users, and all users
  // (including the root add) must agree on a single sharding.
  constexpr absl::string_view kHlo = R"(
HloModule module
ENTRY %tupleparameter {
%tuple_param = (f32[16,32,64]{2,1,0}, f32[16,32,64]{2,1,0}) parameter(0)
%first = f32[16,32,64]{2,1,0} get-tuple-element((f32[16,32,64]{2,1,0}, f32[16,32,64]{2,1,0}) %tuple_param), index=0
%second = f32[16,32,64]{2,1,0} get-tuple-element((f32[16,32,64]{2,1,0}, f32[16,32,64]{2,1,0}) %tuple_param), index=1
ROOT root = f32[16,32,64]{2,1,0} add(%first, %second)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHlo));
  AutoShardingOption option;
  option.enable = true;
  option.device_mesh_shape = {2, 2};
  option.device_mesh_ids = {0, 1, 2, 3};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 1.0};
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  VLOG(10) << module->ToString();
  EXPECT_TRUE(changed);
  const HloInstruction* tuple_param =
      FindInstruction(module.get(), "tuple_param");
  const HloInstruction* gte0 = FindInstruction(module.get(), "first");
  const HloInstruction* gte1 = FindInstruction(module.get(), "second");
  const HloInstruction* root = FindInstruction(module.get(), "root");
  ASSERT_NE(tuple_param, nullptr);
  ASSERT_NE(gte0, nullptr);
  ASSERT_NE(gte1, nullptr);
  ASSERT_NE(root, nullptr);
  ASSERT_TRUE(tuple_param->has_sharding());
  ASSERT_TRUE(gte0->has_sharding());
  ASSERT_TRUE(gte1->has_sharding());
  ASSERT_TRUE(root->has_sharding());
  // All users of the tuple share one sharding.
  EXPECT_EQ(gte0->sharding(), gte1->sharding());
  EXPECT_EQ(gte0->sharding(), root->sharding());
  // The parameter's tuple sharding is the element-wise pair of those.
  ASSERT_TRUE(tuple_param->sharding().IsTuple());
  ASSERT_EQ(tuple_param->sharding().tuple_elements().size(), 2);
  EXPECT_EQ(tuple_param->sharding().tuple_elements()[0], gte0->sharding());
  EXPECT_EQ(tuple_param->sharding().tuple_elements()[1], gte1->sharding());
  TF_EXPECT_OK(tuple_param->sharding().Validate(tuple_param->shape(), 4));
}
TEST_F(AutoShardingTest, GetTupleElementWithUserShardingTest) {
// A get-tuple-element on a while-loop result carries a user sharding that
// differs from the loop inputs' sharding; the pass must still succeed and
// change the module (only the Run() result is checked here).
constexpr absl::string_view kHloString = R"(
HloModule module
%while_cond {
%param0 = (u32[],f32[16,256,256]{2,1,0},f32[16,256,256]{2,1,0}) parameter(0)
%count = u32[] get-tuple-element((u32[],f32[16,256,256]{2,1,0},f32[16,256,256]{2,1,0}) %param0), index=0
%limit = u32[] constant(2)
ROOT %lt = pred[] compare(%count, %limit), direction=LT
}
%while_body {
%param0 = (u32[],f32[16,256,256]{2,1,0},f32[16,256,256]{2,1,0}) parameter(0)
%count = u32[] get-tuple-element((u32[],f32[16,256,256]{2,1,0},f32[16,256,256]{2,1,0}) %param0), index=0
%v1 = f32[16,256,256]{2,1,0} get-tuple-element((u32[],f32[16,256,256]{2,1,0},f32[16,256,256]{2,1,0}) %param0), index=1
%v2 = f32[16,256,256]{2,1,0} get-tuple-element((u32[],f32[16,256,256]{2,1,0},f32[16,256,256]{2,1,0}) %param0), index=2
%dot = f32[16,256,256]{2,1,0} dot(f32[16,256,256]{2,1,0} %v1, f32[16,256,256]{2,1,0} %v2), lhs_contracting_dims={2}, rhs_contracting_dims={2}, lhs_batch_dims={0}, rhs_batch_dims={0}
%dot_tanh = f32[16,256,256]{2,1,0} tanh(f32[16,256,256]{2,1,0} %dot)
%dot_cos = f32[16,256,256]{2,1,0} cosine(f32[16,256,256]{2,1,0} %dot)
ROOT %result = (u32[],f32[16,256,256]{2,1,0},f32[16,256,256]{2,1,0}) tuple(%count, %dot_tanh, %dot_cos)
}
ENTRY %entry (param0: f32[16,256,256], param1: f32[16,256,256]) -> f32[16,256,256] {
%param0 = f32[16,256,256]{2,1,0} parameter(0), sharding={devices=[2,1,2]0,1,2,3}
%param1 = f32[16,256,256]{2,1,0} parameter(1), sharding={devices=[2,1,2]0,1,2,3}
%zero = u32[] constant(0)
%init = (u32[], f32[16,256,256], f32[16,256,256]) tuple(%zero, %param0, %param1)
%while.1 = (u32[],f32[16,256,256]{2,1,0},f32[16,256,256]{2,1,0}) while(%init), body=%while_body, condition=%while_cond
%tuple1 = f32[16,256,256]{2,1,0} get-tuple-element((u32[], f32[16,256,256]{2,1,0}, f32[16,256,256]{2,1,0}) %while.1), index=1, sharding={devices=[2,2,1]0,2,1,3}
ROOT %tanh = f32[16,256,256]{2,1,0} tanh(f32[16,256,256]{2,1,0} %tuple1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                        ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.preserve_shardings =
    AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
option.enable = true;
// 2x1x2 mesh matching the {devices=[2,1,2]} parameter shardings.
option.device_mesh_shape = {2, 1, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
EXPECT_TRUE(changed);
}
TEST_F(AutoShardingTest, While) {
  // A while loop whose body does a dynamic-slice + dot: after sharding, the
  // body root, body parameter, and condition parameter must carry identical
  // per-element tuple shardings.
  constexpr absl::string_view kHloString = R"(
HloModule module
%cond {
%vars.cond = (u32[], bf16[2,2048,768], bf16[128,512,2048], bf16[128,512,768], s32[]) parameter(0)
%count.cond = u32[] get-tuple-element(%vars.cond), index=0
%limit = u32[] constant(2)
ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
%param = (u32[], bf16[2,2048,768], bf16[128,512,2048], bf16[128,512,768], s32[]) parameter(0)
%i0 = s32[] constant(0)
%count = u32[] get-tuple-element(%param), index=0
%gte0 = bf16[2,2048,768]{2,1,0} get-tuple-element(%param), index=1
%index = s32[] get-tuple-element(%param), index=4
%ds = bf16[1,2048,768]{2,1,0} dynamic-slice(%gte0, s32[] %index, s32[] %i0, s32[] %i0), dynamic_slice_sizes={1,2048,768}
%rhs = bf16[2048,768]{1,0} reshape(%ds)
%lhs = bf16[128,512,2048]{2,1,0} get-tuple-element(%param), index=2
%dot = bf16[128,512,768]{2,1,0} dot(bf16[128,512,2048]{2,1,0} %lhs, bf16[2048,768]{1,0} %rhs), lhs_contracting_dims={2}, rhs_contracting_dims={0}
ROOT %tuple = (u32[], bf16[2,2048,768], bf16[128,512,2048], bf16[128,512,768], s32[]) tuple(%count, %gte0, %lhs, %dot, index)
}
ENTRY %entry {
%p0 = bf16[2048,768] parameter(0)
%p1 = bf16[128,512,2048] parameter(1)
%p2 = bf16[128,512,768] parameter(2)
%reshape0 = bf16[1,2048,768] reshape(%p0)
%concat0 = bf16[2,2048,768] concatenate(%reshape0, %reshape0), dimensions={0}
%zero = u32[] constant(0)
%p3 = s32[] parameter(3)
%init = (u32[], bf16[2,2048,768], bf16[128,512,2048], bf16[128,512,768], s32[]) tuple(%zero, %concat0, %p1, %p2, %p3)
%while = (u32[], bf16[2, 2048, 768], bf16[128,512,2048], bf16[128,512,768], s32[]) while(%init), body=%body, condition=%cond
ROOT %result = bf16[128,512,768] get-tuple-element(%while), index=3
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  AutoShardingOption option;
  option.enable = true;
  option.device_mesh_shape = {2, 2};
  option.device_mesh_ids = {0, 1, 2, 3};
  option.device_mesh_alpha = {1.0, 1.0};
  option.device_mesh_beta = {0.01, 1.0};
  TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
  VLOG(0) << module->ToString();
  EXPECT_TRUE(changed);
  auto* while_op = FindInstruction(module.get(), "while");
  ASSERT_NE(while_op, nullptr);
  // Hoist the loop-invariant sharding lookups (the original recomputed all
  // three instruction shardings on every loop iteration).
  const HloSharding& root_sharding =
      while_op->while_body()->root_instruction()->sharding();
  const HloSharding& body_param_sharding =
      while_op->while_body()->parameter_instruction(0)->sharding();
  const HloSharding& cond_param_sharding =
      while_op->while_condition()->parameter_instruction(0)->sharding();
  for (size_t i = 0; i < root_sharding.tuple_elements().size(); i++) {
    const HloSharding& element = root_sharding.tuple_elements().at(i);
    // Body parameter and condition parameter must both mirror the body
    // root's sharding, element by element.
    EXPECT_EQ(body_param_sharding.tuple_elements().at(i).ToString(),
              element.ToString());
    EXPECT_EQ(cond_param_sharding.tuple_elements().at(i).ToString(),
              element.ToString());
  }
}
TEST_F(AutoShardingTest, DynamicSlice) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
%param0 = s32[] parameter(0)
%arg_tuple = (s32[], f32[4,256,1024]{2,1,0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2,4,256,1024]{3,2,1,0}, f32[2,4096]{1,0}, f32[2,1024,4096]{2,1,0}, f32[2,1024]{1,0}, f32[2,4096,1024]{2,1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,256]{1,0}, f32[2,1024]{1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,4096]{1,0}, f32[2,1024,4096]{2,1,0}, f32[2,1024]{1,0}, f32[2,4096,1024]{2,1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,256]{1,0}, f32[2,1024]{1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[4,1,256,256]{3,2,1,0}, f32[4,256,1]{2,1,0}, f32[4,256,1]{2,1,0}, f32[4,256,1]{2,1,0}, f32[4,256,1]{2,1,0}, f32[4,256,1]{2,1,0}, f32[], f32[], f32[4,256,1]{2,1,0}, f32[], f32[]) parameter(1)
%constant.a = s32[] constant(2)
%constant.b = s32[] constant(0)
%compare = pred[] compare(s32[] %param0, s32[] %constant.b), direction=LT
%add = s32[] add(s32[] %param0, s32[] %constant.a)
%select = s32[] select(pred[] %compare, s32[] %add, s32[] %param0)
%get-tuple-element = f32[2,1024]{1,0} get-tuple-element((s32[], f32[4,256,1024]{2,1,0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2,4,256,1024]{3,2,1,0}, f32[2,4096]{1,0}, f32[2,1024,4096]{2,1,0}, f32[2,1024]{1,0}, f32[2,4096,1024]{2,1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,256]{1,0}, f32[2,1024]{1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,4096]{1,0}, f32[2,1024,4096]{2,1,0}, f32[2,1024]{1,0}, f32[2,4096,1024]{2,1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,256]{1,0}, f32[2,1024]{1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[4,1,256,256]{3,2,1,0}, f32[4,256,1]{2,1,0}, f32[4,256,1]{2,1,0}, f32[4,256,1]{2,1,0}, f32[4,256,1]{2,1,0}, f32[4,256,1]{2,1,0}, f32[], f32[], f32[4,256,1]{2,1,0}, f32[], f32[]) %arg_tuple), index=16
ROOT %dynamic-slice = f32[1,1024]{1,0} dynamic-slice(f32[2,1024]{1,0} %get-tuple-element, s32[] %select, s32[] %constant.b), dynamic_slice_sizes={1,1024}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(0) << module->ToString();
EXPECT_TRUE(changed);
}
TEST_F(AutoShardingTest, Alias) {
constexpr absl::string_view kHloString = R"(
HloModule module, input_output_alias={ {0}: (0, {}, may-alias), {1}: (1, {}, may-alias), {2}: (2, {}, may-alias), {3}: (3, {}, may-alias)}
ENTRY %entry {
param.0 = u32[] parameter(0)
param.1 = f32[32]{0} parameter(1)
param.2 = f32[32]{0} parameter(2)
param.3 = f32[1000]{0} parameter(3)
ROOT tuple = (u32[], f32[32]{0}, f32[32]{0}, f32[1000]{0}) tuple(param.0, param.1, param.2, param.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(0) << module->ToString();
EXPECT_TRUE(changed);
}
TEST_F(AutoShardingTest, AliasTupleParameter) {
constexpr absl::string_view kHloString = R"(
HloModule module, input_output_alias={ {0}: (0, {0}, may-alias), {1}: (0, {1}, may-alias), {2}: (0, {2}, may-alias), {3}: (0, {3}, may-alias)}
ENTRY %entry {
arg_tuple.1 = (u32[], f32[32]{0}, f32[32]{0}, f32[1000]{0}) parameter(0)
get-tuple-element.0 = u32[] get-tuple-element(arg_tuple.1), index=0
get-tuple-element.1 = f32[32]{0} get-tuple-element(arg_tuple.1), index=1
get-tuple-element.2 = f32[32]{0} get-tuple-element(arg_tuple.1), index=2
get-tuple-element.3 = f32[1000]{0} get-tuple-element(arg_tuple.1), index=3
ROOT tuple = (u32[], f32[32]{0}, f32[32]{0}, f32[1000]{0}) tuple(get-tuple-element.0, get-tuple-element.1, get-tuple-element.2, get-tuple-element.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(0) << module->ToString();
EXPECT_TRUE(changed);
}
TEST_F(AutoShardingTest, JaxRandomUniform) {
constexpr absl::string_view kHloString = R"(
HloModule module
clone {
lhs.1 = u32[] parameter(0)
rhs.1 = u32[] parameter(2)
or.2 = u32[] or(lhs.1, rhs.1)
lhs.0 = u32[] parameter(1)
rhs.0 = u32[] parameter(3)
or.3 = u32[] or(lhs.0, rhs.0)
ROOT tuple.23 = (u32[], u32[]) tuple(or.2, or.3)
}
ENTRY %entry {
shift-left = u32[2,2]{1,0} parameter(0)
select = u32[2,2]{1,0} parameter(1)
constant.a = u32[] parameter(2)
reduce = (u32[2]{0}, u32[2]{0}) reduce(shift-left, select, constant.a, constant.a), dimensions={1}, to_apply=clone
rng-bit-generator = u32[8,512]{1,0} rng-bit-generator(reduce), algorithm=rng_default
constant.b = u32[] constant(9)
broadcast.a = u32[8,512]{1,0} broadcast(constant.b), dimensions={}, sharding={replicated}
shift-right-logical = u32[8,512]{1,0} shift-right-logical(rng-bit-generator, broadcast.a)
constant.c = u32[] constant(1065353216)
broadcast.b = u32[8,512]{1,0} broadcast(constant.c), dimensions={}, sharding={replicated}
or = u32[8,512]{1,0} or(shift-right-logical, broadcast.b)
bitcast-convert = f32[8,512]{1,0} bitcast-convert(or)
constant.d = f32[] constant(1)
broadcast.c = f32[8,512]{1,0} broadcast(constant.d), dimensions={}, sharding={replicated}
subtract = f32[8,512]{1,0} subtract(bitcast-convert, broadcast.c)
constant.e = f32[] constant(0)
broadcast.d = f32[8,512]{1,0} broadcast(constant.e), dimensions={}, sharding={replicated}
ROOT maximum = f32[8,512]{1,0} maximum(subtract, broadcast.d)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(0) << module->ToString();
EXPECT_TRUE(changed);
EXPECT_TRUE(module->entry_computation()->root_instruction()->has_sharding());
auto* tuple_operand = FindInstruction(module.get(), "reduce");
ASSERT_NE(tuple_operand, nullptr);
EXPECT_THAT(tuple_operand, op::Sharding("{{replicated}, {replicated}}"));
}
TEST_F(AutoShardingTest, Reshape) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
%param.0 = bf16[24,2048,2048]{2,1,0} parameter(0)
%param.1 = s32[] parameter(1)
%param.2 = bf16[512,1024,2048]{2,1,0} parameter(2)
%constant = s32[] constant(0)
%dynamic-slice = bf16[1,2048,2048]{2,1,0} dynamic-slice(bf16[24,2048,2048]{2,1,0} %param.0, s32[] %param.1, s32[] %constant, s32[] %constant), dynamic_slice_sizes={1,2048,2048}
%reshape = bf16[2048,16,128]{2,1,0} reshape(bf16[1,2048,2048]{2,1,0} %dynamic-slice)
%dot = bf16[512,1024,16,128]{3,2,1,0} dot(bf16[512,1024,2048]{2,1,0} %param.2, bf16[2048,16,128]{2,1,0} %reshape), lhs_contracting_dims={2}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {64, 1};
option.device_mesh_ids.resize(64);
std::iota(option.device_mesh_ids.begin(), option.device_mesh_ids.end(), 0);
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
}
TEST_F(AutoShardingTest, ReshapeWithInvalidUserSharding) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
%param.0 = bf16[24,16,16]{2,1,0} parameter(0), sharding={devices=[32,1,1]<=[32]}
%reshape = bf16[1,24,16,16]{3,2,1,0} reshape(%param.0)
%copy = bf16[1,24,16,16]{3,2,1,0} copy(%reshape)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {32, 1};
option.device_mesh_ids.resize(32);
std::iota(option.device_mesh_ids.begin(), option.device_mesh_ids.end(), 0);
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
EXPECT_TRUE(changed);
VLOG(1) << module->ToString();
HloInstruction* reshape = FindInstruction(module.get(), "reshape");
EXPECT_THAT(reshape, op::Sharding("{devices=[1,32,1,1]<=[32]}"));
}
TEST_F(AutoShardingTest, Broadcast) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
%param.0 = s32[32]{0} parameter(0)
ROOT broadcast = s32[512,1024,1024,32]{3,2,1,0} broadcast(s32[32]{0} %param.0), dimensions={3}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {1, 1, 64};
option.memory_budget_per_device = 1025 * 1024 * 1024;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
}
TEST_F(AutoShardingTest, TestReshardingCostsForUserAnnotatedSharding) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
%param0 = f32[256,256] parameter(0)
%param1 = f32[256,256] parameter(1)
%dot = f32[256,256] dot(%param0, %param1), lhs_contracting_dims={1}, rhs_contracting_dims={1}
ROOT %result = f32[256,256] tanh(%dot), sharding={devices=[1,4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_beta = {1, 1};
option.device_mesh_alpha = {1, 1};
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
AutoSharding pass(option);
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
EXPECT_TRUE(changed);
LOG(INFO) << module->ToString();
EXPECT_GT(pass.GetSolverOptimalObjectiveValue(), 0);
}
TEST_F(AutoShardingTest, AllowAliasToFollowerConversion) {
constexpr absl::string_view kHloString = R"(
HloModule module, input_output_alias={ {0}: (0, {}, may-alias), {1}: (1, {}, may-alias), {2}: (2, {}, may-alias), {3}: (3, {}, may-alias)}
ENTRY %entry {
param.0 = u32[] parameter(0)
param.1 = f32[32]{0} parameter(1)
param.2 = f32[32]{0} parameter(2)
param.3 = f32[32000]{0} parameter(3)
ROOT tuple.61 = (u32[], f32[32]{0}, f32[32]{0}, f32[32000]{0}) tuple(param.0, param.1, param.2, param.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
option.allow_alias_to_follower_conversion = true;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(0) << module->ToString();
EXPECT_TRUE(changed);
}
TEST_F(AutoShardingTest, DisallowAliasToFollowerConversion) {
constexpr absl::string_view kHloString = R"(
HloModule module, input_output_alias={ {0}: (0, {}, may-alias), {1}: (1, {}, may-alias), {2}: (2, {}, may-alias), {3}: (3, {}, may-alias)}
ENTRY %entry {
param.0 = u32[] parameter(0)
param.1 = f32[32]{0} parameter(1)
param.2 = f32[32]{0} parameter(2)
param.3 = f32[32000]{0} parameter(3)
ROOT tuple.61 = (u32[], f32[32]{0}, f32[32]{0}, f32[32000]{0}) tuple(param.0, param.1, param.2, param.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
option.allow_alias_to_follower_conversion = false;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(0) << module->ToString();
EXPECT_TRUE(changed);
}
TEST_F(AutoShardingTest, BufferDonorConfigPreservation) {
constexpr absl::string_view kHloString = R"(
HloModule Module, buffer_donor={ (0, {0}), (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
ROOT %out = (f32[], f32[]) tuple(%p0, %p1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
const HloBufferDonorConfig buffer_donor_config_before =
module->buffer_donor_config();
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
EXPECT_TRUE(changed);
const HloBufferDonorConfig& buffer_donor_config_after =
module->buffer_donor_config();
EXPECT_EQ(buffer_donor_config_before.ToString(),
buffer_donor_config_after.ToString());
}
TEST_F(AutoShardingTest, InputOutputAliasConfigPreservation) {
constexpr absl::string_view kHloString = R"(
HloModule Module, input_output_alias={ {0}: (0, {0}, must-alias), {1}: (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
ROOT %out = (f32[], f32[]) tuple(%p0, %p1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
const HloInputOutputAliasConfig input_output_alias_config_before =
module->input_output_alias_config();
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
EXPECT_TRUE(changed);
const HloInputOutputAliasConfig& input_output_alias_config_after =
module->input_output_alias_config();
EXPECT_EQ(input_output_alias_config_before.ToString(),
input_output_alias_config_after.ToString());
}
TEST_F(AutoShardingTest, SliceAliasTest) {
const char* const kHloString = R"(
HloModule module
%branch0 {
%branch0_param = f32[256,256]{1,0} parameter(0)
ROOT %slice0 = f32[16,16]{1,0} slice(f32[256,256]{1,0} %branch0_param), slice={[16:32], [16:32]}
}
%branch1 {
%branch1_param = f32[256,256]{1,0} parameter(0)
ROOT %slice1 = f32[16,16]{1,0} slice(f32[256,256]{1,0} %branch1_param), slice={[0:16], [0:16]}
}
ENTRY %entry {
%entry_param0 = f32[256,256]{1,0} parameter(0), sharding={devices=[32,1]<=[32]}
%entry_param1 = s32[] parameter(1)
ROOT %conditional = f32[16,16]{1,0} conditional(s32[] %entry_param1, f32[256,256]{1,0} %entry_param0, f32[256,256]{1,0} %entry_param0), branch_computations={%branch0, %branch1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
option.enable = true;
option.device_mesh_shape = {32, 1};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
ASSERT_TRUE(changed);
VLOG(5) << module->ToString();
const HloInstruction* branch0_param =
FindInstruction(module.get(), "branch0_param");
const HloInstruction* slice0 = FindInstruction(module.get(), "slice0");
const HloInstruction* branch1_param =
FindInstruction(module.get(), "branch1_param");
const HloInstruction* slice1 = FindInstruction(module.get(), "slice1");
ASSERT_NE(branch0_param, nullptr);
ASSERT_NE(slice0, nullptr);
ASSERT_NE(branch1_param, nullptr);
ASSERT_NE(slice1, nullptr);
ASSERT_TRUE(branch0_param->has_sharding());
ASSERT_TRUE(slice0->has_sharding());
ASSERT_TRUE(branch1_param->has_sharding());
ASSERT_TRUE(slice1->has_sharding());
EXPECT_THAT(branch0_param, op::Sharding("{devices=[32,1]<=[32]}"));
EXPECT_THAT(slice0, op::Sharding("{replicated}"));
EXPECT_THAT(branch1_param, op::Sharding("{devices=[32,1]<=[32]}"));
EXPECT_THAT(slice1, op::Sharding("{replicated}"));
}
TEST_F(AutoShardingTest, CrashIfAskedToRespectShardAsShardLike) {
const char* const kHloString = R"(
HloModule module
ENTRY matmul {
param1 = f32[32,64]{1,0} parameter(0)
param2 = f32[64,128]{1,0} parameter(1)
custom-call1 = f32[32,64]{1,0} custom-call(param1), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 0}
custom-call2 = f32[64,128]{1,0} custom-call(param2), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 0}
ROOT root = f32[32,128]{1,0} dot(custom-call1, custom-call2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
option.enable = true;
option.device_mesh_shape = {4, 1};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
EXPECT_DEATH(
absl::StatusOr<bool> status = AutoSharding(option).Run(module.get()),
"The auto-sharding pass could not find shardings that works for this "
"input.");
}
TEST_F(AutoShardingTest, IgnoreShardAsShardLike) {
const char* const kHloString = R"(
HloModule module
ENTRY matmul {
param1 = f32[32,64]{1,0} parameter(0)
param2 = f32[64,128]{1,0} parameter(1)
custom-call1 = f32[32,64]{1,0} custom-call(param1), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 0}
custom-call2 = f32[64,128]{1,0} custom-call(param2), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 0}
ROOT root = f32[32,128]{1,0} dot(custom-call1, custom-call2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kRemoveAllShardings;
option.enable = true;
option.device_mesh_shape = {4, 1};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
EXPECT_TRUE(changed);
}
TEST(NormalizeTest, NormalizeHandlesNegativeCosts) {
EdgeReshardingCostMatrix edge_cost(2, 2);
edge_cost(0, 0).communication_cost = -100;
edge_cost(0, 1).communication_cost = 200;
edge_cost(1, 0).communication_cost = 300;
edge_cost(1, 1).communication_cost = 400;
const EdgeReshardingCostMatrix normalized_edge_cost = Normalize(edge_cost);
EXPECT_EQ(normalized_edge_cost(0, 0).communication_cost, 0);
EXPECT_EQ(normalized_edge_cost(0, 1).communication_cost, 300);
EXPECT_EQ(normalized_edge_cost(1, 0).communication_cost, 400);
EXPECT_EQ(normalized_edge_cost(1, 1).communication_cost, 500);
}
TEST(NormalizeTest, NormalizeHandlesPositiveCosts) {
EdgeReshardingCostMatrix edge_cost(2, 2);
edge_cost(0, 0).communication_cost = 100;
edge_cost(0, 1).communication_cost = 200;
edge_cost(1, 0).communication_cost = 300;
edge_cost(1, 1).communication_cost = 400;
const EdgeReshardingCostMatrix normalized_edge_cost = Normalize(edge_cost);
EXPECT_EQ(normalized_edge_cost(0, 0).communication_cost, 100);
EXPECT_EQ(normalized_edge_cost(0, 1).communication_cost, 200);
EXPECT_EQ(normalized_edge_cost(1, 0).communication_cost, 300);
EXPECT_EQ(normalized_edge_cost(1, 1).communication_cost, 400);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/experimental/auto_sharding/auto_sharding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/experimental/auto_sharding/auto_sharding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1e4605fa-a258-4035-815f-4aaa4e0190de | cpp | google/quiche | quic_server | quiche/quic/tools/quic_server.cc | quiche/quic/tools/quic_server_test.cc | #include "quiche/quic/tools/quic_server.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "quiche/quic/core/crypto/crypto_handshake.h"
#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/core/io/event_loop_socket_factory.h"
#include "quiche/quic/core/io/quic_default_event_loop.h"
#include "quiche/quic/core/io/quic_event_loop.h"
#include "quiche/quic/core/quic_clock.h"
#include "quiche/quic/core/quic_crypto_stream.h"
#include "quiche/quic/core/quic_data_reader.h"
#include "quiche/quic/core/quic_default_clock.h"
#include "quiche/quic/core/quic_default_connection_helper.h"
#include "quiche/quic/core/quic_default_packet_writer.h"
#include "quiche/quic/core/quic_dispatcher.h"
#include "quiche/quic/core/quic_packet_reader.h"
#include "quiche/quic/core/quic_packets.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/tools/quic_simple_crypto_server_stream_helper.h"
#include "quiche/quic/tools/quic_simple_dispatcher.h"
#include "quiche/quic/tools/quic_simple_server_backend.h"
#include "quiche/common/simple_buffer_allocator.h"
namespace quic {
namespace {

// Secret used to derive source-address tokens for this server instance.
// NOTE(review): a hard-coded secret is fine for a demo/test server but is
// not suitable for production key management.
const char kSourceAddressTokenSecret[] = "secret";

}

// Upper bound on the number of buffered-CHLO sessions the dispatcher is
// asked to create per readable-socket event, so one event cannot starve
// the rest of the event loop.
const size_t kNumSessionsToCreatePerSocketEvent = 16;
// Convenience constructor: serves all supported QUIC versions.
// Delegates to the three-argument overload below.
QuicServer::QuicServer(std::unique_ptr<ProofSource> proof_source,
                       QuicSimpleServerBackend* quic_simple_server_backend)
    : QuicServer(std::move(proof_source), quic_simple_server_backend,
                 AllSupportedVersions()) {}
// Convenience constructor: uses a default QuicConfig, default crypto-config
// options, and the default connection-ID length. Delegates to the full
// constructor below.
QuicServer::QuicServer(std::unique_ptr<ProofSource> proof_source,
                       QuicSimpleServerBackend* quic_simple_server_backend,
                       const ParsedQuicVersionVector& supported_versions)
    : QuicServer(std::move(proof_source), QuicConfig(),
                 QuicCryptoServerConfig::ConfigOptions(), supported_versions,
                 quic_simple_server_backend, kQuicDefaultConnectionIdLength) {}
// Full constructor. Stores configuration and backend pointers, seeds the
// crypto config with the (test-only) source-address-token secret, and then
// runs Initialize() to install flow-control defaults and the default server
// config. `quic_simple_server_backend` must be non-null and must outlive
// this server.
QuicServer::QuicServer(
    std::unique_ptr<ProofSource> proof_source, const QuicConfig& config,
    const QuicCryptoServerConfig::ConfigOptions& crypto_config_options,
    const ParsedQuicVersionVector& supported_versions,
    QuicSimpleServerBackend* quic_simple_server_backend,
    uint8_t expected_server_connection_id_length)
    : port_(0),
      fd_(-1),  // no listening socket yet; set by CreateUDPSocketAndListen()
      packets_dropped_(0),
      overflow_supported_(false),  // set per-socket capability at bind time
      silent_close_(false),
      config_(config),
      crypto_config_(kSourceAddressTokenSecret, QuicRandom::GetInstance(),
                     std::move(proof_source), KeyExchangeSource::Default()),
      crypto_config_options_(crypto_config_options),
      version_manager_(supported_versions),
      max_sessions_to_create_per_socket_event_(
          kNumSessionsToCreatePerSocketEvent),
      packet_reader_(new QuicPacketReader()),
      quic_simple_server_backend_(quic_simple_server_backend),
      expected_server_connection_id_length_(
          expected_server_connection_id_length),
      connection_id_generator_(expected_server_connection_id_length) {
  QUICHE_DCHECK(quic_simple_server_backend_);
  Initialize();
}
// One-time setup run from the constructor: raise the initial flow-control
// windows (when the caller left them at the protocol default) and install
// the default server config (SCFG) into the crypto config.
void QuicServer::Initialize() {
  // Server-appropriate initial windows, applied only if the provided
  // QuicConfig still carries the protocol-default send window.
  const uint32_t kServerSessionWindowBytes = 1 * 1024 * 1024;  // 1 MB
  const uint32_t kServerStreamWindowBytes = 64 * 1024;         // 64 KB
  if (config_.GetInitialSessionFlowControlWindowToSend() ==
      kDefaultFlowControlSendWindow) {
    config_.SetInitialSessionFlowControlWindowToSend(
        kServerSessionWindowBytes);
  }
  if (config_.GetInitialStreamFlowControlWindowToSend() ==
      kDefaultFlowControlSendWindow) {
    config_.SetInitialStreamFlowControlWindowToSend(kServerStreamWindowBytes);
  }

  // Install the default SCFG. The returned handshake message is not needed
  // here, so it is discarded when this scope ends.
  std::unique_ptr<CryptoHandshakeMessage> default_scfg(
      crypto_config_.AddDefaultConfig(QuicRandom::GetInstance(),
                                      QuicDefaultClock::Get(),
                                      crypto_config_options_));
}
// Tears down the listening socket and detaches the socket factory from the
// backend.
//
// Fix: the original unconditionally called UnregisterSocket(fd_) and
// socket_api::Close(fd_) even when CreateUDPSocketAndListen() was never
// invoked (or failed), i.e. when fd_ still holds the invalid sentinel.
// Closing an invalid descriptor yields a spurious EBADF; guard on fd_
// validity instead.
QuicServer::~QuicServer() {
  if (fd_ != kInvalidSocketFd) {
    if (event_loop_ != nullptr && !event_loop_->UnregisterSocket(fd_)) {
      QUIC_LOG(ERROR) << "Failed to unregister socket: " << fd_;
    }
    (void)socket_api::Close(fd_);
    fd_ = kInvalidSocketFd;
  }

  // Nothing should reach the backend after `this` is destroyed, but clear
  // the factory pointer so the backend cannot dangle on socket_factory_.
  quic_simple_server_backend_->SetSocketFactory(nullptr);
}
// Creates the UDP listening socket, binds it to `address`, registers it
// with a freshly created event loop, and builds the dispatcher. Returns
// false on any socket/bind/registration failure (errors are logged).
// If `address.port()` is 0, the kernel-assigned ephemeral port is read back
// and stored in port_.
bool QuicServer::CreateUDPSocketAndListen(const QuicSocketAddress& address) {
  event_loop_ = CreateEventLoop();

  // The backend borrows the socket factory (e.g. for outbound connections);
  // the pointer is cleared again in the destructor.
  socket_factory_ = std::make_unique<EventLoopSocketFactory>(
      event_loop_.get(), quiche::SimpleBufferAllocator::Get());
  quic_simple_server_backend_->SetSocketFactory(socket_factory_.get());

  QuicUdpSocketApi socket_api;
  fd_ = socket_api.Create(address.host().AddressFamilyToInt(),
                          kDefaultSocketReceiveBuffer,
                          kDefaultSocketReceiveBuffer);
  if (fd_ == kQuicInvalidSocketFd) {
    QUIC_LOG(ERROR) << "CreateSocket() failed: " << strerror(errno);
    return false;
  }

  // Dropped-packet accounting is a per-platform capability; remember whether
  // it is available so reads can pass a counter (see OnSocketEvent).
  overflow_supported_ = socket_api.EnableDroppedPacketCount(fd_);
  socket_api.EnableReceiveTimestamp(fd_);

  bool success = socket_api.Bind(fd_, address);
  if (!success) {
    QUIC_LOG(ERROR) << "Bind failed: " << strerror(errno);
    return false;
  }
  QUIC_LOG(INFO) << "Listening on " << address.ToString();
  port_ = address.port();
  if (port_ == 0) {
    // Port 0 means "let the kernel pick"; read the actual port back from
    // the bound socket.
    QuicSocketAddress self_address;
    if (self_address.FromSocket(fd_) != 0) {
      QUIC_LOG(ERROR) << "Unable to get self address.  Error: "
                      << strerror(errno);
    }
    port_ = self_address.port();
  }

  bool register_result = event_loop_->RegisterSocket(
      fd_, kSocketEventReadable | kSocketEventWritable, this);
  if (!register_result) {
    return false;
  }
  dispatcher_.reset(CreateQuicDispatcher());
  dispatcher_->InitializeWithWriter(CreateWriter(fd_));

  return true;
}
// Builds the packet writer the dispatcher will use for outbound packets on
// `fd`. The caller (the dispatcher, via InitializeWithWriter) assumes
// ownership of the returned object.
QuicPacketWriter* QuicServer::CreateWriter(int fd) {
  auto* writer = new QuicDefaultPacketWriter(fd);
  return writer;
}
// Constructs the dispatcher that routes incoming packets to sessions.
// Virtual so test subclasses can substitute a mock. Ownership of the
// returned dispatcher is taken by the caller (stored in dispatcher_).
QuicDispatcher* QuicServer::CreateQuicDispatcher() {
  auto connection_helper = std::make_unique<QuicDefaultConnectionHelper>();
  std::unique_ptr<QuicCryptoServerStreamBase::Helper> session_helper =
      std::make_unique<QuicSimpleCryptoServerStreamHelper>();
  return new QuicSimpleDispatcher(
      &config_, &crypto_config_, &version_manager_,
      std::move(connection_helper), std::move(session_helper),
      event_loop_->CreateAlarmFactory(), quic_simple_server_backend_,
      expected_server_connection_id_length_, connection_id_generator_);
}
std::unique_ptr<QuicEventLoop> QuicServer::CreateEventLoop() {
return GetDefaultEventLoop()->Create(QuicDefaultClock::Get());
}
// Runs the server's event loop indefinitely. Never returns; each iteration
// services at most one batch of events via WaitForEvents().
void QuicServer::HandleEventsForever() {
  for (;;) {
    WaitForEvents();
  }
}
void QuicServer::WaitForEvents() {
event_loop_->RunEventLoopOnce(QuicTime::Delta::FromMilliseconds(50));
}
// Shuts the server down, releasing the dispatcher and event loop.
void QuicServer::Shutdown() {
  if (!silent_close_) {
    // Give the dispatcher a chance to wind down its sessions first
    // (presumably notifying peers — see QuicDispatcher::Shutdown);
    // skipped when a silent close was requested.
    dispatcher_->Shutdown();
  }

  // Destroy the dispatcher before the event loop: the dispatcher holds
  // alarms produced by the loop's alarm factory.
  dispatcher_.reset();
  event_loop_.reset();
}
// Event-loop callback for the listening socket. Handles readable events by
// draining buffered CHLOs and dispatching all pending packets, and writable
// events by letting the dispatcher flush queued writes. For level-triggered
// loops, re-arms the socket for the events that still have pending work.
void QuicServer::OnSocketEvent(QuicEventLoop* /*event_loop*/,
                               QuicUdpSocketFd fd, QuicSocketEventMask events) {
  QUICHE_DCHECK_EQ(fd, fd_);

  if (events & kSocketEventReadable) {
    QUIC_DVLOG(1) << "EPOLLIN";

    // Create sessions for any CHLOs buffered on earlier passes, bounded so
    // a flood of new connections cannot monopolize this event.
    dispatcher_->ProcessBufferedChlos(max_sessions_to_create_per_socket_event_);

    // Drain the socket completely; ReadAndDispatchPackets returns false
    // once there is nothing left to read.
    bool more_to_read = true;
    while (more_to_read) {
      more_to_read = packet_reader_->ReadAndDispatchPackets(
          fd_, port_, *QuicDefaultClock::Get(), dispatcher_.get(),
          overflow_supported_ ? &packets_dropped_ : nullptr);
    }

    if (dispatcher_->HasChlosBuffered()) {
      // CHLOs remain beyond this event's session quota: schedule an
      // artificial readable event so they are processed on the next loop
      // iteration even if no new packets arrive.
      bool success =
          event_loop_->ArtificiallyNotifyEvent(fd_, kSocketEventReadable);
      QUICHE_DCHECK(success);
    }

    if (!event_loop_->SupportsEdgeTriggered()) {
      // Level-triggered loops require explicit re-arming after each event.
      bool success = event_loop_->RearmSocket(fd_, kSocketEventReadable);
      QUICHE_DCHECK(success);
    }
  }
  if (events & kSocketEventWritable) {
    dispatcher_->OnCanWrite();
    // Only re-arm for writable when writes are actually pending, to avoid
    // a busy loop of always-writable notifications.
    if (!event_loop_->SupportsEdgeTriggered() &&
        dispatcher_->HasPendingWrites()) {
      bool success = event_loop_->RearmSocket(fd_, kSocketEventWritable);
      QUICHE_DCHECK(success);
    }
  }
}
} | #include "quiche/quic/tools/quic_server.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/base/macros.h"
#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/core/deterministic_connection_id_generator.h"
#include "quiche/quic/core/io/quic_default_event_loop.h"
#include "quiche/quic/core/io/quic_event_loop.h"
#include "quiche/quic/core/quic_default_clock.h"
#include "quiche/quic/core/quic_default_connection_helper.h"
#include "quiche/quic/core/quic_default_packet_writer.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/platform/api/quic_test_loopback.h"
#include "quiche/quic/test_tools/crypto_test_utils.h"
#include "quiche/quic/test_tools/mock_quic_dispatcher.h"
#include "quiche/quic/test_tools/quic_server_peer.h"
#include "quiche/quic/tools/quic_memory_cache_backend.h"
#include "quiche/quic/tools/quic_simple_crypto_server_stream_helper.h"
namespace quic {
namespace test {
using ::testing::_;
namespace {
// QuicSimpleDispatcher with its event-driven entry points mocked out so
// tests can observe how QuicServer drives the dispatcher from socket
// events (e.g. how many times buffered CHLOs are processed per event).
class MockQuicSimpleDispatcher : public QuicSimpleDispatcher {
 public:
  // Forwards all construction arguments to QuicSimpleDispatcher, fixing the
  // connection-ID length to the default.
  MockQuicSimpleDispatcher(
      const QuicConfig* config, const QuicCryptoServerConfig* crypto_config,
      QuicVersionManager* version_manager,
      std::unique_ptr<QuicConnectionHelperInterface> helper,
      std::unique_ptr<QuicCryptoServerStreamBase::Helper> session_helper,
      std::unique_ptr<QuicAlarmFactory> alarm_factory,
      QuicSimpleServerBackend* quic_simple_server_backend,
      ConnectionIdGeneratorInterface& generator)
      : QuicSimpleDispatcher(config, crypto_config, version_manager,
                             std::move(helper), std::move(session_helper),
                             std::move(alarm_factory),
                             quic_simple_server_backend,
                             kQuicDefaultConnectionIdLength, generator) {}
  ~MockQuicSimpleDispatcher() override = default;

  // Mocked QuicDispatcher hooks invoked by QuicServer::OnSocketEvent.
  MOCK_METHOD(void, OnCanWrite, (), (override));
  MOCK_METHOD(bool, HasPendingWrites, (), (const, override));
  MOCK_METHOD(bool, HasChlosBuffered, (), (const, override));
  MOCK_METHOD(void, ProcessBufferedChlos, (size_t), (override));
};
// QuicServer subclass for tests: substitutes a MockQuicSimpleDispatcher and
// a caller-chosen event-loop implementation via the server's virtual
// factory hooks.
class TestQuicServer : public QuicServer {
 public:
  // `event_loop_factory` and `quic_simple_server_backend` are borrowed and
  // must outlive this server.
  explicit TestQuicServer(QuicEventLoopFactory* event_loop_factory,
                          QuicMemoryCacheBackend* quic_simple_server_backend)
      : QuicServer(crypto_test_utils::ProofSourceForTesting(),
                   quic_simple_server_backend),
        quic_simple_server_backend_(quic_simple_server_backend),
        event_loop_factory_(event_loop_factory) {}

  ~TestQuicServer() override = default;

  // Returns the mock created by the last CreateQuicDispatcher() call, or
  // nullptr if the server has not started listening yet.
  MockQuicSimpleDispatcher* mock_dispatcher() { return mock_dispatcher_; }

 protected:
  // Overrides the production factory to hand back a mock dispatcher,
  // retaining a non-owning pointer for test assertions.
  QuicDispatcher* CreateQuicDispatcher() override {
    mock_dispatcher_ = new MockQuicSimpleDispatcher(
        &config(), &crypto_config(), version_manager(),
        std::make_unique<QuicDefaultConnectionHelper>(),
        std::unique_ptr<QuicCryptoServerStreamBase::Helper>(
            new QuicSimpleCryptoServerStreamHelper()),
        event_loop()->CreateAlarmFactory(), quic_simple_server_backend_,
        connection_id_generator());
    return mock_dispatcher_;
  }

  // Uses the injected factory instead of the platform default event loop.
  std::unique_ptr<QuicEventLoop> CreateEventLoop() override {
    return event_loop_factory_->Create(QuicDefaultClock::Get());
  }

  // Owned by the base class once returned from CreateQuicDispatcher().
  MockQuicSimpleDispatcher* mock_dispatcher_ = nullptr;
  QuicMemoryCacheBackend* quic_simple_server_backend_;
  QuicEventLoopFactory* event_loop_factory_;
};
// Fixture parameterized over event-loop implementations (see the
// INSTANTIATE_TEST_SUITE_P below). Owns an in-memory backend and a
// TestQuicServer wired to the parameterized event-loop factory.
class QuicServerEpollInTest : public QuicTestWithParam<QuicEventLoopFactory*> {
 public:
  QuicServerEpollInTest()
      : server_address_(TestLoopback(), 0),  // port 0: let the kernel pick
        server_(GetParam(), &quic_simple_server_backend_) {}
  // Binds the server socket, records the kernel-assigned port, and shrinks
  // the socket buffers so overflow conditions are easy to trigger in tests.
  void StartListening() {
    server_.CreateUDPSocketAndListen(server_address_);
    server_address_ = QuicSocketAddress(server_address_.host(), server_.port());
    ASSERT_TRUE(QuicServerPeer::SetSmallSocket(&server_));
    if (!server_.overflow_supported()) {
      QUIC_LOG(WARNING) << "Overflow not supported. Not testing.";
      return;
    }
  }
 protected:
  QuicSocketAddress server_address_;
  QuicMemoryCacheBackend quic_simple_server_backend_;
  TestQuicServer server_;
};
// Derives a gtest-safe suffix for each parameterized instantiation from the
// event-loop factory's human-readable name.
std::string GetTestParamName(
    ::testing::TestParamInfo<QuicEventLoopFactory*> info) {
  const std::string loop_name(info.param->GetName());
  return EscapeTestParamName(loop_name);
}
// Run the suite once per supported event-loop implementation, naming each
// instantiation after its event loop so failures are attributable.
INSTANTIATE_TEST_SUITE_P(QuicServerEpollInTests, QuicServerEpollInTest,
                         ::testing::ValuesIn(GetAllSupportedEventLoops()),
                         GetTestParamName);
// Pokes the server with a single UDP datagram and verifies the event loop
// keeps asking the dispatcher to process buffered CHLOs until
// HasChlosBuffered() reports none left.
TEST_P(QuicServerEpollInTest, ProcessBufferedCHLOsOnEpollin) {
  StartListening();
  bool more_chlos = true;
  MockQuicSimpleDispatcher* dispatcher_ = server_.mock_dispatcher();
  QUICHE_DCHECK(dispatcher_ != nullptr);
  EXPECT_CALL(*dispatcher_, OnCanWrite()).Times(testing::AnyNumber());
  // Two rounds expected: one triggered by the incoming packet, one by the
  // event loop revisiting the buffered-CHLO state.
  EXPECT_CALL(*dispatcher_, ProcessBufferedChlos(_)).Times(2);
  EXPECT_CALL(*dispatcher_, HasPendingWrites()).Times(testing::AnyNumber());
  // First poll reports CHLOs still buffered; the second clears the loop
  // condition below via the Assign action.
  EXPECT_CALL(*dispatcher_, HasChlosBuffered())
      .WillOnce(testing::Return(true))
      .WillOnce(
          DoAll(testing::Assign(&more_chlos, false), testing::Return(false)));
  QuicUdpSocketApi socket_api;
  // NOTE(review): this fd is never closed before the test returns; consider
  // releasing it to avoid leaking one descriptor per test run.
  SocketFd fd =
      socket_api.Create(server_address_.host().AddressFamilyToInt(),
                        kDefaultSocketReceiveBuffer,
                        kDefaultSocketReceiveBuffer);
  ASSERT_NE(fd, kQuicInvalidSocketFd);
  // Any datagram works; a zeroed 1KB payload is enough to wake the server.
  char buf[1024];
  memset(buf, 0, ABSL_ARRAYSIZE(buf));
  QuicUdpPacketInfo packet_info;
  packet_info.SetPeerAddress(server_address_);
  WriteResult result =
      socket_api.WritePacket(fd, buf, sizeof(buf), packet_info);
  if (result.status != WRITE_STATUS_OK) {
    QUIC_LOG(ERROR) << "Write error for UDP packet: " << result.error_code;
  }
  // Spin the event loop until the mock flips more_chlos to false.
  while (more_chlos) {
    server_.WaitForEvents();
  }
}
// Fixture for feeding packets directly into a MockQuicDispatcher with no real
// sockets involved; the packet writer targets an arbitrary dummy fd (1234).
class QuicServerDispatchPacketTest : public QuicTest {
 public:
  QuicServerDispatchPacketTest()
      : crypto_config_("blah", QuicRandom::GetInstance(),
                       crypto_test_utils::ProofSourceForTesting(),
                       KeyExchangeSource::Default()),
        version_manager_(AllSupportedVersions()),
        event_loop_(GetDefaultEventLoop()->Create(QuicDefaultClock::Get())),
        connection_id_generator_(kQuicDefaultConnectionIdLength),
        dispatcher_(&config_, &crypto_config_, &version_manager_,
                    std::make_unique<QuicDefaultConnectionHelper>(),
                    std::make_unique<QuicSimpleCryptoServerStreamHelper>(),
                    event_loop_->CreateAlarmFactory(),
                    &quic_simple_server_backend_, connection_id_generator_) {
    // NOTE(review): raw new here relies on InitializeWithWriter taking
    // ownership of the writer -- confirm against the dispatcher's API.
    dispatcher_.InitializeWithWriter(new QuicDefaultPacketWriter(1234));
  }
  // Delivers one received packet to the dispatcher with empty (unset)
  // client/server addresses.
  void DispatchPacket(const QuicReceivedPacket& packet) {
    QuicSocketAddress client_addr, server_addr;
    dispatcher_.ProcessPacket(server_addr, client_addr, packet);
  }
 protected:
  QuicConfig config_;
  QuicCryptoServerConfig crypto_config_;
  QuicVersionManager version_manager_;
  std::unique_ptr<QuicEventLoop> event_loop_;
  QuicMemoryCacheBackend quic_simple_server_backend_;
  DeterministicConnectionIdGenerator connection_id_generator_;
  MockQuicDispatcher dispatcher_;
};
// A minimally well-formed packet must reach ProcessPacket exactly once.
TEST_F(QuicServerDispatchPacketTest, DispatchPacket) {
  unsigned char valid_packet[] = {
      // public flags byte -- presumably 8-byte connection id; confirm against
      // the framer's expectations for this wire format
      0x3C,
      // connection id
      0x10, 0x32, 0x54, 0x76,
      0x98, 0xBA, 0xDC, 0xFE,
      // packet number
      0xBC, 0x9A, 0x78, 0x56,
      0x34, 0x12,
      // private flags
      0x00
  };
  QuicReceivedPacket encrypted_valid_packet(
      reinterpret_cast<char*>(valid_packet), ABSL_ARRAYSIZE(valid_packet),
      QuicTime::Zero(), false);
  EXPECT_CALL(dispatcher_, ProcessPacket(_, _, _)).Times(1);
  DispatchPacket(encrypted_valid_packet);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/tools/quic_server.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/tools/quic_server_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
01947b9c-2719-4c74-b27a-405f3c7c3b5f | cpp | tensorflow/tensorflow | xplane_to_trace_events | third_party/xla/xla/tsl/profiler/convert/xplane_to_trace_events.cc | third_party/xla/xla/tsl/profiler/convert/xplane_to_trace_events_test.cc | #include "xla/tsl/profiler/convert/xplane_to_trace_events.h"
#include <stddef.h>

#include <algorithm>
#include <cerrno>
#include <cstdlib>
#include <string>
#include <utility>
#include <vector>

#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/trace_utils.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_utils.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
#include "tsl/platform/types.h"
#include "tsl/profiler/protobuf/trace_events.pb.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tsl {
namespace profiler {
namespace {
using tensorflow::profiler::XSpace;
// Populates `device` with the plane's name/id and one Resource per line.
// Host-thread resources get an explicit sort index so the trace viewer shows
// them in plane order; device lines keep the viewer's default ordering.
void BuildDeviceAndResources(uint32 device_id, const XPlaneVisitor& plane,
                             Device* device) {
  device->set_name(std::string(plane.Name()));
  device->set_device_id(device_id);
  const bool assign_sort_index = (device_id == kHostThreadsDeviceId);
  int next_ordinal = 0;
  plane.ForEachLine([&](const XLineVisitor& line) {
    const uint32 resource_id = line.DisplayId();
    Resource& resource = (*device->mutable_resources())[resource_id];
    resource.set_resource_id(resource_id);
    resource.set_name(std::string(line.DisplayName()));
    if (assign_sort_index) {
      resource.set_sort_index(++next_ordinal);
    }
  });
}
// Emits one TraceEvent per XEvent on `xplane` under device `device_id`,
// skipping internal events and the XLA async-op line.
void ConvertXPlaneToTraceEvents(uint32 device_id, const XPlaneVisitor& xplane,
                                TraceContainer& container) {
  // Register the device and its resources before emitting events for them.
  BuildDeviceAndResources(device_id, xplane,
                          container.MutableDevice(device_id));
  xplane.ForEachLine([device_id, &container](const XLineVisitor& xline) {
    uint32 resource_id = xline.DisplayId();
    // Async-op lines are excluded from the trace viewer output entirely.
    if (xline.DisplayName() == tsl::profiler::kXlaAsyncOpLineName) {
      return;
    }
    xline.ForEachEvent(
        [device_id, resource_id, &container](const XEventVisitor& xevent) {
          int64_t event_type =
              xevent.Type().value_or(HostEventType::kUnknownHostEventType);
          if (IsInternalEvent(event_type)) return;
          TraceEvent* event = container.CreateEvent();
          auto& args = *event->mutable_args();
          event->set_device_id(device_id);
          event->set_resource_id(resource_id);
          // Prefer the display name; preserve the raw name as "long_name".
          if (xevent.HasDisplayName()) {
            event->set_name(std::string(xevent.DisplayName()));
            args["long_name"] = std::string(xevent.Name());
          } else {
            event->set_name(std::string(xevent.Name()));
          }
          event->set_timestamp_ps(xevent.TimestampPs());
          event->set_duration_ps(xevent.DurationPs());
          auto for_each_stat = [&](const XStatVisitor& stat) {
            if (stat.ValueCase() == XStat::VALUE_NOT_SET) return;
            if (IsInternalStat(stat.Type())) return;
            // A step-name stat renames the event itself.
            if (stat.Type() == StatType::kStepName) {
              event->set_name(stat.ToString());
            }
            args[std::string(stat.Name())] = stat.ToString();
          };
          // Metadata stats first, then per-event stats: a per-event stat with
          // the same name overwrites the metadata value in `args`.
          xevent.Metadata().ForEachStat(for_each_stat);
          xevent.ForEachStat(for_each_stat);
        });
  });
}
}
// Returns the cap on the number of events exported to the trace viewer.
// The default (1,000,000) can be overridden through the
// TF_PROFILER_TRACE_VIEWER_MAX_EVENTS environment variable.
uint64 GetTraceViewerMaxEvents() {
  constexpr uint64 kMaxEvents = 1000000;
  const char* max_events = getenv("TF_PROFILER_TRACE_VIEWER_MAX_EVENTS");
  if (max_events == nullptr) {
    return kMaxEvents;
  }
  // Parse with strtoull instead of std::stoull: stoull throws
  // std::invalid_argument / std::out_of_range on a malformed or out-of-range
  // value, which would crash the process over a bad environment variable.
  // Fall back to the default on any parse failure or overflow.
  char* end = nullptr;
  errno = 0;
  unsigned long long parsed = strtoull(max_events, &end, 10);
  if (end == max_events || errno == ERANGE) {
    return kMaxEvents;
  }
  return parsed;
}
// Converts an XSpace into a TraceContainer: the host-threads plane first,
// then the first available device plane family (GPU, else TPU, else custom),
// finally capping the total event count for the trace viewer.
TraceContainer ConvertXSpaceToTraceContainer(const XSpace& xspace) {
  TraceContainer container;
  const XPlane* host_plane = FindPlaneWithName(xspace, kHostThreadsPlaneName);
  if (host_plane != nullptr) {
    XPlaneVisitor xplane = CreateTfXPlaneVisitor(host_plane);
    ConvertXPlaneToTraceEvents(kHostThreadsDeviceId, xplane, container);
  }
  // Device planes are mutually exclusive by priority: GPU wins over TPU,
  // which wins over custom planes.
  std::vector<const XPlane*> device_planes =
      FindPlanesWithPrefix(xspace, kGpuPlanePrefix);
  if (device_planes.empty()) {
    device_planes = FindPlanesWithPrefix(xspace, kTpuPlanePrefix);
  }
  if (device_planes.empty()) {
    device_planes = FindPlanesWithPrefix(xspace, kCustomPlanePrefix);
  }
  for (const XPlane* device_plane : device_planes) {
    XPlaneVisitor xplane = CreateTfXPlaneVisitor(device_plane);
    // Offset device ids so they never collide with the host device id.
    uint32 device_id = kFirstDeviceId + xplane.Id();
    ConvertXPlaneToTraceEvents(device_id, xplane, container);
  }
  uint64 viewer_max_events = GetTraceViewerMaxEvents();
  container.CapEvents(viewer_max_events);
  return container;
}
void ConvertXSpaceToTraceEventsString(const XSpace& xspace,
std::string* content) {
ConvertXSpaceToTraceContainer(xspace).FlushAndSerializeEvents(content);
}
}
} | #include "xla/tsl/profiler/convert/xplane_to_trace_events.h"
#include <limits>
#include <utility>
#include "xla/tsl/profiler/utils/trace_utils.h"
#include "xla/tsl/profiler/utils/xplane_builder.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "tsl/platform/test.h"
#include "tsl/profiler/protobuf/trace_events.pb.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tsl {
namespace profiler {
namespace {
using tensorflow::profiler::XSpace;
// Builds a two-plane fixture: a host plane with two threads (one event each,
// annotated with a "tf_op" stat) and GPU plane 0 with one kernel event.
void CreateXSpace(XSpace* space) {
  XPlaneBuilder host_plane(space->add_planes());
  host_plane.SetName(kHostThreadsPlaneName);
  XLineBuilder thread1 = host_plane.GetOrCreateLine(10);
  thread1.SetName("thread1");
  XEventBuilder event1 =
      thread1.AddEvent(*host_plane.GetOrCreateEventMetadata("event1"));
  event1.SetTimestampNs(150000);
  event1.SetDurationNs(10000);
  event1.AddStatValue(*host_plane.GetOrCreateStatMetadata("tf_op"),
                      *host_plane.GetOrCreateStatMetadata("Relu"));
  XLineBuilder thread2 = host_plane.GetOrCreateLine(20);
  thread2.SetName("thread2");
  XEventBuilder event2 =
      thread2.AddEvent(*host_plane.GetOrCreateEventMetadata("event2"));
  event2.SetTimestampNs(160000);
  event2.SetDurationNs(10000);
  event2.AddStatValue(*host_plane.GetOrCreateStatMetadata("tf_op"),
                      *host_plane.GetOrCreateStatMetadata("Conv2D"));
  // GPU plane: a single stream carrying one kernel event.
  XPlaneBuilder device_plane(space->add_planes());
  device_plane.SetName(GpuPlaneName(0));
  device_plane.SetId(0);
  XLineBuilder stream1 = device_plane.GetOrCreateLine(30);
  stream1.SetName("gpu stream 1");
  XEventBuilder event3 =
      stream1.AddEvent(*device_plane.GetOrCreateEventMetadata("kernel1"));
  event3.SetTimestampNs(180000);
  event3.SetDurationNs(10000);
  event3.AddStatValue(*device_plane.GetOrCreateStatMetadata("correlation id"),
                      55);
}
// End-to-end conversion of the fixture: 2 devices (host + GPU), 2 host
// resources, 1 GPU resource, and 3 events total.
TEST(ConvertXPlaneToTraceEvents, Convert) {
  XSpace xspace;
  CreateXSpace(&xspace);
  TraceContainer container = ConvertXSpaceToTraceContainer(xspace);
  ASSERT_EQ(container.trace().devices_size(), 2);
  EXPECT_EQ(
      container.trace().devices().at(kHostThreadsDeviceId).resources_size(), 2);
  EXPECT_EQ(container.trace().devices().at(kFirstDeviceId).resources_size(), 1);
  EXPECT_EQ(container.UnsortedEvents().size(), 3);
}
// Events on the XLA async-op line must be dropped from the output.
TEST(ConvertXPlaneToTraceEvents, SkipAsyncOps) {
  XSpace xspace;
  XPlaneBuilder device_plane(xspace.add_planes());
  device_plane.SetName(GpuPlaneName(0));
  XLineBuilder async_ops = device_plane.GetOrCreateLine(10);
  async_ops.SetName(kXlaAsyncOpLineName);
  XEventBuilder event1 =
      async_ops.AddEvent(*device_plane.GetOrCreateEventMetadata("event1"));
  event1.SetTimestampNs(100);
  event1.SetDurationNs(1);
  TraceContainer container = ConvertXSpaceToTraceContainer(xspace);
  ASSERT_THAT(container.UnsortedEvents(), ::testing::IsEmpty());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/convert/xplane_to_trace_events.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/convert/xplane_to_trace_events_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
41389981-1007-4f8c-826c-97fd840bdf44 | cpp | tensorflow/tensorflow | nn_ops | tensorflow/c/experimental/ops/nn_ops.cc | tensorflow/core/ops/nn_ops_test.cc | #include "tensorflow/c/experimental/ops/nn_ops.h"
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_context.h"
#include "tensorflow/c/eager/abstract_operation.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/tracing_utils.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/errors.h"
using tensorflow::tracing::MaybeSetOpName;
namespace tensorflow {
namespace ops {
// Computes softmax cross-entropy loss and its gradient, taking one class
// index per row of logits (rather than a full label distribution).
//
// Outputs: *loss (per-example loss) and *backprop (gradient w.r.t. features).
// Returns the status of building and executing the op; on failure the output
// handles are left untouched.
Status SparseSoftmaxCrossEntropyWithLogits(AbstractContext* ctx,
                                           AbstractTensorHandle* const features,
                                           AbstractTensorHandle* const labels,
                                           AbstractTensorHandle** loss,
                                           AbstractTensorHandle** backprop,
                                           const char* name,
                                           const char* raw_device_name) {
  AbstractOperationPtr op_ptr(ctx->CreateOperation());
  TF_RETURN_IF_ERROR(
      op_ptr->Reset("SparseSoftmaxCrossEntropyWithLogits", raw_device_name));
  TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(features));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(labels));
  int num_retvals = 2;
  AbstractTensorHandle* temp_outputs[2];
  Status status = op_ptr->Execute(temp_outputs, &num_retvals);
  // Publish the outputs only on success: if Execute fails, temp_outputs may
  // be uninitialized, and writing garbage handles to *loss/*backprop could
  // mislead callers that do not check the returned status first.
  if (!status.ok()) return status;
  *loss = temp_outputs[0];
  *backprop = temp_outputs[1];
  return status;
}
// Computes rectified-linear gradients: runs the "ReluGrad" op on
// (gradients, features) and writes the single result into *backprops.
Status ReluGrad(AbstractContext* ctx, AbstractTensorHandle* const gradients,
                AbstractTensorHandle* const features,
                AbstractTensorHandle** backprops, const char* name,
                const char* raw_device_name) {
  AbstractOperationPtr op(ctx->CreateOperation());
  TF_RETURN_IF_ERROR(op->Reset("ReluGrad", raw_device_name));
  TF_RETURN_IF_ERROR(MaybeSetOpName(op.get(), name));
  // Input order matters: the op expects gradients first, then features.
  TF_RETURN_IF_ERROR(op->AddInput(gradients));
  TF_RETURN_IF_ERROR(op->AddInput(features));
  int num_outputs = 1;
  return op->Execute(absl::MakeSpan(backprops, 1), &num_outputs);
}
// Runs the "Relu" op (max(features, 0)) and writes the result into
// *activations.
Status Relu(AbstractContext* ctx, AbstractTensorHandle* const features,
            AbstractTensorHandle** activations, const char* name,
            const char* raw_device_name) {
  AbstractOperationPtr op(ctx->CreateOperation());
  TF_RETURN_IF_ERROR(op->Reset("Relu", raw_device_name));
  TF_RETURN_IF_ERROR(MaybeSetOpName(op.get(), name));
  TF_RETURN_IF_ERROR(op->AddInput(features));
  int num_outputs = 1;
  return op->Execute(absl::MakeSpan(activations, 1), &num_outputs);
}
// Runs the "BiasAdd" op: adds the 1-D `bias` along the channel dimension of
// `value` selected by `data_format`, writing the result into *output.
Status BiasAdd(AbstractContext* ctx, AbstractTensorHandle* const value,
               AbstractTensorHandle* const bias, AbstractTensorHandle** output,
               const char* data_format, const char* name,
               const char* raw_device_name) {
  AbstractOperationPtr op(ctx->CreateOperation());
  TF_RETURN_IF_ERROR(op->Reset("BiasAdd", raw_device_name));
  TF_RETURN_IF_ERROR(MaybeSetOpName(op.get(), name));
  TF_RETURN_IF_ERROR(op->AddInput(value));
  TF_RETURN_IF_ERROR(op->AddInput(bias));
  TF_RETURN_IF_ERROR(
      op->SetAttrString("data_format", data_format, strlen(data_format)));
  int num_outputs = 1;
  return op->Execute(absl::MakeSpan(output, 1), &num_outputs);
}
// Runs the "BiasAddGrad" op: reduces `out_backprop` over all non-channel
// dimensions (per `data_format`) to produce the bias gradient in *output.
Status BiasAddGrad(AbstractContext* ctx,
                   AbstractTensorHandle* const out_backprop,
                   AbstractTensorHandle** output, const char* data_format,
                   const char* name, const char* raw_device_name) {
  AbstractOperationPtr op(ctx->CreateOperation());
  TF_RETURN_IF_ERROR(op->Reset("BiasAddGrad", raw_device_name));
  TF_RETURN_IF_ERROR(MaybeSetOpName(op.get(), name));
  TF_RETURN_IF_ERROR(op->AddInput(out_backprop));
  TF_RETURN_IF_ERROR(
      op->SetAttrString("data_format", data_format, strlen(data_format)));
  int num_outputs = 1;
  return op->Execute(absl::MakeSpan(output, 1), &num_outputs);
}
}
} | #include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
// TopK shape fn: output shapes replace the input's last dimension with k.
TEST(NNOpsTest, TopK_ShapeFn) {
  ShapeInferenceTestOp op("TopK");
  auto set_k = [&op](int k) {
    // NOTE(review): the node is built with op type "Pack", not "TopK" --
    // apparently only the "k" attr matters to the shape-fn harness; confirm.
    TF_ASSERT_OK(NodeDefBuilder("test", "Pack")
                     .Input({{"a", 0, DT_FLOAT}})
                     .Attr("k", k)
                     .Finalize(&op.node_def));
  };
  set_k(20);
  INFER_OK(op, "?", "?;?");
  INFER_OK(op, "[20]", "[20];[20]");
  INFER_OK(op, "[21]", "[20];[20]");
  INFER_OK(op, "[1,?,21]", "[d0_0,d0_1,20];[d0_0,d0_1,20]");
  INFER_OK(op, "[1,?,21,?]", "[d0_0,d0_1,d0_2,20];[d0_0,d0_1,d0_2,20]");
  INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[]");
  INFER_ERROR("input must have last dimension >= k = 20 but is 1", op, "[1]");
  INFER_ERROR("input must have last dimension >= k = 20 but is 4", op,
              "[1,2,3,4]");
  set_k(-1);
  INFER_ERROR("Need k >= 0, got -1", op, "[1,2,3,4]");
}
// TopKV2 takes k as a runtime scalar tensor (input 1) instead of an attr.
TEST(NNOpsTest, TopKV2_ShapeFn) {
  ShapeInferenceTestOp op("TopKV2");
  op.input_tensors.resize(2);
  Tensor k_t;
  op.input_tensors[1] = &k_t;
  k_t = test::AsScalar<int32>(20);
  INFER_OK(op, "?;[]", "?;?");
  INFER_OK(op, "[20];[]", "[20];[20]");
  INFER_OK(op, "[1,?,21];[]", "[d0_0,d0_1,20];[d0_0,d0_1,20]");
  INFER_OK(op, "[1,?,21,?];[]", "[d0_0,d0_1,d0_2,20];[d0_0,d0_1,d0_2,20]");
  INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[];[]");
  INFER_ERROR("input must have last dimension >= k = 20 but is 1", op,
              "[1];[]");
  INFER_ERROR("input must have last dimension >= k = 20 but is 4", op,
              "[1,2,3,4];[]");
  k_t = test::AsScalar<int32>(-1);
  INFER_ERROR(
      "Dimension size, given by scalar input 1, must be non-negative but is -1",
      op, "[1,2,3,4];[]");
}
// NthElement drops the last dimension entirely (one element per inner row);
// n (input 1) must be strictly less than the last dimension.
TEST(NNOpsTest, NthElement_ShapeFn) {
  ShapeInferenceTestOp op("NthElement");
  op.input_tensors.resize(2);
  Tensor n_t;
  op.input_tensors[1] = &n_t;
  n_t = test::AsScalar<int32>(20);
  INFER_OK(op, "?;[]", "?");
  INFER_OK(op, "[21];[]", "[]");
  INFER_OK(op, "[2,?,?];[]", "[d0_0,d0_1]");
  INFER_OK(op, "[?,3,?,21];[]", "[d0_0,d0_1,d0_2]");
  INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[];[]");
  INFER_ERROR("Input must have last dimension > n = 20 but is 1", op, "[1];[]");
  INFER_ERROR("Input must have last dimension > n = 20 but is 20", op,
              "[1,2,3,20];[]");
  n_t = test::AsScalar<int32>(-1);
  INFER_ERROR(
      "Dimension size, given by scalar input 1, must be non-negative but is -1",
      op, "[1,2,3,4];[]");
}
// Inputs: t (rank 4), then four rank-1 vectors (m, v, beta, gamma); the
// output's channel dimension is merged from input 0's last dim and all four
// vectors.
TEST(NNOpsTest, BatchNormWithGlobalNormalization_ShapeFn) {
  ShapeInferenceTestOp op("BatchNormWithGlobalNormalization");
  INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;[1,2,3];?;?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;[1,2,3];?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;?;[1,2,3]");
  INFER_OK(op, "?;?;?;?;?", "[?,?,?,?]");
  INFER_OK(op, "?;[1];?;?;?", "[?,?,?,d1_0]");
  INFER_OK(op, "?;?;[1];?;?", "[?,?,?,d2_0]");
  INFER_OK(op, "?;?;?;[1];?", "[?,?,?,d3_0]");
  INFER_OK(op, "?;?;?;?;[1]", "[?,?,?,d4_0]");
  INFER_OK(op, "[1,2,3,4];[4];[4];[4];[4]",
           "[d0_0,d0_1,d0_2,d0_3|d1_0|d2_0|d3_0|d4_0]");
}
// Quantized variant: each of the five tensors carries min/max scalars, hence
// inputs come in triples; the op also emits min/max output scalars.
TEST(NNOpsTest, QuantizedBatchNormWithGlobalNormalization_ShapeFn) {
  ShapeInferenceTestOp op("QuantizedBatchNormWithGlobalNormalization");
  INFER_ERROR("Shape must be rank 4 but is rank 3", op,
              "[1,2,3];?;?;?;?;?;?;?;?;?;?;?;?;?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op,
              "?;?;?;[1,2,3];?;?;?;?;?;?;?;?;?;?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op,
              "?;?;?;?;?;?;[1,2,3];?;?;?;?;?;?;?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op,
              "?;?;?;?;?;?;?;?;?;[1,2,3];?;?;?;?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op,
              "?;?;?;?;?;?;?;?;?;?;?;?;[1,2,3];?;?");
  INFER_OK(op, "?;[];[];?;[];[];?;[];[];?;[];[];?;[];[]", "[?,?,?,?];[];[]");
  INFER_OK(op, "?;[];[];[1];[];[];?;[];[];?;[];[];?;[];[]",
           "[?,?,?,d3_0];[];[]");
  INFER_OK(op, "?;[];[];?;[];[];[1];[];[];?;[];[];?;[];[]",
           "[?,?,?,d6_0];[];[]");
  INFER_OK(op, "?;[];[];?;[];[];?;[];[];[1];[];[];?;[];[]",
           "[?,?,?,d9_0];[];[]");
  INFER_OK(op, "?;[];[];?;[];[];?;[];[];?;[];[];[1];[];[]",
           "[?,?,?,d12_0];[];[]");
  INFER_OK(op, "[1,2,3,4];[];[];[4];[];[];[4];[];[];[4];[];[];[4];[];[]",
           "[d0_0,d0_1,d0_2,d0_3|d3_0|d6_0|d9_0|d12_0];[];[]");
}
// Gradient op returns five outputs: dx (rank 4) plus four channel-sized
// vectors.
TEST(NNOpsTest, BatchNormWithGlobalNormalizationGrad_ShapeFn) {
  ShapeInferenceTestOp op("BatchNormWithGlobalNormalizationGrad");
  INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;[1,2,3];?;?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;[1,2,3];?");
  INFER_ERROR("Shapes must be equal rank, but are 4 and 3", op,
              "?;?;?;?;[1,2,3]");
  INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[?];[?]");
  INFER_OK(op, "?;[1];?;?;?", "[?,?,?,d1_0];[d1_0];[d1_0];[d1_0];[d1_0]");
  INFER_OK(op, "?;?;[1];?;?", "[?,?,?,d2_0];[d2_0];[d2_0];[d2_0];[d2_0]");
  INFER_OK(op, "?;?;?;[1];?", "[?,?,?,d3_0];[d3_0];[d3_0];[d3_0];[d3_0]");
  INFER_OK(op, "[1,?,3,?];[?];[?];[?];[?,2,?,4]",
           "[d0_0,d4_1,d0_2,d4_3];[d4_3];[d4_3];[d4_3];[d4_3]");
}
// FusedBatchNorm: the channel dimension comes from NHWC's last or NCHW's
// second input dim, merged with the scale/offset (and, when not training,
// mean/variance) vectors. Covers training/inference x NHWC/NCHW, plus an
// exponential_avg_factor variant.
TEST(NNOpsTest, FusedBatchNorm_ShapeFn) {
  ShapeInferenceTestOp op("FusedBatchNorm");
  auto set_op = [&op](bool is_training, float exponential_avg_factor,
                      string data_format) {
    TF_ASSERT_OK(NodeDefBuilder("test", "FusedBatchNorm")
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_FLOAT))
                     .Attr("data_format", data_format)
                     .Attr("is_training", is_training)
                     .Attr("exponential_avg_factor", exponential_avg_factor)
                     .Finalize(&op.node_def));
  };
  // Training, NHWC: mean/variance inputs are ignored for shape purposes.
  set_op(true, 1.0, "NHWC");
  INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;[1,2,3];?;?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?");
  INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[?];[?]");
  INFER_OK(op, "?;[1];?;?;?", "[?,?,?,d1_0];[d1_0];[d1_0];[d1_0];[d1_0]");
  INFER_OK(op, "?;?;[1];?;?", "[?,?,?,d2_0];[d2_0];[d2_0];[d2_0];[d2_0]");
  INFER_OK(op, "[1,2,3,4];[4];[4];?;?",
           "[d0_0,d0_1,d0_2,d0_3|d1_0|d2_0];"
           "[d0_3|d1_0|d2_0];[d0_3|d1_0|d2_0];"
           "[d0_3|d1_0|d2_0];[d0_3|d1_0|d2_0]");
  // Training with running-average factor: same shape behavior.
  set_op(true, 0.5, "NHWC");
  INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;[1,2,3];?;?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?");
  INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[?];[?]");
  INFER_OK(op, "?;[1];?;?;?", "[?,?,?,d1_0];[d1_0];[d1_0];[d1_0];[d1_0]");
  INFER_OK(op, "?;?;[1];?;?", "[?,?,?,d2_0];[d2_0];[d2_0];[d2_0];[d2_0]");
  INFER_OK(op, "[1,2,3,4];[4];[4];?;?",
           "[d0_0,d0_1,d0_2,d0_3|d1_0|d2_0];"
           "[d0_3|d1_0|d2_0];[d0_3|d1_0|d2_0];"
           "[d0_3|d1_0|d2_0];[d0_3|d1_0|d2_0]");
  // Training, NCHW: channel dim is input dim 1.
  set_op(true, 1.0, "NCHW");
  INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;[1,2,3];?;?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?");
  INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[?];[?]");
  INFER_OK(op, "?;[1];?;?;?", "[?,d1_0,?,?];[d1_0];[d1_0];[d1_0];[d1_0]");
  INFER_OK(op, "?;?;[1];?;?", "[?,d2_0,?,?];[d2_0];[d2_0];[d2_0];[d2_0]");
  INFER_OK(op, "[1,4,2,3];[4];[4];?;?",
           "[d0_0,d0_1|d1_0|d2_0,d0_2,d0_3];"
           "[d0_1|d1_0|d2_0];[d0_1|d1_0|d2_0];"
           "[d0_1|d1_0|d2_0];[d0_1|d1_0|d2_0]");
  // Inference, NHWC: mean/variance now participate in the merge.
  set_op(false, 1.0, "NHWC");
  INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;[1,2,3];?;?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;[1,2,3];?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;?;[1,2,3]");
  INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[?];[?]");
  INFER_OK(op, "?;[1];?;?;?", "[?,?,?,d1_0];[d1_0];[d1_0];[d1_0];[d1_0]");
  INFER_OK(op, "?;?;[1];?;?", "[?,?,?,d2_0];[d2_0];[d2_0];[d2_0];[d2_0]");
  INFER_OK(op, "?;?;?;[1];?", "[?,?,?,d3_0];[d3_0];[d3_0];[d3_0];[d3_0]");
  INFER_OK(op, "?;?;?;?;[1]", "[?,?,?,d4_0];[d4_0];[d4_0];[d4_0];[d4_0]");
  INFER_OK(op, "[1,2,3,4];[4];[4];[4];[4]",
           "[d0_0,d0_1,d0_2,d0_3|d1_0|d2_0|d3_0|d4_0];"
           "[d0_3|d1_0|d2_0|d3_0|d4_0];[d0_3|d1_0|d2_0|d3_0|d4_0];"
           "[d0_3|d1_0|d2_0|d3_0|d4_0];[d0_3|d1_0|d2_0|d3_0|d4_0]");
  // Inference, NCHW.
  set_op(false, 1.0, "NCHW");
  INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;[1,2,3];?;?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;[1,2,3];?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;?;[1,2,3]");
  INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[?];[?]");
  INFER_OK(op, "?;[1];?;?;?", "[?,d1_0,?,?];[d1_0];[d1_0];[d1_0];[d1_0]");
  INFER_OK(op, "?;?;[1];?;?", "[?,d2_0,?,?];[d2_0];[d2_0];[d2_0];[d2_0]");
  INFER_OK(op, "?;?;?;[1];?", "[?,d3_0,?,?];[d3_0];[d3_0];[d3_0];[d3_0]");
  INFER_OK(op, "?;?;?;?;[1]", "[?,d4_0,?,?];[d4_0];[d4_0];[d4_0];[d4_0]");
  INFER_OK(op, "[1,4,2,3];[4];[4];[4];[4]",
           "[d0_0,d0_1|d1_0|d2_0|d3_0|d4_0,d0_2,d0_3];"
           "[d0_1|d1_0|d2_0|d3_0|d4_0];[d0_1|d1_0|d2_0|d3_0|d4_0];"
           "[d0_1|d1_0|d2_0|d3_0|d4_0];[d0_1|d1_0|d2_0|d3_0|d4_0]");
}
// FusedBatchNormGrad: outputs dx plus two channel-sized vectors; the final
// two outputs are placeholders with zero-sized shapes.
TEST(NNOpsTest, FusedBatchNormGrad_ShapeFn) {
  ShapeInferenceTestOp op("FusedBatchNormGrad");
  auto set_op = [&op](string data_format) {
    TF_ASSERT_OK(NodeDefBuilder("test", "FusedBatchNormGrad")
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_FLOAT))
                     .Attr("data_format", data_format)
                     .Finalize(&op.node_def));
  };
  // NCHW: channel dim is dim 1.
  set_op("NCHW");
  INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?");
  INFER_ERROR("Shape must be rank 4 but is rank 3", op, "?;[1,2,3];?;?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;[1,2,3];?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;?;[1,2,3]");
  INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[0];[0]");
  INFER_OK(op, "?;?;[1];?;?", "[?,d2_0,?,?];[d2_0];[d2_0];[0];[0]");
  INFER_OK(op, "?;?;?;[1];?", "[?,d3_0,?,?];[d3_0];[d3_0];[0];[0]");
  INFER_OK(op, "?;?;?;?;[1]", "[?,d4_0,?,?];[d4_0];[d4_0];[0];[0]");
  INFER_OK(op, "[1,4,2,3];[1,4,2,3];[4];[4];[4]",
           "[d0_0,d0_1|d2_0|d3_0|d4_0,d0_2,d0_3];"
           "[d0_1|d2_0|d3_0|d4_0];[d0_1|d2_0|d3_0|d4_0];[0];[0]");
  // NHWC: channel dim is the last dim.
  set_op("NHWC");
  INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?");
  INFER_ERROR("Shape must be rank 4 but is rank 3", op, "?;[1,2,3];?;?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;[1,2,3];?");
  INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;?;[1,2,3]");
  INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[0];[0]");
  INFER_OK(op, "?;?;[1];?;?", "[?,?,?,d2_0];[d2_0];[d2_0];[0];[0]");
  INFER_OK(op, "?;?;?;[1];?", "[?,?,?,d3_0];[d3_0];[d3_0];[0];[0]");
  INFER_OK(op, "?;?;?;?;[1]", "[?,?,?,d4_0];[d4_0];[d4_0];[0];[0]");
  INFER_OK(op, "[1,2,3,4];[1,2,3,4];[4];[4];[4]",
           "[d0_0,d0_1,d0_2,d0_3|d2_0|d3_0|d4_0];"
           "[d0_3|d2_0|d3_0|d4_0];[d0_3|d2_0|d3_0|d4_0];[0];[0]");
}
// Conv2DBackpropInput: input 0 is the desired input-sizes vector (4 values,
// or 2 spatial values combined with batch/depth from the other inputs).
TEST(NNOpsTest, Conv2DBackpropInput_ShapeFn) {
  ShapeInferenceTestOp op("Conv2DBackpropInput");
  INFER_ERROR("input_sizes to contain 4 values or 2 values", op,
              "[3];[?,?,?,?];[?,?,?,?]");
  INFER_ERROR("Shape must be rank 4 but is rank 3", op,
              "[4];[?,?,?,?];[?,?,?]");
  INFER_OK(op, "[4];[?,?,2,?];[1,?,?,?]", "[d2_0,?,?,?]");
  INFER_OK(op, "[2];[?,?,2,?];[1,?,?,?]", "[d2_0,?,?,d1_2]");
}
// Conv3DBackpropInput: output matches input 0's (rank-5) shape.
TEST(NNOpsTest, Conv3DBackpropInput_ShapeFn) {
  ShapeInferenceTestOp op("Conv3DBackpropInput");
  INFER_ERROR("Shape must be rank 5 but is rank 3", op, "[1,2,3];?;?");
  INFER_OK(op, "?;?;?", "[?,?,?,?,?]");
  INFER_OK(op, "[?,?,?,?,?];?;?", "in0");
  INFER_OK(op, "[?,2,?,4,?];?;?", "in0");
}
// Conv3DBackpropFilter: output matches the filter (input 1) shape.
TEST(NNOpsTest, Conv3DBackpropFilter_ShapeFn) {
  ShapeInferenceTestOp op("Conv3DBackpropFilter");
  INFER_ERROR("Shape must be rank 5 but is rank 3", op, "?;[1,2,3];?");
  INFER_OK(op, "?;?;?", "[?,?,?,?,?]");
  INFER_OK(op, "?;[?,?,?,?,?];?", "in1");
  INFER_OK(op, "?;[?,2,?,4,?];?", "in1");
}
// MaxPool3DGrad: gradient has the original (rank-5) input's shape.
TEST(NNOpsTest, MaxPool3DGrad_ShapeFn) {
  ShapeInferenceTestOp op("MaxPool3DGrad");
  INFER_ERROR("Shape must be rank 5 but is rank 3", op, "[1,2,3];?;?");
  INFER_OK(op, "?;?;?", "[?,?,?,?,?]");
  INFER_OK(op, "[?,?,?,?,?];?;?", "in0");
  INFER_OK(op, "[?,2,?,4,?];?;?", "in0");
}
// LRNGrad: all three rank-4 inputs are merged into the output shape.
TEST(NNOpsTest, LRNGrad_ShapeFn) {
  ShapeInferenceTestOp op("LRNGrad");
  INFER_OK(op, "[1,?,?,4];[?,2,?,?];[?,?,3,?]", "[d0_0,d1_1,d2_2,d0_3]");
  INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?");
  INFER_ERROR("Shapes must be equal rank, but are 4 and 3", op, "?;[1,2,3];?");
  INFER_ERROR("Shapes must be equal rank, but are 4 and 3", op, "?;?;[1,2,3]");
}
// Both MaxPool gradient variants echo the original input's rank-4 shape.
TEST(NNOpsTest, MaxPoolGrad_ShapeFn) {
  for (const char* op_name : {"MaxPoolGrad", "MaxPoolGradWithArgmax"}) {
    ShapeInferenceTestOp op(op_name);
    INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?");
    INFER_OK(op, "?;?;?", "[?,?,?,?]");
    INFER_OK(op, "[?,?,?,?];?;?", "in0");
    INFER_OK(op, "[?,2,?,4];?;?", "in0");
  }
}
// Dilation2D gradients pass through input 0 / input 1 respectively.
TEST(NNOpsTest, Dilation2DBackpropInput_ShapeFn) {
  ShapeInferenceTestOp op("Dilation2DBackpropInput");
  INFER_OK(op, "?;?;?", "in0");
  INFER_OK(op, "?;[?,?,?,?,?];?", "in0");
  INFER_OK(op, "?;[?,2,?,4,?];?", "in0");
}
TEST(NNOpsTest, Dilation2DBackpropFilter_ShapeFn) {
  ShapeInferenceTestOp op("Dilation2DBackpropFilter");
  INFER_OK(op, "?;?;?", "in1");
  INFER_OK(op, "?;[?,?,?,?,?];?", "in1");
  INFER_OK(op, "?;[?,2,?,4,?];?", "in1");
}
// Elementwise gradient ops: output is the merge of both input shapes.
TEST(NNOpsTest, MergeBothInputs_ShapeFn) {
  for (const char* op_name : {"ReluGrad", "Relu6Grad", "EluGrad", "SeluGrad",
                              "SoftplusGrad", "SoftsignGrad"}) {
    ShapeInferenceTestOp op(op_name);
    INFER_OK(op, "?;?", "in0|in1");
    INFER_OK(op, "?;[1,?,3]", "in1");
    INFER_OK(op, "[1,?,3];?", "in0");
    INFER_OK(op, "[1,?];[?,2]", "[d0_0,d1_1]");
    INFER_ERROR("Dimension 1 in both shapes must be equal, but are 3 and 2", op,
                "[1,3];[?,2]");
  }
}
// Dense variant: features and labels broadcast against each other; outputs
// are a batch-sized loss vector and a matrix-shaped backprop.
TEST(NNOpsTest, SoftmaxCrossEntropyWithLogits_ShapeFn) {
  ShapeInferenceTestOp op("SoftmaxCrossEntropyWithLogits");
  INFER_OK(op, "?;?", "[?];[?,?]");
  INFER_OK(op, "[?,?];[?,?]", "[d0_0|d1_0];in0|in1");
  INFER_OK(op, "[1,2];[?,2]", "[d0_0];in0");
  INFER_OK(op, "[1,?];[?,2]", "[d0_0];[d0_0,d0_1|d1_1]");
  INFER_OK(op, "[?,2];[1,2]", "[d1_0];in1");
  INFER_ERROR("Shape must be broadcasted with rank 2", op, "[1,2,3];?");
  INFER_ERROR("Shape must be broadcasted with rank 2", op, "?;[1,2,3]");
  INFER_OK(op, "[1,4];[2,4]", "[d1_0];[d1_0,d0_1|d1_1]");
  INFER_OK(op, "[2,4];[2,1]", "[d0_0];[d0_0|d1_0,d0_1]");
  INFER_OK(op, "[1,?];[2,4]", "[d1_0];[d1_0,d0_1|d1_1]");
  INFER_OK(op, "[2,4];[?,1]", "[d0_0];[d0_0|d1_0,d0_1]");
}
// Sparse variant: labels are a rank-1 class-index vector whose batch dim
// must match the logits' batch dim.
TEST(NNOpsTest, SparseSoftmaxCrossEntropyWithLogits_ShapeFn) {
  ShapeInferenceTestOp op("SparseSoftmaxCrossEntropyWithLogits");
  INFER_OK(op, "?;?", "[?];[?,?]");
  INFER_OK(op, "[?,?];[?]", "[d0_0|d1_0];[d0_0|d1_0,d0_1]");
  INFER_OK(op, "[1,2];[1]", "[d0_0|d1_0];[d0_0|d1_0,d0_1]");
  INFER_OK(op, "[?,2];[1]", "[d1_0];[d1_0,d0_1]");
  INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[1,?];[2]");
  INFER_ERROR("Shape must be rank 2 but is rank 3", op, "[1,2,3];?");
  INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[1,2]");
}
// InTopK: per-example boolean vector with the shared batch dimension.
TEST(NNOpsTest, InTopK_ShapeFn) {
  ShapeInferenceTestOp op("InTopK");
  INFER_OK(op, "?;?", "[?]");
  INFER_OK(op, "[?,?];[?]", "[d0_0|d1_0]");
  INFER_OK(op, "[1,2];[1]", "[d0_0|d1_0]");
  INFER_OK(op, "[?,2];[1]", "[d1_0]");
  INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[1,?];[2]");
  INFER_ERROR("Shape must be rank 2 but is rank 3", op, "[1,2,3];?");
  INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[1,2]");
}
TEST(NNOpsTest, Dilation2DShapeTest) {
ShapeInferenceTestOp op("Dilation2D");
auto set_op = [&op](const std::vector<int32>& strides,
const std::vector<int32>& rates, const string& padding) {
TF_ASSERT_OK(NodeDefBuilder("test", "Dilation2D")
.Input("input", 0, DT_FLOAT)
.Input("filter", 0, DT_FLOAT)
.Attr("strides", strides)
.Attr("rates", rates)
.Attr("padding", padding)
.Finalize(&op.node_def));
};
set_op({1, 1, 1, 1}, {1, 1, 1, 1}, "VALID");
INFER_OK(op, "[1,2,2,2];[1,1,2]", "[d0_0,2,2,d1_2]");
set_op({1, 1, 1, 1}, {1, 2, 2, 1}, "VALID");
INFER_OK(op, "[1,7,7,2];[2,2,2]", "[d0_0,5,5,d1_2]");
}
// Shape inference shared by FractionalAvgPool and FractionalMaxPool:
// each output dimension is input_dim / pooling_ratio (floored), plus the
// two row/col pooling-sequence outputs.  Also checks pooling_ratio
// validation (must have 4 entries, none negative).
TEST(NNOpsTest, FractionalPool_ShapeFn) {
  for (const char* op_name : {"FractionalAvgPool", "FractionalMaxPool"}) {
    ShapeInferenceTestOp op(op_name);
    auto set_op = [&op, op_name](const std::vector<float>& pooling_ratio) {
      TF_ASSERT_OK(NodeDefBuilder("test", op_name)
                       .Input("input", 0, DT_FLOAT)
                       .Attr("pooling_ratio", pooling_ratio)
                       .Finalize(&op.node_def));
    };
    set_op(std::vector<float>{2.0f, 1, 1.5f, 4.0f});
    INFER_ERROR("must be rank 4", op, "[?,?,?]");
    INFER_OK(op, "?", "[?,?,?,?];[?];[?]");
    INFER_OK(op, "[?,?,?,?]", "[?,?,?,?];[?];[?]");
    // 10/2=5, 20/1=20, 30/1.5=20, 40/4=10; unknown dims stay unknown.
    INFER_OK(op, "[10,20,30,40]", "[5,20,20,10];[20];[20]");
    INFER_OK(op, "[?,20,30,40]", "[?,20,20,10];[20];[20]");
    INFER_OK(op, "[10,?,30,40]", "[5,?,20,10];[?];[20]");
    INFER_OK(op, "[10,20,?,40]", "[5,20,?,10];[20];[?]");
    INFER_OK(op, "[10,20,30,?]", "[5,20,20,?];[20];[20]");
    // pooling_ratio must have exactly 4 elements.
    set_op(std::vector<float>{.5, 1.0, 1.5});
    INFER_ERROR("pooling_ratio field", op, "?");
    set_op(std::vector<float>{1, 2, 3, 4, 5});
    INFER_ERROR("pooling_ratio field", op, "?");
    set_op(std::vector<float>{-1, 2, 3, 4});
    INFER_ERROR("is negative", op, "[1,2,3,4]");
  }
}
// FractionalMaxPoolGrad's output takes the shape of its first input
// (the original, rank-4 input tensor).
TEST(NNOpsTest, FractionalMaxPoolGrad) {
  ShapeInferenceTestOp op("FractionalMaxPoolGrad");
  INFER_ERROR("must be rank 4", op, "[?,?,?];?;?;?;?");
  INFER_OK(op, "?;?;?;?;?", "[?,?,?,?]");
  INFER_OK(op, "[?,?,3,4];?;?;?;?", "in0");
}
// FractionalAvgPoolGrad reads its first input as the original input's
// shape vector; when that tensor is known, the output shape comes from
// its contents rather than from the inference-time shapes.
TEST(NNOpsTest, FractionalAvgPoolGrad) {
  ShapeInferenceTestOp op("FractionalAvgPoolGrad");
  op.input_tensors.resize(1);
  INFER_OK(op, "?;?;?;?", "[?,?,?,?]");
  std::vector<int32> shape{1, 2, 3, 4};
  Tensor shape_t = test::AsTensor<int32>(shape);
  op.input_tensors[0] = &shape_t;
  INFER_OK(op, "[5];?;?;?", "[1,2,3,4]");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/ops/nn_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/nn_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dc717713-4129-44c3-8efc-bf49df16771f | cpp | abseil/abseil-cpp | civil_time | absl/time/civil_time.cc | absl/time/internal/cctz/src/civil_time_test.cc | #include "absl/time/civil_time.h"
#include <cerrno>
#include <cstdlib>
#include <ostream>
#include <string>
#include "absl/strings/str_cat.h"
#include "absl/time/civil_time.h"
#include "absl/time/time.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {
// Maps `year` into the window (2000, 2800) while preserving its position in
// the 400-year Gregorian cycle (2400 % 400 == 0, and C++ `%` truncates toward
// zero, so year % 400 is in (-400, 400)).  Years in that window have the same
// leap-year pattern and weekdays as `year`, which lets FormatTime/ParseTime --
// which cannot handle the full civil_year_t range -- do the month-and-smaller
// work while the real year is carried separately.
inline civil_year_t NormalizeYear(civil_year_t year) {
  return 2400 + year % 400;
}
// Formats `cs` as the real year followed by FormatTime applied to the
// normalized-year copy of the remaining fields, using `fmt` (which must not
// include a year specifier).
std::string FormatYearAnd(string_view fmt, CivilSecond cs) {
  const CivilSecond ncs(NormalizeYear(cs.year()), cs.month(), cs.day(),
                        cs.hour(), cs.minute(), cs.second());
  const TimeZone utc = UTCTimeZone();
  return StrCat(cs.year(), FormatTime(fmt, FromCivil(ncs, utc), utc));
}
// Parses a leading (possibly huge) year with strtoll, then parses the rest of
// the string with ParseTime against a normalized year, finally recombining
// the real year with the parsed sub-year fields.  Returns false if no year
// digits were consumed, the year overflowed, or the remainder does not match
// `fmt` exactly.
template <typename CivilT>
bool ParseYearAnd(string_view fmt, string_view s, CivilT* c) {
  // Civil times support a larger year range than strtoll can parse.
  const std::string ss = std::string(s);
  const char* const np = ss.c_str();
  char* endp;
  errno = 0;  // strtoll reports overflow via errno == ERANGE.
  const civil_year_t y =
      std::strtoll(np, &endp, 10);
  if (endp == np || errno == ERANGE) return false;
  const std::string norm = StrCat(NormalizeYear(y), endp);
  const TimeZone utc = UTCTimeZone();
  Time t;
  if (ParseTime(StrCat("%Y", fmt), norm, utc, &t, nullptr)) {
    const auto cs = ToCivilSecond(t, utc);
    *c = CivilT(y, cs.month(), cs.day(), cs.hour(), cs.minute(), cs.second());
    return true;
  }
  return false;
}
// Tries to parse `s` as a CivilT1; on success converts (aligns) the result to
// the requested CivilT2.
template <typename CivilT1, typename CivilT2>
bool ParseAs(string_view s, CivilT2* c) {
  CivilT1 t1;
  if (ParseCivilTime(s, &t1)) {
    *c = CivilT2(t1);
    return true;
  }
  return false;
}
// Accepts any of the six civil-time string formats, converting the parsed
// value to CivilT.  The exact-type parse is tried first as a fast path; the
// remaining alignments are tried in rough order of how common they are.
// Only one alignment can match a given string, so order does not affect the
// result, just how quickly it is found.
template <typename CivilT>
bool ParseLenient(string_view s, CivilT* c) {
  if (ParseCivilTime(s, c)) return true;
  if (ParseAs<CivilDay>(s, c)) return true;
  if (ParseAs<CivilSecond>(s, c)) return true;
  if (ParseAs<CivilHour>(s, c)) return true;
  if (ParseAs<CivilMonth>(s, c)) return true;
  if (ParseAs<CivilMinute>(s, c)) return true;
  if (ParseAs<CivilYear>(s, c)) return true;
  return false;
}
}  // namespace
// Formats each civil-time alignment in its canonical string form
// ("YYYY-MM-DDTHH:MM:SS" truncated at the type's alignment); the year is
// emitted separately by FormatYearAnd so it is not limited to strftime's
// year range.
std::string FormatCivilTime(CivilSecond c) {
  return FormatYearAnd("-%m-%d%ET%H:%M:%S", c);
}
std::string FormatCivilTime(CivilMinute c) {
  return FormatYearAnd("-%m-%d%ET%H:%M", c);
}
std::string FormatCivilTime(CivilHour c) {
  return FormatYearAnd("-%m-%d%ET%H", c);
}
std::string FormatCivilTime(CivilDay c) { return FormatYearAnd("-%m-%d", c); }
std::string FormatCivilTime(CivilMonth c) { return FormatYearAnd("-%m", c); }
std::string FormatCivilTime(CivilYear c) { return FormatYearAnd("", c); }
// Strict parsers: each overload accepts only the canonical string form for
// its alignment (the inverse of the matching FormatCivilTime overload) and
// returns false on any mismatch.
bool ParseCivilTime(string_view s, CivilSecond* c) {
  return ParseYearAnd("-%m-%d%ET%H:%M:%S", s, c);
}
bool ParseCivilTime(string_view s, CivilMinute* c) {
  return ParseYearAnd("-%m-%d%ET%H:%M", s, c);
}
bool ParseCivilTime(string_view s, CivilHour* c) {
  return ParseYearAnd("-%m-%d%ET%H", s, c);
}
bool ParseCivilTime(string_view s, CivilDay* c) {
  return ParseYearAnd("-%m-%d", s, c);
}
bool ParseCivilTime(string_view s, CivilMonth* c) {
  return ParseYearAnd("-%m", s, c);
}
bool ParseCivilTime(string_view s, CivilYear* c) {
  return ParseYearAnd("", s, c);
}
// Lenient parsers: accept any of the six canonical civil-time forms and
// convert the result to the requested alignment (see ParseLenient above).
bool ParseLenientCivilTime(string_view s, CivilSecond* c) {
  return ParseLenient(s, c);
}
bool ParseLenientCivilTime(string_view s, CivilMinute* c) {
  return ParseLenient(s, c);
}
bool ParseLenientCivilTime(string_view s, CivilHour* c) {
  return ParseLenient(s, c);
}
bool ParseLenientCivilTime(string_view s, CivilDay* c) {
  return ParseLenient(s, c);
}
bool ParseLenientCivilTime(string_view s, CivilMonth* c) {
  return ParseLenient(s, c);
}
bool ParseLenientCivilTime(string_view s, CivilYear* c) {
  return ParseLenient(s, c);
}
namespace time_internal {
// Stream output for each alignment delegates to the canonical formatter.
std::ostream& operator<<(std::ostream& os, CivilYear y) {
  return os << FormatCivilTime(y);
}
std::ostream& operator<<(std::ostream& os, CivilMonth m) {
  return os << FormatCivilTime(m);
}
std::ostream& operator<<(std::ostream& os, CivilDay d) {
  return os << FormatCivilTime(d);
}
std::ostream& operator<<(std::ostream& os, CivilHour h) {
  return os << FormatCivilTime(h);
}
std::ostream& operator<<(std::ostream& os, CivilMinute m) {
  return os << FormatCivilTime(m);
}
std::ostream& operator<<(std::ostream& os, CivilSecond s) {
  return os << FormatCivilTime(s);
}
// Flag parsing is deliberately lenient (any civil-time form converts to the
// flag's type); the unused std::string* is the Abseil-flags error-message
// out-parameter, which these parsers never populate.
bool AbslParseFlag(string_view s, CivilSecond* c, std::string*) {
  return ParseLenientCivilTime(s, c);
}
bool AbslParseFlag(string_view s, CivilMinute* c, std::string*) {
  return ParseLenientCivilTime(s, c);
}
bool AbslParseFlag(string_view s, CivilHour* c, std::string*) {
  return ParseLenientCivilTime(s, c);
}
bool AbslParseFlag(string_view s, CivilDay* c, std::string*) {
  return ParseLenientCivilTime(s, c);
}
bool AbslParseFlag(string_view s, CivilMonth* c, std::string*) {
  return ParseLenientCivilTime(s, c);
}
bool AbslParseFlag(string_view s, CivilYear* c, std::string*) {
  return ParseLenientCivilTime(s, c);
}
// Flag unparsing emits the canonical (strictly re-parseable) form.
std::string AbslUnparseFlag(CivilSecond c) { return FormatCivilTime(c); }
std::string AbslUnparseFlag(CivilMinute c) { return FormatCivilTime(c); }
std::string AbslUnparseFlag(CivilHour c) { return FormatCivilTime(c); }
std::string AbslUnparseFlag(CivilDay c) { return FormatCivilTime(c); }
std::string AbslUnparseFlag(CivilMonth c) { return FormatCivilTime(c); }
std::string AbslUnparseFlag(CivilYear c) { return FormatCivilTime(c); }
}  // namespace time_internal
ABSL_NAMESPACE_END
} | #include "absl/time/internal/cctz/include/cctz/civil_time.h"
#include <iomanip>
#include <limits>
#include <sstream>
#include <string>
#include <type_traits>
#include "gtest/gtest.h"
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace time_internal {
namespace cctz {
namespace {
// Renders any streamable value via operator<< and returns the resulting
// text; the tests below use it to compare civil-time values against their
// expected string representations.
template <typename T>
std::string Format(const T& value) {
  std::ostringstream out;
  out << value;
  return out.str();
}
}  // namespace
#if __cpp_constexpr >= 201304 || (defined(_MSC_VER) && _MSC_VER >= 1910)
// Already-normalized field values are stored unchanged, at every alignment.
TEST(CivilTime, Normal) {
  constexpr civil_second css(2016, 1, 28, 17, 14, 12);
  static_assert(css.second() == 12, "Normal.second");
  constexpr civil_minute cmm(2016, 1, 28, 17, 14);
  static_assert(cmm.minute() == 14, "Normal.minute");
  constexpr civil_hour chh(2016, 1, 28, 17);
  static_assert(chh.hour() == 17, "Normal.hour");
  constexpr civil_day cd(2016, 1, 28);
  static_assert(cd.day() == 28, "Normal.day");
  constexpr civil_month cm(2016, 1);
  static_assert(cm.month() == 1, "Normal.month");
  constexpr civil_year cy(2016);
  static_assert(cy.year() == 2016, "Normal.year");
}
// Converting a coarser alignment to a finer one fills the finer fields with
// their minimum values (month/day = 1, hour/minute/second = 0).
TEST(CivilTime, Conversion) {
  constexpr civil_year cy(2016);
  static_assert(cy.year() == 2016, "Conversion.year");
  constexpr civil_month cm(cy);
  static_assert(cm.month() == 1, "Conversion.month");
  constexpr civil_day cd(cm);
  static_assert(cd.day() == 1, "Conversion.day");
  constexpr civil_hour chh(cd);
  static_assert(chh.hour() == 0, "Conversion.hour");
  constexpr civil_minute cmm(chh);
  static_assert(cmm.minute() == 0, "Conversion.minute");
  constexpr civil_second css(cmm);
  static_assert(css.second() == 0, "Conversion.second");
}
// In-range constructor arguments come back unchanged from every accessor.
TEST(CivilTime, Normalized) {
  constexpr civil_second cs(2016, 1, 28, 17, 14, 12);
  static_assert(cs.year() == 2016, "Normalized.year");
  static_assert(cs.month() == 1, "Normalized.month");
  static_assert(cs.day() == 28, "Normalized.day");
  static_assert(cs.hour() == 17, "Normalized.hour");
  static_assert(cs.minute() == 14, "Normalized.minute");
  static_assert(cs.second() == 12, "Normalized.second");
}
// seconds=121 normalizes by carrying 2 minutes: 17:14:121 -> 17:16:01.
TEST(CivilTime, SecondOverflow) {
  constexpr civil_second cs(2016, 1, 28, 17, 14, 121);
  static_assert(cs.year() == 2016, "SecondOverflow.year");
  static_assert(cs.month() == 1, "SecondOverflow.month");
  static_assert(cs.day() == 28, "SecondOverflow.day");
  static_assert(cs.hour() == 17, "SecondOverflow.hour");
  static_assert(cs.minute() == 16, "SecondOverflow.minute");
  static_assert(cs.second() == 1, "SecondOverflow.second");
}
// seconds=-121 normalizes by borrowing 3 minutes: 17:14:-121 -> 17:11:59.
TEST(CivilTime, SecondUnderflow) {
  constexpr civil_second cs(2016, 1, 28, 17, 14, -121);
  static_assert(cs.year() == 2016, "SecondUnderflow.year");
  static_assert(cs.month() == 1, "SecondUnderflow.month");
  static_assert(cs.day() == 28, "SecondUnderflow.day");
  static_assert(cs.hour() == 17, "SecondUnderflow.hour");
  static_assert(cs.minute() == 11, "SecondUnderflow.minute");
  static_assert(cs.second() == 59, "SecondUnderflow.second");
}
// minutes=121 carries 2 hours: 17:121 -> 19:01.
TEST(CivilTime, MinuteOverflow) {
  constexpr civil_second cs(2016, 1, 28, 17, 121, 12);
  static_assert(cs.year() == 2016, "MinuteOverflow.year");
  static_assert(cs.month() == 1, "MinuteOverflow.month");
  static_assert(cs.day() == 28, "MinuteOverflow.day");
  static_assert(cs.hour() == 19, "MinuteOverflow.hour");
  static_assert(cs.minute() == 1, "MinuteOverflow.minute");
  static_assert(cs.second() == 12, "MinuteOverflow.second");
}
// minutes=-121 borrows 3 hours: 17:-121 -> 14:59.
TEST(CivilTime, MinuteUnderflow) {
  constexpr civil_second cs(2016, 1, 28, 17, -121, 12);
  static_assert(cs.year() == 2016, "MinuteUnderflow.year");
  static_assert(cs.month() == 1, "MinuteUnderflow.month");
  static_assert(cs.day() == 28, "MinuteUnderflow.day");
  static_assert(cs.hour() == 14, "MinuteUnderflow.hour");
  static_assert(cs.minute() == 59, "MinuteUnderflow.minute");
  static_assert(cs.second() == 12, "MinuteUnderflow.second");
}
// hours=49 carries 2 days: Jan 28 49h -> Jan 30 01h.
TEST(CivilTime, HourOverflow) {
  constexpr civil_second cs(2016, 1, 28, 49, 14, 12);
  static_assert(cs.year() == 2016, "HourOverflow.year");
  static_assert(cs.month() == 1, "HourOverflow.month");
  static_assert(cs.day() == 30, "HourOverflow.day");
  static_assert(cs.hour() == 1, "HourOverflow.hour");
  static_assert(cs.minute() == 14, "HourOverflow.minute");
  static_assert(cs.second() == 12, "HourOverflow.second");
}
// hours=-49 borrows 3 days: Jan 28 -49h -> Jan 25 23h.
TEST(CivilTime, HourUnderflow) {
  constexpr civil_second cs(2016, 1, 28, -49, 14, 12);
  static_assert(cs.year() == 2016, "HourUnderflow.year");
  static_assert(cs.month() == 1, "HourUnderflow.month");
  static_assert(cs.day() == 25, "HourUnderflow.day");
  static_assert(cs.hour() == 23, "HourUnderflow.hour");
  static_assert(cs.minute() == 14, "HourUnderflow.minute");
  static_assert(cs.second() == 12, "HourUnderflow.second");
}
// month=25 carries 2 years: 2016-25 -> 2018-01.
TEST(CivilTime, MonthOverflow) {
  constexpr civil_second cs(2016, 25, 28, 17, 14, 12);
  static_assert(cs.year() == 2018, "MonthOverflow.year");
  static_assert(cs.month() == 1, "MonthOverflow.month");
  static_assert(cs.day() == 28, "MonthOverflow.day");
  static_assert(cs.hour() == 17, "MonthOverflow.hour");
  static_assert(cs.minute() == 14, "MonthOverflow.minute");
  static_assert(cs.second() == 12, "MonthOverflow.second");
}
// month=-25 borrows 3 years: 2016--25 -> 2013-11.
TEST(CivilTime, MonthUnderflow) {
  constexpr civil_second cs(2016, -25, 28, 17, 14, 12);
  static_assert(cs.year() == 2013, "MonthUnderflow.year");
  static_assert(cs.month() == 11, "MonthUnderflow.month");
  static_assert(cs.day() == 28, "MonthUnderflow.day");
  static_assert(cs.hour() == 17, "MonthUnderflow.hour");
  static_assert(cs.minute() == 14, "MonthUnderflow.minute");
  static_assert(cs.second() == 12, "MonthUnderflow.second");
}
// A day offset large enough to span multiple 400-year Gregorian cycles
// (292195 days = 800 years from 2016-01-01) still normalizes correctly.
TEST(CivilTime, C4Overflow) {
  constexpr civil_second cs(2016, 1, 292195, 17, 14, 12);
  static_assert(cs.year() == 2816, "C4Overflow.year");
  static_assert(cs.month() == 1, "C4Overflow.month");
  static_assert(cs.day() == 1, "C4Overflow.day");
  static_assert(cs.hour() == 17, "C4Overflow.hour");
  static_assert(cs.minute() == 14, "C4Overflow.minute");
  static_assert(cs.second() == 12, "C4Overflow.second");
}
// The same multi-cycle day offset in the negative direction.
TEST(CivilTime, C4Underflow) {
  constexpr civil_second cs(2016, 1, -292195, 17, 14, 12);
  static_assert(cs.year() == 1215, "C4Underflow.year");
  static_assert(cs.month() == 12, "C4Underflow.month");
  static_assert(cs.day() == 30, "C4Underflow.day");
  static_assert(cs.hour() == 17, "C4Underflow.hour");
  static_assert(cs.minute() == 14, "C4Underflow.minute");
  static_assert(cs.second() == 12, "C4Underflow.second");
}
// Several out-of-range fields (negative and positive) normalize together.
TEST(CivilTime, MixedNormalization) {
  constexpr civil_second cs(2016, -42, 122, 99, -147, 4949);
  static_assert(cs.year() == 2012, "MixedNormalization.year");
  static_assert(cs.month() == 10, "MixedNormalization.month");
  static_assert(cs.day() == 4, "MixedNormalization.day");
  static_assert(cs.hour() == 1, "MixedNormalization.hour");
  static_assert(cs.minute() == 55, "MixedNormalization.minute");
  static_assert(cs.second() == 29, "MixedNormalization.second");
}
// operator< is usable in a constant expression.
TEST(CivilTime, Less) {
  constexpr civil_second cs1(2016, 1, 28, 17, 14, 12);
  constexpr civil_second cs2(2016, 1, 28, 17, 14, 13);
  constexpr bool less = cs1 < cs2;
  static_assert(less, "Less");
}
// constexpr operator+ adds in the type's own unit (seconds here) and
// carries across field boundaries.
TEST(CivilTime, Addition) {
  constexpr civil_second cs1(2016, 1, 28, 17, 14, 12);
  constexpr civil_second cs2 = cs1 + 50;
  static_assert(cs2.year() == 2016, "Addition.year");
  static_assert(cs2.month() == 1, "Addition.month");
  static_assert(cs2.day() == 28, "Addition.day");
  static_assert(cs2.hour() == 17, "Addition.hour");
  static_assert(cs2.minute() == 15, "Addition.minute");
  static_assert(cs2.second() == 2, "Addition.second");
}
// constexpr operator- subtracts in the type's own unit with borrowing.
TEST(CivilTime, Subtraction) {
  constexpr civil_second cs1(2016, 1, 28, 17, 14, 12);
  constexpr civil_second cs2 = cs1 - 50;
  static_assert(cs2.year() == 2016, "Subtraction.year");
  static_assert(cs2.month() == 1, "Subtraction.month");
  static_assert(cs2.day() == 28, "Subtraction.day");
  static_assert(cs2.hour() == 17, "Subtraction.hour");
  static_assert(cs2.minute() == 13, "Subtraction.minute");
  static_assert(cs2.second() == 22, "Subtraction.second");
}
// Difference of two civil_days counts days (2015 was not a leap year).
TEST(CivilTime, Difference) {
  constexpr civil_day cd1(2016, 1, 28);
  constexpr civil_day cd2(2015, 1, 28);
  constexpr int diff = cd1 - cd2;
  static_assert(diff == 365, "Difference");
}
// Normalization works at the extreme of the year range: hour=-1 borrows a
// day from Jan 1 of the most negative representable year.
TEST(CivilTime, ConstructionWithHugeYear) {
  constexpr civil_hour h(-9223372036854775807, 1, 1, -1);
  static_assert(h.year() == -9223372036854775807 - 1,
                "ConstructionWithHugeYear");
  static_assert(h.month() == 12, "ConstructionWithHugeYear");
  static_assert(h.day() == 31, "ConstructionWithHugeYear");
  static_assert(h.hour() == 23, "ConstructionWithHugeYear");
}
// Day differences at the extremes of the year range, including spans whose
// result is exactly the min/max of the (64-bit) difference type.
TEST(CivilTime, DifferenceWithHugeYear) {
  {
    constexpr civil_day d1(9223372036854775807, 1, 1);
    constexpr civil_day d2(9223372036854775807, 12, 31);
    static_assert(d2 - d1 == 364, "DifferenceWithHugeYear");
  }
  {
    constexpr civil_day d1(-9223372036854775807 - 1, 1, 1);
    constexpr civil_day d2(-9223372036854775807 - 1, 12, 31);
    static_assert(d2 - d1 == 365, "DifferenceWithHugeYear");
  }
  {
    constexpr civil_day d1(9223372036854775807, 1, 1);
    constexpr civil_day d2(9198119301927009252, 6, 6);
    static_assert(d1 - d2 == 9223372036854775807, "DifferenceWithHugeYear");
    static_assert((d2 - 1) - d1 == -9223372036854775807 - 1,
                  "DifferenceWithHugeYear");
  }
  {
    constexpr civil_day d1(-9223372036854775807 - 1, 1, 1);
    constexpr civil_day d2(-9198119301927009254, 7, 28);
    static_assert(d2 - d1 == 9223372036854775807, "DifferenceWithHugeYear");
    static_assert(d1 - (d2 + 1) == -9223372036854775807 - 1,
                  "DifferenceWithHugeYear");
  }
  {
    constexpr civil_day d1(-12626367463883278, 9, 3);
    constexpr civil_day d2(12626367463883277, 3, 28);
    static_assert(d2 - d1 == 9223372036854775807, "DifferenceWithHugeYear");
    static_assert(d1 - (d2 + 1) == -9223372036854775807 - 1,
                  "DifferenceWithHugeYear");
  }
}
// Second-granularity differences that land exactly on the 64-bit limits must
// not overflow in intermediate arithmetic.
TEST(CivilTime, DifferenceNoIntermediateOverflow) {
  {
    constexpr civil_second s1(-292277022657, 1, 27, 8, 29 - 1, 52);
    constexpr civil_second s2(1970, 1, 1, 0, 0 - 1, 0);
    static_assert(s1 - s2 == -9223372036854775807 - 1,
                  "DifferenceNoIntermediateOverflow");
  }
  {
    constexpr civil_second s1(292277026596, 12, 4, 15, 30, 7 - 7);
    constexpr civil_second s2(1970, 1, 1, 0, 0, 0 - 7);
    static_assert(s1 - s2 == 9223372036854775807,
                  "DifferenceNoIntermediateOverflow");
  }
}
// get_weekday is usable in constant expressions.
TEST(CivilTime, WeekDay) {
  constexpr civil_day cd(2016, 1, 28);
  constexpr weekday wd = get_weekday(cd);
  static_assert(wd == weekday::thursday, "Weekday");
}
// next_weekday returns the following occurrence, never the same day.
TEST(CivilTime, NextWeekDay) {
  constexpr civil_day cd(2016, 1, 28);
  constexpr civil_day next = next_weekday(cd, weekday::thursday);
  static_assert(next.year() == 2016, "NextWeekDay.year");
  static_assert(next.month() == 2, "NextWeekDay.month");
  static_assert(next.day() == 4, "NextWeekDay.day");
}
// prev_weekday returns the preceding occurrence, never the same day.
TEST(CivilTime, PrevWeekDay) {
  constexpr civil_day cd(2016, 1, 28);
  constexpr civil_day prev = prev_weekday(cd, weekday::thursday);
  static_assert(prev.year() == 2016, "PrevWeekDay.year");
  static_assert(prev.month() == 1, "PrevWeekDay.month");
  static_assert(prev.day() == 21, "PrevWeekDay.day");
}
// get_yearday is 1-based: Jan 28 is day 28 of the year.
TEST(CivilTime, YearDay) {
  constexpr civil_day cd(2016, 1, 28);
  constexpr int yd = get_yearday(cd);
  static_assert(yd == 28, "YearDay");
}
#endif
// Default-constructed civil times are the epoch, 1970-01-01 00:00:00,
// formatted at each type's alignment.
TEST(CivilTime, DefaultConstruction) {
  civil_second ss;
  EXPECT_EQ("1970-01-01T00:00:00", Format(ss));
  civil_minute mm;
  EXPECT_EQ("1970-01-01T00:00", Format(mm));
  civil_hour hh;
  EXPECT_EQ("1970-01-01T00", Format(hh));
  civil_day d;
  EXPECT_EQ("1970-01-01", Format(d));
  civil_month m;
  EXPECT_EQ("1970-01", Format(m));
  civil_year y;
  EXPECT_EQ("1970", Format(y));
}
// A value-initialized aggregate member equals a default-constructed value.
TEST(CivilTime, StructMember) {
  struct S {
    civil_day day;
  };
  S s = {};
  EXPECT_EQ(civil_day{}, s.day);
}
// Constructors accept up to six fields regardless of alignment; fields finer
// than the type's alignment are ignored, and omitted fields default to their
// minimum values.
TEST(CivilTime, FieldsConstruction) {
  EXPECT_EQ("2015-01-02T03:04:05", Format(civil_second(2015, 1, 2, 3, 4, 5)));
  EXPECT_EQ("2015-01-02T03:04:00", Format(civil_second(2015, 1, 2, 3, 4)));
  EXPECT_EQ("2015-01-02T03:00:00", Format(civil_second(2015, 1, 2, 3)));
  EXPECT_EQ("2015-01-02T00:00:00", Format(civil_second(2015, 1, 2)));
  EXPECT_EQ("2015-01-01T00:00:00", Format(civil_second(2015, 1)));
  EXPECT_EQ("2015-01-01T00:00:00", Format(civil_second(2015)));
  EXPECT_EQ("2015-01-02T03:04", Format(civil_minute(2015, 1, 2, 3, 4, 5)));
  EXPECT_EQ("2015-01-02T03:04", Format(civil_minute(2015, 1, 2, 3, 4)));
  EXPECT_EQ("2015-01-02T03:00", Format(civil_minute(2015, 1, 2, 3)));
  EXPECT_EQ("2015-01-02T00:00", Format(civil_minute(2015, 1, 2)));
  EXPECT_EQ("2015-01-01T00:00", Format(civil_minute(2015, 1)));
  EXPECT_EQ("2015-01-01T00:00", Format(civil_minute(2015)));
  EXPECT_EQ("2015-01-02T03", Format(civil_hour(2015, 1, 2, 3, 4, 5)));
  EXPECT_EQ("2015-01-02T03", Format(civil_hour(2015, 1, 2, 3, 4)));
  EXPECT_EQ("2015-01-02T03", Format(civil_hour(2015, 1, 2, 3)));
  EXPECT_EQ("2015-01-02T00", Format(civil_hour(2015, 1, 2)));
  EXPECT_EQ("2015-01-01T00", Format(civil_hour(2015, 1)));
  EXPECT_EQ("2015-01-01T00", Format(civil_hour(2015)));
  EXPECT_EQ("2015-01-02", Format(civil_day(2015, 1, 2, 3, 4, 5)));
  EXPECT_EQ("2015-01-02", Format(civil_day(2015, 1, 2, 3, 4)));
  EXPECT_EQ("2015-01-02", Format(civil_day(2015, 1, 2, 3)));
  EXPECT_EQ("2015-01-02", Format(civil_day(2015, 1, 2)));
  EXPECT_EQ("2015-01-01", Format(civil_day(2015, 1)));
  EXPECT_EQ("2015-01-01", Format(civil_day(2015)));
  EXPECT_EQ("2015-01", Format(civil_month(2015, 1, 2, 3, 4, 5)));
  EXPECT_EQ("2015-01", Format(civil_month(2015, 1, 2, 3, 4)));
  EXPECT_EQ("2015-01", Format(civil_month(2015, 1, 2, 3)));
  EXPECT_EQ("2015-01", Format(civil_month(2015, 1, 2)));
  EXPECT_EQ("2015-01", Format(civil_month(2015, 1)));
  EXPECT_EQ("2015-01", Format(civil_month(2015)));
  EXPECT_EQ("2015", Format(civil_year(2015, 1, 2, 3, 4, 5)));
  EXPECT_EQ("2015", Format(civil_year(2015, 1, 2, 3, 4)));
  EXPECT_EQ("2015", Format(civil_year(2015, 1, 2, 3)));
  EXPECT_EQ("2015", Format(civil_year(2015, 1, 2)));
  EXPECT_EQ("2015", Format(civil_year(2015, 1)));
  EXPECT_EQ("2015", Format(civil_year(2015)));
}
// Constructor fields at INT_MAX/INT_MIN normalize without overflow, even
// when stacked across multiple fields.
TEST(CivilTime, FieldsConstructionLimits) {
  const int kIntMax = std::numeric_limits<int>::max();
  EXPECT_EQ("2038-01-19T03:14:07",
            Format(civil_second(1970, 1, 1, 0, 0, kIntMax)));
  EXPECT_EQ("6121-02-11T05:21:07",
            Format(civil_second(1970, 1, 1, 0, kIntMax, kIntMax)));
  EXPECT_EQ("251104-11-20T12:21:07",
            Format(civil_second(1970, 1, 1, kIntMax, kIntMax, kIntMax)));
  EXPECT_EQ("6130715-05-30T12:21:07",
            Format(civil_second(1970, 1, kIntMax, kIntMax, kIntMax, kIntMax)));
  EXPECT_EQ(
      "185087685-11-26T12:21:07",
      Format(civil_second(1970, kIntMax, kIntMax, kIntMax, kIntMax, kIntMax)));
  const int kIntMin = std::numeric_limits<int>::min();
  EXPECT_EQ("1901-12-13T20:45:52",
            Format(civil_second(1970, 1, 1, 0, 0, kIntMin)));
  EXPECT_EQ("-2182-11-20T18:37:52",
            Format(civil_second(1970, 1, 1, 0, kIntMin, kIntMin)));
  EXPECT_EQ("-247165-02-11T10:37:52",
            Format(civil_second(1970, 1, 1, kIntMin, kIntMin, kIntMin)));
  EXPECT_EQ("-6126776-08-01T10:37:52",
            Format(civil_second(1970, 1, kIntMin, kIntMin, kIntMin, kIntMin)));
  EXPECT_EQ(
      "-185083747-10-31T10:37:52",
      Format(civil_second(1970, kIntMin, kIntMin, kIntMin, kIntMin, kIntMin)));
}
// Coarser alignments implicitly convert to finer ones (year -> ... -> second);
// the reverse direction must not be implicitly convertible.
TEST(CivilTime, ImplicitCrossAlignment) {
  civil_year year(2015);
  civil_month month = year;
  civil_day day = month;
  civil_hour hour = day;
  civil_minute minute = hour;
  civil_second second = minute;
  second = year;
  EXPECT_EQ(second, year);
  second = month;
  EXPECT_EQ(second, month);
  second = day;
  EXPECT_EQ(second, day);
  second = hour;
  EXPECT_EQ(second, hour);
  second = minute;
  EXPECT_EQ(second, minute);
  minute = year;
  EXPECT_EQ(minute, year);
  minute = month;
  EXPECT_EQ(minute, month);
  minute = day;
  EXPECT_EQ(minute, day);
  minute = hour;
  EXPECT_EQ(minute, hour);
  hour = year;
  EXPECT_EQ(hour, year);
  hour = month;
  EXPECT_EQ(hour, month);
  hour = day;
  EXPECT_EQ(hour, day);
  day = year;
  EXPECT_EQ(day, year);
  day = month;
  EXPECT_EQ(day, month);
  month = year;
  EXPECT_EQ(month, year);
  // Finer-to-coarser conversions require explicit construction.
  EXPECT_FALSE((std::is_convertible<civil_second, civil_minute>::value));
  EXPECT_FALSE((std::is_convertible<civil_second, civil_hour>::value));
  EXPECT_FALSE((std::is_convertible<civil_second, civil_day>::value));
  EXPECT_FALSE((std::is_convertible<civil_second, civil_month>::value));
  EXPECT_FALSE((std::is_convertible<civil_second, civil_year>::value));
  EXPECT_FALSE((std::is_convertible<civil_minute, civil_hour>::value));
  EXPECT_FALSE((std::is_convertible<civil_minute, civil_day>::value));
  EXPECT_FALSE((std::is_convertible<civil_minute, civil_month>::value));
  EXPECT_FALSE((std::is_convertible<civil_minute, civil_year>::value));
  EXPECT_FALSE((std::is_convertible<civil_hour, civil_day>::value));
  EXPECT_FALSE((std::is_convertible<civil_hour, civil_month>::value));
  EXPECT_FALSE((std::is_convertible<civil_hour, civil_year>::value));
  EXPECT_FALSE((std::is_convertible<civil_day, civil_month>::value));
  EXPECT_FALSE((std::is_convertible<civil_day, civil_year>::value));
  EXPECT_FALSE((std::is_convertible<civil_month, civil_year>::value));
}
// Explicit construction converts in both directions: finer-to-coarser
// truncates, coarser-to-finer fills minimum field values.
TEST(CivilTime, ExplicitCrossAlignment) {
  civil_second second(2015, 1, 2, 3, 4, 5);
  EXPECT_EQ("2015-01-02T03:04:05", Format(second));
  civil_minute minute(second);
  EXPECT_EQ("2015-01-02T03:04", Format(minute));
  civil_hour hour(minute);
  EXPECT_EQ("2015-01-02T03", Format(hour));
  civil_day day(hour);
  EXPECT_EQ("2015-01-02", Format(day));
  civil_month month(day);
  EXPECT_EQ("2015-01", Format(month));
  civil_year year(month);
  EXPECT_EQ("2015", Format(year));
  month = civil_month(year);
  EXPECT_EQ("2015-01", Format(month));
  day = civil_day(month);
  EXPECT_EQ("2015-01-01", Format(day));
  hour = civil_hour(day);
  EXPECT_EQ("2015-01-01T00", Format(hour));
  minute = civil_minute(hour);
  EXPECT_EQ("2015-01-01T00:00", Format(minute));
  second = civil_second(minute);
  EXPECT_EQ("2015-01-01T00:00:00", Format(second));
}
// SFINAE detector: HasDifference<T1, T2>::value is true iff the expression
// `declval<T1>() - declval<T2>()` is well-formed.  The decltype overload is
// preferred when the subtraction compiles; otherwise the variadic fallback
// is chosen.
template <typename T1, typename T2>
struct HasDifference {
  template <typename U1, typename U2>
  static std::false_type test(...);
  template <typename U1, typename U2>
  static std::true_type test(decltype(std::declval<U1>() - std::declval<U2>()));
  static constexpr bool value = decltype(test<T1, T2>(0))::value;
};
// Subtraction is only defined between values of the same alignment; mixed
// alignments must not compile (checked via the HasDifference detector).
TEST(CivilTime, DisallowCrossAlignedDifference) {
  static_assert(HasDifference<civil_second, civil_second>::value, "");
  static_assert(HasDifference<civil_minute, civil_minute>::value, "");
  static_assert(HasDifference<civil_hour, civil_hour>::value, "");
  static_assert(HasDifference<civil_day, civil_day>::value, "");
  static_assert(HasDifference<civil_month, civil_month>::value, "");
  static_assert(HasDifference<civil_year, civil_year>::value, "");
  static_assert(!HasDifference<civil_second, civil_minute>::value, "");
  static_assert(!HasDifference<civil_second, civil_hour>::value, "");
  static_assert(!HasDifference<civil_second, civil_day>::value, "");
  static_assert(!HasDifference<civil_second, civil_month>::value, "");
  static_assert(!HasDifference<civil_second, civil_year>::value, "");
  static_assert(!HasDifference<civil_minute, civil_hour>::value, "");
  static_assert(!HasDifference<civil_minute, civil_day>::value, "");
  static_assert(!HasDifference<civil_minute, civil_month>::value, "");
  static_assert(!HasDifference<civil_minute, civil_year>::value, "");
  static_assert(!HasDifference<civil_hour, civil_day>::value, "");
  static_assert(!HasDifference<civil_hour, civil_month>::value, "");
  static_assert(!HasDifference<civil_hour, civil_year>::value, "");
  static_assert(!HasDifference<civil_day, civil_month>::value, "");
  static_assert(!HasDifference<civil_day, civil_year>::value, "");
  static_assert(!HasDifference<civil_month, civil_year>::value, "");
}
// Copy construction and copy assignment preserve the value.
TEST(CivilTime, ValueSemantics) {
  const civil_hour a(2015, 1, 2, 3);
  const civil_hour b = a;
  const civil_hour c(b);
  civil_hour d;
  d = c;
  EXPECT_EQ("2015-01-02T03", Format(d));
}
// All six relational operators, exercised with each field as the
// distinguishing one, plus mixed-alignment comparisons (which compare at
// second granularity after conversion).
TEST(CivilTime, Relational) {
  const civil_year year(2014);
  const civil_month month(year);
  EXPECT_EQ(year, month);
#define TEST_RELATIONAL(OLDER, YOUNGER) \
  do {                                  \
    EXPECT_FALSE(OLDER < OLDER);        \
    EXPECT_FALSE(OLDER > OLDER);        \
    EXPECT_TRUE(OLDER >= OLDER);        \
    EXPECT_TRUE(OLDER <= OLDER);        \
    EXPECT_FALSE(YOUNGER < YOUNGER);    \
    EXPECT_FALSE(YOUNGER > YOUNGER);    \
    EXPECT_TRUE(YOUNGER >= YOUNGER);    \
    EXPECT_TRUE(YOUNGER <= YOUNGER);    \
    EXPECT_EQ(OLDER, OLDER);            \
    EXPECT_NE(OLDER, YOUNGER);          \
    EXPECT_LT(OLDER, YOUNGER);          \
    EXPECT_LE(OLDER, YOUNGER);          \
    EXPECT_GT(YOUNGER, OLDER);          \
    EXPECT_GE(YOUNGER, OLDER);          \
  } while (0)
  TEST_RELATIONAL(civil_second(2014, 1, 1, 0, 0, 0),
                  civil_second(2015, 1, 1, 0, 0, 0));
  TEST_RELATIONAL(civil_second(2014, 1, 1, 0, 0, 0),
                  civil_second(2014, 2, 1, 0, 0, 0));
  TEST_RELATIONAL(civil_second(2014, 1, 1, 0, 0, 0),
                  civil_second(2014, 1, 2, 0, 0, 0));
  TEST_RELATIONAL(civil_second(2014, 1, 1, 0, 0, 0),
                  civil_second(2014, 1, 1, 1, 0, 0));
  TEST_RELATIONAL(civil_second(2014, 1, 1, 1, 0, 0),
                  civil_second(2014, 1, 1, 1, 1, 0));
  TEST_RELATIONAL(civil_second(2014, 1, 1, 1, 1, 0),
                  civil_second(2014, 1, 1, 1, 1, 1));
  TEST_RELATIONAL(civil_day(2014, 1, 1), civil_minute(2014, 1, 1, 1, 1));
  TEST_RELATIONAL(civil_day(2014, 1, 1), civil_month(2014, 2));
#undef TEST_RELATIONAL
}
// +, -, +=, -=, pre/post increment and decrement work at every alignment,
// each operating in the type's own unit.
TEST(CivilTime, Arithmetic) {
  civil_second second(2015, 1, 2, 3, 4, 5);
  EXPECT_EQ("2015-01-02T03:04:06", Format(second += 1));
  EXPECT_EQ("2015-01-02T03:04:07", Format(second + 1));
  EXPECT_EQ("2015-01-02T03:04:08", Format(2 + second));
  EXPECT_EQ("2015-01-02T03:04:05", Format(second - 1));
  EXPECT_EQ("2015-01-02T03:04:05", Format(second -= 1));
  EXPECT_EQ("2015-01-02T03:04:05", Format(second++));
  EXPECT_EQ("2015-01-02T03:04:07", Format(++second));
  EXPECT_EQ("2015-01-02T03:04:07", Format(second--));
  EXPECT_EQ("2015-01-02T03:04:05", Format(--second));
  civil_minute minute(2015, 1, 2, 3, 4);
  EXPECT_EQ("2015-01-02T03:05", Format(minute += 1));
  EXPECT_EQ("2015-01-02T03:06", Format(minute + 1));
  EXPECT_EQ("2015-01-02T03:07", Format(2 + minute));
  EXPECT_EQ("2015-01-02T03:04", Format(minute - 1));
  EXPECT_EQ("2015-01-02T03:04", Format(minute -= 1));
  EXPECT_EQ("2015-01-02T03:04", Format(minute++));
  EXPECT_EQ("2015-01-02T03:06", Format(++minute));
  EXPECT_EQ("2015-01-02T03:06", Format(minute--));
  EXPECT_EQ("2015-01-02T03:04", Format(--minute));
  civil_hour hour(2015, 1, 2, 3);
  EXPECT_EQ("2015-01-02T04", Format(hour += 1));
  EXPECT_EQ("2015-01-02T05", Format(hour + 1));
  EXPECT_EQ("2015-01-02T06", Format(2 + hour));
  EXPECT_EQ("2015-01-02T03", Format(hour - 1));
  EXPECT_EQ("2015-01-02T03", Format(hour -= 1));
  EXPECT_EQ("2015-01-02T03", Format(hour++));
  EXPECT_EQ("2015-01-02T05", Format(++hour));
  EXPECT_EQ("2015-01-02T05", Format(hour--));
  EXPECT_EQ("2015-01-02T03", Format(--hour));
  civil_day day(2015, 1, 2);
  EXPECT_EQ("2015-01-03", Format(day += 1));
  EXPECT_EQ("2015-01-04", Format(day + 1));
  EXPECT_EQ("2015-01-05", Format(2 + day));
  EXPECT_EQ("2015-01-02", Format(day - 1));
  EXPECT_EQ("2015-01-02", Format(day -= 1));
  EXPECT_EQ("2015-01-02", Format(day++));
  EXPECT_EQ("2015-01-04", Format(++day));
  EXPECT_EQ("2015-01-04", Format(day--));
  EXPECT_EQ("2015-01-02", Format(--day));
  civil_month month(2015, 1);
  EXPECT_EQ("2015-02", Format(month += 1));
  EXPECT_EQ("2015-03", Format(month + 1));
  EXPECT_EQ("2015-04", Format(2 + month));
  EXPECT_EQ("2015-01", Format(month - 1));
  EXPECT_EQ("2015-01", Format(month -= 1));
  EXPECT_EQ("2015-01", Format(month++));
  EXPECT_EQ("2015-03", Format(++month));
  EXPECT_EQ("2015-03", Format(month--));
  EXPECT_EQ("2015-01", Format(--month));
  civil_year year(2015);
  EXPECT_EQ("2016", Format(year += 1));
  EXPECT_EQ("2017", Format(year + 1));
  EXPECT_EQ("2018", Format(2 + year));
  EXPECT_EQ("2015", Format(year - 1));
  EXPECT_EQ("2015", Format(year -= 1));
  EXPECT_EQ("2015", Format(year++));
  EXPECT_EQ("2017", Format(++year));
  EXPECT_EQ("2017", Format(year--));
  EXPECT_EQ("2015", Format(--year));
}
// Adding/subtracting INT_MAX and INT_MIN round-trips exactly at every
// alignment, with no intermediate overflow.
TEST(CivilTime, ArithmeticLimits) {
  const int kIntMax = std::numeric_limits<int>::max();
  const int kIntMin = std::numeric_limits<int>::min();
  civil_second second(1970, 1, 1, 0, 0, 0);
  second += kIntMax;
  EXPECT_EQ("2038-01-19T03:14:07", Format(second));
  second -= kIntMax;
  EXPECT_EQ("1970-01-01T00:00:00", Format(second));
  second += kIntMin;
  EXPECT_EQ("1901-12-13T20:45:52", Format(second));
  second -= kIntMin;
  EXPECT_EQ("1970-01-01T00:00:00", Format(second));
  civil_minute minute(1970, 1, 1, 0, 0);
  minute += kIntMax;
  EXPECT_EQ("6053-01-23T02:07", Format(minute));
  minute -= kIntMax;
  EXPECT_EQ("1970-01-01T00:00", Format(minute));
  minute += kIntMin;
  EXPECT_EQ("-2114-12-08T21:52", Format(minute));
  minute -= kIntMin;
  EXPECT_EQ("1970-01-01T00:00", Format(minute));
  civil_hour hour(1970, 1, 1, 0);
  hour += kIntMax;
  EXPECT_EQ("246953-10-09T07", Format(hour));
  hour -= kIntMax;
  EXPECT_EQ("1970-01-01T00", Format(hour));
  hour += kIntMin;
  EXPECT_EQ("-243014-03-24T16", Format(hour));
  hour -= kIntMin;
  EXPECT_EQ("1970-01-01T00", Format(hour));
  civil_day day(1970, 1, 1);
  day += kIntMax;
  EXPECT_EQ("5881580-07-11", Format(day));
  day -= kIntMax;
  EXPECT_EQ("1970-01-01", Format(day));
  day += kIntMin;
  EXPECT_EQ("-5877641-06-23", Format(day));
  day -= kIntMin;
  EXPECT_EQ("1970-01-01", Format(day));
  civil_month month(1970, 1);
  month += kIntMax;
  EXPECT_EQ("178958940-08", Format(month));
  month -= kIntMax;
  EXPECT_EQ("1970-01", Format(month));
  month += kIntMin;
  EXPECT_EQ("-178955001-05", Format(month));
  month -= kIntMin;
  EXPECT_EQ("1970-01", Format(month));
  civil_year year(0);
  year += kIntMax;
  EXPECT_EQ("2147483647", Format(year));
  year -= kIntMax;
  EXPECT_EQ("0", Format(year));
  year += kIntMin;
  EXPECT_EQ("-2147483648", Format(year));
  year -= kIntMin;
  EXPECT_EQ("0", Format(year));
}
// (x + n) - x == n and (x - n) - x == -n at every alignment.
TEST(CivilTime, ArithmeticDifference) {
  civil_second second(2015, 1, 2, 3, 4, 5);
  EXPECT_EQ(0, second - second);
  EXPECT_EQ(10, (second + 10) - second);
  EXPECT_EQ(-10, (second - 10) - second);
  civil_minute minute(2015, 1, 2, 3, 4);
  EXPECT_EQ(0, minute - minute);
  EXPECT_EQ(10, (minute + 10) - minute);
  EXPECT_EQ(-10, (minute - 10) - minute);
  civil_hour hour(2015, 1, 2, 3);
  EXPECT_EQ(0, hour - hour);
  EXPECT_EQ(10, (hour + 10) - hour);
  EXPECT_EQ(-10, (hour - 10) - hour);
  civil_day day(2015, 1, 2);
  EXPECT_EQ(0, day - day);
  EXPECT_EQ(10, (day + 10) - day);
  EXPECT_EQ(-10, (day - 10) - day);
  civil_month month(2015, 1);
  EXPECT_EQ(0, month - month);
  EXPECT_EQ(10, (month + 10) - month);
  EXPECT_EQ(-10, (month - 10) - month);
  civil_year year(2015);
  EXPECT_EQ(0, year - year);
  EXPECT_EQ(10, (year + 10) - year);
  EXPECT_EQ(-10, (year - 10) - year);
}
// Differences at the edges of the int range, including day spans whose
// result is exactly INT_MAX/INT_MIN.
TEST(CivilTime, DifferenceLimits) {
  const int kIntMax = std::numeric_limits<int>::max();
  const int kIntMin = std::numeric_limits<int>::min();
  const civil_day max_day(kIntMax, 12, 31);
  EXPECT_EQ(1, max_day - (max_day - 1));
  EXPECT_EQ(-1, (max_day - 1) - max_day);
  const civil_day min_day(kIntMin, 1, 1);
  EXPECT_EQ(1, (min_day + 1) - min_day);
  EXPECT_EQ(-1, min_day - (min_day + 1));
  const civil_day d1(1970, 1, 1);
  const civil_day d2(5881580, 7, 11);
  EXPECT_EQ(kIntMax, d2 - d1);
  EXPECT_EQ(kIntMin, d1 - (d2 + 1));
}
TEST(CivilTime, Properties) {
  // Each alignment type accepts full Y/M/D H:M:S constructor arguments but
  // truncates all fields finer than its alignment to that field's minimum
  // (1 for month/day, 0 for hour/minute/second).
  // civil_second keeps every field.
  civil_second ss(2015, 2, 3, 4, 5, 6);
  EXPECT_EQ(2015, ss.year());
  EXPECT_EQ(2, ss.month());
  EXPECT_EQ(3, ss.day());
  EXPECT_EQ(4, ss.hour());
  EXPECT_EQ(5, ss.minute());
  EXPECT_EQ(6, ss.second());
  EXPECT_EQ(weekday::tuesday, get_weekday(ss));
  EXPECT_EQ(34, get_yearday(ss));
  // civil_minute drops seconds.
  civil_minute mm(2015, 2, 3, 4, 5, 6);
  EXPECT_EQ(2015, mm.year());
  EXPECT_EQ(2, mm.month());
  EXPECT_EQ(3, mm.day());
  EXPECT_EQ(4, mm.hour());
  EXPECT_EQ(5, mm.minute());
  EXPECT_EQ(0, mm.second());
  EXPECT_EQ(weekday::tuesday, get_weekday(mm));
  EXPECT_EQ(34, get_yearday(mm));
  // civil_hour drops minutes and seconds.
  civil_hour hh(2015, 2, 3, 4, 5, 6);
  EXPECT_EQ(2015, hh.year());
  EXPECT_EQ(2, hh.month());
  EXPECT_EQ(3, hh.day());
  EXPECT_EQ(4, hh.hour());
  EXPECT_EQ(0, hh.minute());
  EXPECT_EQ(0, hh.second());
  EXPECT_EQ(weekday::tuesday, get_weekday(hh));
  EXPECT_EQ(34, get_yearday(hh));
  // civil_day drops the whole time-of-day.
  civil_day d(2015, 2, 3, 4, 5, 6);
  EXPECT_EQ(2015, d.year());
  EXPECT_EQ(2, d.month());
  EXPECT_EQ(3, d.day());
  EXPECT_EQ(0, d.hour());
  EXPECT_EQ(0, d.minute());
  EXPECT_EQ(0, d.second());
  EXPECT_EQ(weekday::tuesday, get_weekday(d));
  EXPECT_EQ(34, get_yearday(d));
  // civil_month also drops the day (so weekday/yearday reflect the 1st).
  civil_month m(2015, 2, 3, 4, 5, 6);
  EXPECT_EQ(2015, m.year());
  EXPECT_EQ(2, m.month());
  EXPECT_EQ(1, m.day());
  EXPECT_EQ(0, m.hour());
  EXPECT_EQ(0, m.minute());
  EXPECT_EQ(0, m.second());
  EXPECT_EQ(weekday::sunday, get_weekday(m));
  EXPECT_EQ(32, get_yearday(m));
  // civil_year keeps only the year (weekday/yearday reflect Jan 1).
  civil_year y(2015, 2, 3, 4, 5, 6);
  EXPECT_EQ(2015, y.year());
  EXPECT_EQ(1, y.month());
  EXPECT_EQ(1, y.day());
  EXPECT_EQ(0, y.hour());
  EXPECT_EQ(0, y.minute());
  EXPECT_EQ(0, y.second());
  EXPECT_EQ(weekday::thursday, get_weekday(y));
  EXPECT_EQ(1, get_yearday(y));
}
TEST(CivilTime, OutputStream) {
  // Years print unpadded, including zero and negative years.
  EXPECT_EQ("2016", Format(civil_year(2016)));
  EXPECT_EQ("123", Format(civil_year(123)));
  EXPECT_EQ("0", Format(civil_year(0)));
  EXPECT_EQ("-1", Format(civil_year(-1)));
  // Finer alignments append zero-padded fields, separated by '-' within the
  // date, 'T' before the time, and ':' within the time.
  EXPECT_EQ("2016-02", Format(civil_month(2016, 2)));
  EXPECT_EQ("2016-02-03", Format(civil_day(2016, 2, 3)));
  EXPECT_EQ("2016-02-03T04", Format(civil_hour(2016, 2, 3, 4)));
  EXPECT_EQ("2016-02-03T04:05", Format(civil_minute(2016, 2, 3, 4, 5)));
  EXPECT_EQ("2016-02-03T04:05:06", Format(civil_second(2016, 2, 3, 4, 5, 6)));
  // Weekdays stream as capitalized English names.
  EXPECT_EQ("Monday", Format(weekday::monday));
  EXPECT_EQ("Tuesday", Format(weekday::tuesday));
  EXPECT_EQ("Wednesday", Format(weekday::wednesday));
  EXPECT_EQ("Thursday", Format(weekday::thursday));
  EXPECT_EQ("Friday", Format(weekday::friday));
  EXPECT_EQ("Saturday", Format(weekday::saturday));
  EXPECT_EQ("Sunday", Format(weekday::sunday));
}
TEST(CivilTime, OutputStreamLeftFillWidth) {
  civil_second cs(2016, 2, 3, 4, 5, 6);
  // Streams `civil` between two width-3 'X' markers, left-justified in a
  // width-21 field filled with '.', and returns the rendered string.  This
  // verifies that the inserters honor the stream's width/fill/adjust flags.
  auto stream_padded = [](const auto& civil) {
    std::stringstream ss;
    ss << std::left << std::setfill('.');
    ss << std::setw(3) << 'X';
    ss << std::setw(21) << civil;
    ss << std::setw(3) << 'X';
    return ss.str();
  };
  EXPECT_EQ("X..2016.................X..", stream_padded(civil_year(cs)));
  EXPECT_EQ("X..2016-02..............X..", stream_padded(civil_month(cs)));
  EXPECT_EQ("X..2016-02-03...........X..", stream_padded(civil_day(cs)));
  EXPECT_EQ("X..2016-02-03T04........X..", stream_padded(civil_hour(cs)));
  EXPECT_EQ("X..2016-02-03T04:05.....X..", stream_padded(civil_minute(cs)));
  EXPECT_EQ("X..2016-02-03T04:05:06..X..", stream_padded(civil_second(cs)));
}
TEST(CivilTime, NextPrevWeekday) {
  // 1970-01-01 was a Thursday, so the distance to each weekday is fixed.
  const civil_day thursday(1970, 1, 1);
  EXPECT_EQ(weekday::thursday, get_weekday(thursday));
  // next_weekday() returns a day strictly after its argument (so asking for
  // the same weekday yields +7); prev_weekday() returns a day strictly
  // before, i.e. exactly one week earlier than the "next" result.
  const struct {
    weekday wd;
    int next_offset;  // next_weekday(thursday, wd) - thursday
  } kCases[] = {
      {weekday::thursday, 7}, {weekday::friday, 1},  {weekday::saturday, 2},
      {weekday::sunday, 3},   {weekday::monday, 4},  {weekday::tuesday, 5},
      {weekday::wednesday, 6},
  };
  for (const auto& c : kCases) {
    const civil_day d = next_weekday(thursday, c.wd);
    EXPECT_EQ(c.next_offset, d - thursday) << Format(d);
    EXPECT_EQ(d - (c.next_offset == 7 ? 14 : 7), prev_weekday(thursday, c.wd));
  }
}
TEST(CivilTime, NormalizeWithHugeYear) {
  // Month arithmetic normalizes correctly at the extremes of the 64-bit
  // year range without overflowing.
  civil_month c(9223372036854775807, 1);
  EXPECT_EQ("9223372036854775807-01", Format(c));
  // Backing up one month borrows from the year.
  c = c - 1;
  EXPECT_EQ("9223372036854775806-12", Format(c));
  // -9223372036854775808 is not a valid integer literal, so the minimum is
  // spelled as (-9223372036854775807 - 1).
  c = civil_month(-9223372036854775807 - 1, 1);
  EXPECT_EQ("-9223372036854775808-01", Format(c));
  // Adding 12 months carries into the year.
  c = c + 12;
  EXPECT_EQ("-9223372036854775807-01", Format(c));
}
TEST(CivilTime, LeapYears) {
  // For each year: its length in days, and the calendar day that follows
  // Feb 28 (Feb 29 in a leap year, Mar 1 otherwise).  The table includes the
  // century rules: 1900 and 2100 are not leap years, 2000 is.
  const struct {
    int year;
    int days;
    struct {
      int month;
      int day;
    } leap_day;  // The day after Feb 28.
  } kLeapYearTable[]{
      {1900, 365, {3, 1}}, {1999, 365, {3, 1}},
      {2000, 366, {2, 29}},
      {2001, 365, {3, 1}}, {2002, 365, {3, 1}},
      {2003, 365, {3, 1}}, {2004, 366, {2, 29}},
      {2005, 365, {3, 1}}, {2006, 365, {3, 1}},
      {2007, 365, {3, 1}}, {2008, 366, {2, 29}},
      {2009, 365, {3, 1}}, {2100, 365, {3, 1}},
  };
  for (const auto& e : kLeapYearTable) {
    // Check the day after Feb 28.
    const civil_day feb28(e.year, 2, 28);
    const civil_day next_day = feb28 + 1;
    EXPECT_EQ(e.leap_day.month, next_day.month());
    EXPECT_EQ(e.leap_day.day, next_day.day());
    // Check the year's total number of days.
    const civil_year year(feb28);
    const civil_year next_year = year + 1;
    EXPECT_EQ(e.days, civil_day(next_year) - civil_day(year));
  }
}
TEST(CivilTime, FirstThursdayInMonth) {
  const civil_day nov1(2014, 11, 1);
  // Backing up one day before searching forward makes next_weekday() find
  // the first Thursday on or after Nov 1.
  const civil_day first_thursday = next_weekday(nov1 - 1, weekday::thursday);
  EXPECT_EQ("2014-11-06", Format(first_thursday));
  // The fourth Thursday of November (US Thanksgiving) is three weeks later.
  const civil_day thanksgiving = first_thursday + 7 * 3;
  EXPECT_EQ("2014-11-27", Format(thanksgiving));
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/time/civil_time.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/time/internal/cctz/src/civil_time_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
872adbcd-9a42-46b0-9278-91c217a8afec | cpp | tensorflow/tensorflow | indexed_array_analysis | third_party/xla/xla/service/indexed_array_analysis.cc | third_party/xla/xla/service/indexed_array_analysis_test.cc | #include "xla/service/indexed_array_analysis.h"
#include <algorithm>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/map_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using Analysis = IndexedArrayAnalysis;
using UnknownArray = Analysis::UnknownArray;
using ConstantArray = Analysis::ConstantArray;
using ReshapedArray = Analysis::ReshapedArray;
using ScalarIndexedArray = Analysis::ScalarIndexedArray;
using absl::StrJoin;
}
// Renders `root` (recursing into its operands) as an s-expression-like string
// for debugging and tests.  Constant contents are printed only when
// `print_constants` is true.
std::string IndexedArrayAnalysis::ToString(Array* root, bool print_constants) {
  switch (root->kind()) {
    case Array::kUnknown: {
      // Unanalyzable arrays print as the underlying HLO instruction name.
      auto* unknown_tensor = root->as<UnknownArray>();
      return absl::StrCat("%", unknown_tensor->instruction().name());
    }
    case Array::kConstant: {
      if (print_constants) {
        std::string contents = root->as<ConstantArray>()->literal()->ToString();
        return absl::StrCat("(constant ", ShapeUtil::HumanString(root->shape()),
                            " ", contents, ")");
      }
      return absl::StrCat("(constant ", ShapeUtil::HumanString(root->shape()),
                          ")");
    }
    case Array::kReshaped: {
      ReshapedArray* reshaped_array = root->as<ReshapedArray>();
      return absl::StrCat(
          "(reshape ", ToString(reshaped_array->operand(), print_constants),
          " to ", ShapeUtil::HumanString(reshaped_array->shape()), ")");
    }
    case Array::kScalarIndexedConstant:
    case Array::kScalarIndexed: {
      // Both variants print source, indices, the indexed source dimension,
      // and the output dimensions the index maps to.
      auto* indexed_array = root->as<ScalarIndexedArray>();
      std::string name = root->kind() == Array::kScalarIndexedConstant
                             ? "scalar-indexed-const"
                             : "scalar-indexed";
      return absl::StrCat(
          "(", name, " ", ToString(indexed_array->source(), print_constants),
          " ", ToString(indexed_array->indices(), print_constants), " ",
          indexed_array->source_dim(), "->[",
          StrJoin(indexed_array->output_dims(), ","), "])");
    }
  }
}
// Returns the Array analysis result for `instr`, computing and caching it
// (together with results for its transitive operands) on first request.
absl::StatusOr<Analysis::Array*> IndexedArrayAnalysis::GetArrayFor(
    const HloInstruction* instr) {
  if (auto cached = cache_.find(instr); cached != cache_.end()) {
    return cached->second;
  }
  // Cache miss: analyze `instr` and everything it depends on, then re-read.
  TF_RETURN_IF_ERROR(TraverseAndPopulateCache(instr));
  return FindOrDie(cache_, instr);
}
// Iterative post-order DFS from `root` that computes and caches an Array for
// every instruction reachable through operands that is not already cached.
absl::Status IndexedArrayAnalysis::TraverseAndPopulateCache(
    const HloInstruction* root) {
  absl::InlinedVector<const HloInstruction*, 4> stack;
  // kDiscovered: on the stack, operands not yet pushed.
  // kVisited: operands pushed; Array is computed when popped again.
  enum DfsState { kDiscovered, kVisited };
  absl::flat_hash_map<const HloInstruction*, DfsState> dfs_state_map;
  stack.push_back(root);
  InsertOrDie(&dfs_state_map, root, kDiscovered);
  do {
    const HloInstruction* instr = stack.back();
    if (cache_.contains(instr)) {
      // Already analyzed (possibly reached via multiple users); skip.
      stack.pop_back();
      continue;
    }
    switch (FindOrDie(dfs_state_map, instr)) {
      case kDiscovered: {
        // First visit: push uncached operands and leave `instr` on the stack
        // so it is revisited (as kVisited) after its operands are done.
        for (const HloInstruction* operand : instr->operands()) {
          if (!cache_.contains(operand)) {
            stack.push_back(operand);
            CHECK(!dfs_state_map.contains(operand) ||
                  dfs_state_map[operand] == kDiscovered);
            dfs_state_map[operand] = kDiscovered;
          }
        }
        dfs_state_map[instr] = kVisited;
        break;
      }
      case kVisited:
        // Second visit: all operands are now cached, so this instruction's
        // Array can be computed.
        stack.pop_back();
        TF_ASSIGN_OR_RETURN(Array * array, ComputeArrayFor(instr));
        InsertOrDie(&cache_, instr, array);
        break;
    }
  } while (!stack.empty());
  return absl::OkStatus();
}
// Dispatches on `instr`'s opcode to the matching ComputeArrayFor* helper.
// All operands of `instr` must already be present in `cache_`.  Falls back
// to an UnknownArray when no helper applies or a helper declines by
// returning nullptr.
absl::StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayFor(
    const HloInstruction* instr) {
  Array* computed_array;
  if (instr->IsElementwise() && instr->operand_count() == 1) {
    TF_ASSIGN_OR_RETURN(
        computed_array,
        ComputeArrayForElementwiseUnaryOp(
            instr->opcode(), FindOrDie(cache_, instr->operand(0))));
  } else if (instr->IsElementwise() && instr->operand_count() == 2) {
    TF_ASSIGN_OR_RETURN(
        computed_array,
        ComputeArrayForElementwiseBinaryOp(
            instr->opcode(), FindOrDie(cache_, instr->operand(0)),
            FindOrDie(cache_, instr->operand(1))));
  } else if (instr->opcode() == HloOpcode::kConstant) {
    TF_ASSIGN_OR_RETURN(computed_array,
                        ComputeArrayForConstant(instr->literal()));
  } else if (instr->opcode() == HloOpcode::kGather) {
    TF_ASSIGN_OR_RETURN(
        computed_array,
        ComputeArrayForGather(instr->shape(), instr->gather_dimension_numbers(),
                              instr->gather_slice_sizes(),
                              FindOrDie(cache_, instr->operand(0)),
                              FindOrDie(cache_, instr->operand(1))));
  } else if (instr->opcode() == HloOpcode::kReshape) {
    TF_ASSIGN_OR_RETURN(
        computed_array,
        ComputeArrayForReshape(instr->shape(),
                               FindOrDie(cache_, instr->operand(0))));
  } else if (instr->opcode() == HloOpcode::kDot) {
    TF_ASSIGN_OR_RETURN(
        computed_array,
        ComputeArrayForDot(instr->shape(), instr->dot_dimension_numbers(),
                           instr->precision_config(),
                           FindOrDie(cache_, instr->operand(0)),
                           FindOrDie(cache_, instr->operand(1))));
  } else {
    // Opcode not handled by this analysis.
    computed_array = nullptr;
  }
  if (!computed_array) {
    // Model everything we cannot analyze as an opaque UnknownArray.
    computed_array = Construct<UnknownArray>(instr);
  }
  return computed_array;
}
// Wraps `literal` in a ConstantArray node owned by this analysis.
absl::StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayForConstant(
    const Literal& literal) {
  ConstantArray* const constant_array = Construct<ConstantArray>(&literal);
  return constant_array;
}
// Folds a gather-of-gather, G(G(A, X), Y), into a single scalar-indexed node
// G(A, G(X, Y)).  `source` is the inner gather (of `a` with indices `x`);
// `indices` (= `y`), `source_dim`, `output_dims` and `shape` describe the
// outer gather.  The caller guarantees that `source_dim` is one of the inner
// gather's output dims (checked below).
absl::StatusOr<ScalarIndexedArray*> IndexedArrayAnalysis::FoldGatherOfGather(
    ScalarIndexedArray* source, Array* indices, int64_t source_dim,
    absl::Span<const int64_t> output_dims, Shape shape) {
  Array* a = source->source();
  Array* x = source->indices();
  Array* y = indices;
  // Simulate applying both gathers to a symbolic index into `a` to discover
  // where each component of the composed index ends up.
  enum class IndexComponent { Ungathered, GatheredFirst, GatheredSecond };
  std::vector<IndexComponent> simulated_index(a->shape().dimensions_size(),
                                              IndexComponent::Ungathered);
  // Simulate the inner gather: remove its source dim, insert its output dims.
  EraseAt(&simulated_index, source->source_dim());
  for (int64_t gather_dim : source->output_dims()) {
    simulated_index.insert(simulated_index.begin() + gather_dim,
                           IndexComponent::GatheredFirst);
  }
  // Simulate the outer gather the same way.
  EraseAt(&simulated_index, source_dim);
  for (int64_t output_dim : output_dims) {
    simulated_index.insert(simulated_index.begin() + output_dim,
                           IndexComponent::GatheredSecond);
  }
  // The outer gather must index one of the inner gather's output dims; find
  // which one -- that is the dim of `x` the composed index gather indexes.
  int64_t source_dim_for_index_array =
      FindIndex(source->output_dims(), source_dim);
  CHECK_NE(source_dim_for_index_array, source->output_dims().size());
  // Output dims of G(X, Y), numbered within the gathered components only.
  std::vector<int64_t> output_dims_for_index_array;
  int64_t gathered_index_components_seen = 0;
  for (IndexComponent simulation_dim : simulated_index) {
    if (simulation_dim == IndexComponent::GatheredSecond) {
      output_dims_for_index_array.push_back(gathered_index_components_seen);
    }
    if (simulation_dim != IndexComponent::Ungathered) {
      gathered_index_components_seen++;
    }
  }
  // Shape of the composed index array and the output dims of the new outer
  // gather: every gathered component, sized per the final result shape.
  std::vector<int64_t> dim_sizes_for_composed_index;
  std::vector<int64_t> output_dims_for_new_gather;
  for (int64_t i = 0, e = simulated_index.size(); i < e; i++) {
    if (simulated_index[i] != IndexComponent::Ungathered) {
      dim_sizes_for_composed_index.push_back(shape.dimensions(i));
      output_dims_for_new_gather.push_back(i);
    }
  }
  // Build G(X, Y) first, then wrap it as the indices of the new gather of A.
  Array* inner_indices = ConstructScalarIndexedArray(
      x, y, source_dim_for_index_array, output_dims_for_index_array,
      ShapeUtil::MakeShape(x->shape().element_type(),
                           dim_sizes_for_composed_index));
  return ConstructScalarIndexedArray(a, inner_indices, source->source_dim(),
                                     output_dims_for_new_gather,
                                     std::move(shape));
}
// Tries to model a gather as a ScalarIndexedArray.  Only "simple" gathers
// qualify: scalar indices (index_vector_dim == indices rank), exactly one
// collapsed slice dim that equals the single start_index_map entry, and full
// slices on every other source dim.  Returns nullptr when the gather does
// not qualify.
absl::StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayForGather(
    const Shape& shape, const GatherDimensionNumbers& dim_numbers,
    absl::Span<const int64_t> slice_sizes, Array* source, Array* indices) {
  if (dim_numbers.index_vector_dim() != indices->shape().dimensions_size()) {
    VLOG(3) << "ComputeArrayForGather: indices are not scalar";
    return nullptr;
  }
  CHECK_EQ(dim_numbers.start_index_map_size(), 1);
  if (dim_numbers.collapsed_slice_dims_size() != 1 ||
      dim_numbers.collapsed_slice_dims(0) != dim_numbers.start_index_map(0)) {
    VLOG(3) << "ComputeArrayForGather: gather operations must elide "
               "start_index_map[0] and "
               "start_index_map[0] only";
    return nullptr;
  }
  // Every dim other than the collapsed one must be sliced in full.
  for (int64_t i = 0, e = source->shape().dimensions_size(); i < e; i++) {
    if (i != dim_numbers.collapsed_slice_dims(0) &&
        source->shape().dimensions(i) != slice_sizes[i]) {
      VLOG(3) << "ComputeArrayForGather: slice_sizes[" << i
              << "] != source->shape().dimensions(" << i << ") -- "
              << source->shape().dimensions(i) << " vs. " << slice_sizes[i]
              << " with dim_numbers.collapsed_slice_dims(0) = "
              << dim_numbers.collapsed_slice_dims(0);
      return nullptr;
    }
  }
  int64_t source_dim = dim_numbers.start_index_map(0);
  // The output dims are the result dims that are *not* offset dims, i.e. the
  // dims that come from the gather indices.
  std::vector<int64_t> output_dims;
  for (int64_t i = 0, e = shape.dimensions_size(); i < e; i++) {
    if (!absl::c_binary_search(dim_numbers.offset_dims(), i)) {
      output_dims.push_back(i);
    }
  }
  if (auto* indexed = dynamic_cast<ScalarIndexedArray*>(source)) {
    // Gathering from a gather's output dim composes into a single gather.
    if (absl::c_linear_search(indexed->output_dims(), source_dim)) {
      return FoldGatherOfGather(indexed, indices, source_dim, output_dims,
                                shape);
    }
  } else if (auto* constant = dynamic_cast<ConstantArray*>(source)) {
    // Gathers from constants get the more specific node kind, enabling the
    // constant-folding rewrites elsewhere in this file.
    return Construct<ScalarIndexedConstantArray>(constant, indices, source_dim,
                                                 output_dims, shape);
  }
  return Construct<ScalarIndexedArray>(source, indices, source_dim, output_dims,
                                       shape);
}
namespace {
// Returns the index `i` such that the product of values[i..] equals
// `product`, or -1 when no suffix of `values` has that product.  All entries
// of `values` must be positive.
int64_t FindSuffixWithProduct(absl::Span<const int64_t> values,
                              int64_t product) {
  DCHECK(absl::c_all_of(values, [](int64_t value) { return value > 0; }));
  // Grow the suffix from the right until its product reaches `product`.
  int64_t accumulated = 1;
  int64_t suffix_start = values.size();
  while (suffix_start > 0 && accumulated < product) {
    --suffix_start;
    accumulated *= values[suffix_start];
  }
  // Either we hit the product exactly, or we overshot / ran out of elements.
  return accumulated == product ? suffix_start : -1;
}
// Pairs a dimension of a reshape's result with the operand dimension that
// "passes through" the reshape unchanged (same extent, same relative order).
struct ReshapePassthroughDimPair {
  int64_t result_dim;   // dimension index in the reshape result
  int64_t operand_dim;  // corresponding dimension index in the operand
};
// Computes the dimension pairs that pass through a reshape from
// `operand_shape` to `result_shape` unchanged.  A result dim passes through
// when the subarray it spans lines up exactly with an operand dim of the
// same extent (detected by matching suffix element counts).  The returned
// pairs are sorted by both result_dim and operand_dim.
std::vector<ReshapePassthroughDimPair> ComputeReshapePassthroughDimPairs(
    absl::Span<const int64_t> operand_shape,
    absl::Span<const int64_t> result_shape) {
  std::vector<ReshapePassthroughDimPair> result;
  // Number of elements in the result to the right of `result_dim`.
  int64_t result_subarray_size = 1;
  for (int64_t result_dim = result_shape.size() - 1; result_dim >= 0;
       --result_dim) {
    // Operand suffix with the same element count as the result suffix.
    int64_t candidate_operand_dim =
        FindSuffixWithProduct(operand_shape, result_subarray_size);
    CHECK_NE(candidate_operand_dim, 0)
        << "result_dim = " << result_dim
        << ", result_subarray_size = " << result_subarray_size
        << ", result_shape = [" << StrJoin(result_shape, ",") << "]"
        << ", operand_shape = [" << StrJoin(operand_shape, ",") << "]";
    // The dim just left of the matching suffix passes through if the extents
    // agree.
    if (candidate_operand_dim != -1 &&
        result_shape[result_dim] == operand_shape[candidate_operand_dim - 1]) {
      result.push_back({result_dim,
                        candidate_operand_dim - 1});
    }
    result_subarray_size *= result_shape[result_dim];
  }
  // The loop ran right-to-left; restore ascending order.
  absl::c_reverse(result);
  if (VLOG_IS_ON(3)) {
    std::vector<std::string> result_strings;
    absl::c_transform(result, std::back_inserter(result_strings),
                      [](ReshapePassthroughDimPair value) {
                        return absl::StrCat(value.result_dim, "->",
                                            value.operand_dim);
                      });
    VLOG(3) << "For a reshape from [" << StrJoin(operand_shape, ",") << "] to ["
            << StrJoin(result_shape, ",") << "] passthrough indices are ["
            << StrJoin(result_strings, ",")
            << "] (legend: `result`->`operand`)";
  }
  DCHECK(absl::c_is_sorted(
      result, [](ReshapePassthroughDimPair lhs, ReshapePassthroughDimPair rhs) {
        return lhs.result_dim < rhs.result_dim;
      }));
  DCHECK(absl::c_is_sorted(
      result, [](ReshapePassthroughDimPair lhs, ReshapePassthroughDimPair rhs) {
        return lhs.operand_dim < rhs.operand_dim;
      }));
  return result;
}
// Returns true iff `dim` appears as the operand side of any pair in
// `passthrough_dims`.
bool IsReshapePassthroughOperandDim(
    absl::Span<const ReshapePassthroughDimPair> passthrough_dims, int64_t dim) {
  for (const ReshapePassthroughDimPair& pair : passthrough_dims) {
    if (pair.operand_dim == dim) {
      return true;
    }
  }
  return false;
}
// Maps `operand_dim` -- which must be a passthrough operand dimension -- to
// the result dimension it corresponds to.  CHECK-fails if it is not found.
int64_t MapPassthroughOperandDimToResultDim(
    absl::Span<const ReshapePassthroughDimPair> passthrough_dims,
    int64_t operand_dim) {
  std::optional<int64_t> result_dim;
  for (const ReshapePassthroughDimPair& pair : passthrough_dims) {
    if (pair.operand_dim == operand_dim) {
      result_dim = pair.result_dim;
      break;
    }
  }
  CHECK(result_dim.has_value());
  return *result_dim;
}
// Given that `source_passthrough_dim` of `operand_shape` passes through the
// reshape, returns the position it occupies in `result_shape`: the start of
// the result suffix whose element count equals the element count of the
// operand suffix after `source_passthrough_dim`.  Returns -1 when no such
// suffix exists.
int64_t FindSourcePositionForPassthroughResultDim(
    absl::Span<const int64_t> operand_shape,
    absl::Span<const int64_t> result_shape, int64_t source_passthrough_dim) {
  VLOG(3) << "FindSourcePositionForPassthroughResultDim(["
          << StrJoin(operand_shape, ",") << "], [" << StrJoin(result_shape, ",")
          << "], " << source_passthrough_dim << ")";
  // Elements to the right of the passthrough dim in the operand.
  int64_t indexed_source_subarray_size =
      std::accumulate(operand_shape.begin() + source_passthrough_dim + 1,
                      operand_shape.end(), 1LL, std::multiplies<int64_t>());
  return FindSuffixWithProduct(result_shape, indexed_source_subarray_size);
}
// Returns `shape` with every degenerate (size-1) dimension removed.
Shape StripDegenerateDimensions(const Shape& shape) {
  DimensionVector kept_dims;
  for (int64_t dim : shape.dimensions()) {
    if (dim != 1) {
      kept_dims.push_back(dim);
    }
  }
  return ShapeUtil::MakeShape(shape.element_type(), kept_dims);
}
};
// Returns a scalar-indexed array equivalent to `operand` but with all
// degenerate (size-1) dimensions removed from its shape, source and indices.
// Returns `operand` unchanged when it has no degenerate dims.
absl::StatusOr<ScalarIndexedArray*>
IndexedArrayAnalysis::ReshapeToRemoveDegenerateDims(
    ScalarIndexedArray* operand) {
  const Shape& shape = operand->shape();
  if (!ShapeUtil::HasDegenerateDimensions(shape)) {
    return operand;
  }
  // New source shape: drop degenerate dims, but always keep the source dim
  // (even if it is size 1) since the gather still indexes it.
  const Shape& source_shape = operand->source()->shape();
  DimensionVector new_source_shape_dims;
  for (int64_t i = 0, e = source_shape.dimensions_size(); i < e; i++) {
    if (i == operand->source_dim() || source_shape.dimensions(i) != 1) {
      new_source_shape_dims.push_back(source_shape.dimensions(i));
    }
  }
  Shape new_source_shape =
      ShapeUtil::MakeShape(shape.element_type(), new_source_shape_dims);
  Shape new_indices_shape =
      StripDegenerateDimensions(operand->indices()->shape());
  TF_ASSIGN_OR_RETURN(
      Array* const new_source,
      ComputeArrayForReshape(new_source_shape, operand->source()));
  TF_ASSIGN_OR_RETURN(
      Array* const new_indices,
      ComputeArrayForReshape(new_indices_shape, operand->indices()));
  // Shift each output dim left by the number of degenerate result dims that
  // precede it.
  DimensionVector new_output_dims;
  int64_t degenerate_dims_seen = 0;
  for (int64_t i = 0, e = shape.dimensions_size(); i < e; i++) {
    if (shape.dimensions(i) == 1) {
      degenerate_dims_seen++;
    } else if (absl::c_linear_search(operand->output_dims(), i)) {
      new_output_dims.push_back(i - degenerate_dims_seen);
    }
  }
  // Likewise shift the source dim by the degenerate source dims before it.
  int64_t degenerate_dims_before_source_dim =
      std::count(source_shape.dimensions().begin(),
                 source_shape.dimensions().begin() + operand->source_dim(), 1);
  int64_t new_source_dim =
      operand->source_dim() - degenerate_dims_before_source_dim;
  return ConstructScalarIndexedArray(
      new_source, new_indices, new_source_dim,
      InlinedVectorToVector(new_output_dims),
      StripDegenerateDimensions(operand->shape()));
}
// Returns a scalar-indexed array equivalent to `operand` but with degenerate
// (size-1) dimensions inserted into its result shape at the positions listed
// in `degenerate_dims`.  `operand` itself must have no degenerate dims.
absl::StatusOr<ScalarIndexedArray*>
IndexedArrayAnalysis::ReshapeToAddDegenerateDims(
    ScalarIndexedArray* operand, absl::Span<const int64_t> degenerate_dims) {
  if (degenerate_dims.empty()) {
    return operand;
  }
  CHECK(!ShapeUtil::HasDegenerateDimensions(operand->shape()));
  // Shift the output dims to account for the newly inserted size-1 dims:
  // mark the output dims in a bitvector over the operand's dims, insert
  // `false` entries for the degenerate dims, and read the shifted indices
  // back out.
  DimensionVector new_output_dims = [&]() {
    absl::InlinedVector<bool, 6> output_dims_bitvector(
        operand->shape().dimensions_size());
    for (int64_t output_dim : operand->output_dims()) {
      output_dims_bitvector[output_dim] = true;
    }
    for (int64_t degenerate_dim : degenerate_dims) {
      InsertAt(&output_dims_bitvector, degenerate_dim, false);
    }
    DimensionVector result;
    result.reserve(operand->output_dims().size());
    for (int64_t i = 0, e = output_dims_bitvector.size(); i < e; i++) {
      if (output_dims_bitvector[i]) {
        result.push_back(i);
      }
    }
    return result;
  }();
  // New result shape: the old result shape with 1's inserted at
  // `degenerate_dims`.
  DimensionVector new_result_shape_dims;
  absl::c_copy(operand->shape().dimensions(),
               std::back_inserter(new_result_shape_dims));
  for (int64_t degenerate_dim : degenerate_dims) {
    InsertAt(&new_result_shape_dims, degenerate_dim, 1);
  }
  // New source shape: the new result shape with the output dims removed; the
  // source dim itself is inserted below once its position is known.
  DimensionVector new_source_shape_dims = new_result_shape_dims;
  for (int64_t output_dim : new_output_dims) {
    EraseAt(&new_source_shape_dims, output_dim);
  }
  // The source dim belongs at the first position in the new source shape
  // where the number of preceding non-degenerate dims equals the original
  // source dim index.  (The counter must persist across iterations and the
  // probe must use the loop index `i`; the previous version reset the
  // counter every iteration and read the still-uninitialized
  // `new_source_dim`, which was UB and failed for any source_dim != 0.)
  int64_t new_source_dim = [&]() {
    int64_t non_degenerate_dims_seen = 0;
    for (int i = 0, e = new_source_shape_dims.size(); i < e; i++) {
      if (non_degenerate_dims_seen == operand->source_dim()) {
        return i;
      }
      if (new_source_shape_dims[i] != 1) {
        non_degenerate_dims_seen++;
      }
    }
    LOG(FATAL) << "Did not find source dim in " << ToString(operand);
  }();
  int64_t source_dim_size =
      operand->source()->shape().dimensions(operand->source_dim());
  InsertAt(&new_source_shape_dims, new_source_dim,
           source_dim_size);
  Shape new_source_shape = ShapeUtil::MakeShape(operand->shape().element_type(),
                                                new_source_shape_dims);
  Shape new_result_shape = ShapeUtil::MakeShape(operand->shape().element_type(),
                                                new_result_shape_dims);
  TF_ASSIGN_OR_RETURN(
      Array* const new_source,
      ComputeArrayForReshape(new_source_shape, operand->source()));
  return ConstructScalarIndexedArray(
      new_source, operand->indices(), new_source_dim,
      InlinedVectorToVector(new_output_dims), new_result_shape);
}
// Tries to fold reshape(gather-from-constant) into a gather.  Degenerate
// dims are stripped from both the operand and the target shape first, the
// degenerate-free case is handled by FoldReshapeOfGatherNoDegenerateDims,
// and the result's degenerate dims are re-inserted at the end.  Returns
// nullptr when the fold is not possible.
absl::StatusOr<ScalarIndexedArray*> IndexedArrayAnalysis::FoldReshapeOfGather(
    const Shape& shape, ScalarIndexedConstantArray* operand) {
  VLOG(3) << "FoldReshapeOfGather(" << ToString(operand) << ")";
  TF_ASSIGN_OR_RETURN(ScalarIndexedArray* const operand_without_degenerate_dims,
                      ReshapeToRemoveDegenerateDims(operand));
  Shape output_shape_without_degenerate_dims = StripDegenerateDimensions(shape);
  TF_ASSIGN_OR_RETURN(
      ScalarIndexedArray* const folded_reshape_without_degenerate_dims,
      FoldReshapeOfGatherNoDegenerateDims(
          output_shape_without_degenerate_dims,
          operand_without_degenerate_dims->as<ScalarIndexedConstantArray>()));
  if (folded_reshape_without_degenerate_dims == nullptr) {
    return nullptr;
  }
  // Re-insert the degenerate dims of the requested result shape.
  DimensionVector degenerate_result_dims;
  for (int64_t i = 0, e = shape.dimensions_size(); i < e; i++) {
    if (shape.dimensions(i) == 1) {
      degenerate_result_dims.push_back(i);
    }
  }
  return ReshapeToAddDegenerateDims(folded_reshape_without_degenerate_dims,
                                    degenerate_result_dims);
}
// Core of the reshape(gather(constant)) fold for shapes with no degenerate
// dims: rewrites reshape(G(C, X)) as G(C', X) where C' is the constant
// reshaped so that the gather's output dims map onto the reshape's
// passthrough dims.  Returns nullptr when the fold is not possible.
absl::StatusOr<ScalarIndexedArray*>
IndexedArrayAnalysis::FoldReshapeOfGatherNoDegenerateDims(
    const Shape& shape, ScalarIndexedConstantArray* scalar_indexed) {
  VLOG(3) << "FoldReshapeOfGatherNoDegenerateDims(" << ToString(scalar_indexed)
          << ")";
  CHECK(!ShapeUtil::HasDegenerateDimensions(shape));
  CHECK(!ShapeUtil::HasDegenerateDimensions(scalar_indexed->shape()));
  std::vector<ReshapePassthroughDimPair> reshape_passthrough_dims =
      ComputeReshapePassthroughDimPairs(
          scalar_indexed->shape().dimensions(),
          shape.dimensions());
  auto is_reshape_passthrough_operand_dim = [&](int64_t operand_dim) {
    return IsReshapePassthroughOperandDim(reshape_passthrough_dims,
                                          operand_dim);
  };
  // Every gather output dim must survive the reshape unchanged, otherwise
  // the gather structure is destroyed and we cannot fold.
  if (!absl::c_all_of(scalar_indexed->output_dims(),
                      is_reshape_passthrough_operand_dim)) {
    VLOG(3) << "Not all output dims are passthrough dims "
            << ToString(scalar_indexed);
    return nullptr;
  }
  // Shape of the reshaped constant: the target shape with the (mapped)
  // output dims removed; the source dim is inserted below.  Iterate in
  // reverse so earlier erasures do not shift later indices.
  std::vector<int64_t> new_scalar_indexed_source_shape(
      shape.dimensions().begin(), shape.dimensions().end());
  for (int64_t i = scalar_indexed->output_dims().size() - 1; i >= 0; i--) {
    int64_t output_dim = scalar_indexed->output_dims()[i];
    int64_t output_dim_after_reshape = MapPassthroughOperandDimToResultDim(
        reshape_passthrough_dims, output_dim);
    EraseAt(&new_scalar_indexed_source_shape, output_dim_after_reshape);
  }
  // Where does the gathered source dim land in the reshaped constant?
  const Shape& scalar_indexed_source_shape = scalar_indexed->source()->shape();
  int64_t source_dim_for_new_scalar_indexed_node =
      FindSourcePositionForPassthroughResultDim(
          scalar_indexed_source_shape.dimensions(),
          new_scalar_indexed_source_shape,
          scalar_indexed->source_dim());
  if (source_dim_for_new_scalar_indexed_node == -1) {
    VLOG(3) << "Could not compute the source dim for the new scalar indexed "
               "node: scalar_indexed_source_shape = ["
            << StrJoin(scalar_indexed_source_shape.dimensions(), ",")
            << "] and new_scalar_indexed_source_shape = ["
            << StrJoin(new_scalar_indexed_source_shape, ",") << "]";
    return nullptr;
  }
  InsertAt(
      &new_scalar_indexed_source_shape, source_dim_for_new_scalar_indexed_node,
      scalar_indexed_source_shape.dimensions(scalar_indexed->source_dim()));
  // Sanity checks: the reshaped constant has the same element count, and the
  // original source dim passes through to the new constant shape.
  CHECK_EQ(absl::c_accumulate(new_scalar_indexed_source_shape, 1LL,
                              std::multiplies<int64_t>()),
           ShapeUtil::ElementsIn(scalar_indexed_source_shape));
  CHECK(IsReshapePassthroughOperandDim(
      ComputeReshapePassthroughDimPairs(
          scalar_indexed_source_shape.dimensions(),
          new_scalar_indexed_source_shape),
      scalar_indexed->source_dim()));
  auto map_passthrough_operand_dim_to_result_dim = [&](int64_t result_dim) {
    return MapPassthroughOperandDimToResultDim(reshape_passthrough_dims,
                                               result_dim);
  };
  // The new gather's output dims are the old ones mapped through the reshape.
  std::vector<int64_t> output_dims_for_new_scalar_indexed_node;
  absl::c_transform(scalar_indexed->output_dims(),
                    std::back_inserter(output_dims_for_new_scalar_indexed_node),
                    map_passthrough_operand_dim_to_result_dim);
  TF_ASSIGN_OR_RETURN(const Literal* new_scalar_indexed_source_literal,
                      TakeOwnership(scalar_indexed->literal().Reshape(
                          new_scalar_indexed_source_shape)));
  TF_ASSIGN_OR_RETURN(
      Array * new_scalar_indexed_source,
      ComputeArrayForConstant(*new_scalar_indexed_source_literal));
  return ConstructScalarIndexedArray(
      new_scalar_indexed_source, scalar_indexed->indices(),
      source_dim_for_new_scalar_indexed_node,
      output_dims_for_new_scalar_indexed_node, shape);
}
// Models a reshape of `operand` to `shape`: no-op for compatible shapes,
// folded into the gather for scalar-indexed constants when possible,
// constant-folded for constants, and otherwise wrapped in a ReshapedArray.
absl::StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayForReshape(
    const Shape& shape, Array* operand) {
  if (ShapeUtil::Compatible(operand->shape(), shape)) {
    return operand;
  }
  if (auto* scalar_indexed =
          dynamic_cast<ScalarIndexedConstantArray*>(operand)) {
    TF_ASSIGN_OR_RETURN(Analysis::Array * reshape_folded_into_gather,
                        FoldReshapeOfGather(shape, scalar_indexed));
    if (reshape_folded_into_gather) {
      return reshape_folded_into_gather;
    }
  }
  if (auto* constant_array = dynamic_cast<ConstantArray*>(operand)) {
    // Reshaping a constant is just a reshaped literal.
    TF_ASSIGN_OR_RETURN(
        Literal* const new_literal,
        TakeOwnership(constant_array->literal()->Reshape(shape.dimensions())));
    return Construct<ConstantArray>(new_literal);
  }
  return Construct<ReshapedArray>(operand, shape);
}
// Folds op(G(C, X), broadcast(constant)) -- or the mirrored form -- into
// G(op(C, broadcast'(constant)), X), i.e. moves the elementwise op inside
// the gather by evaluating it against the gathered constant.  Returns
// nullptr when the pattern does not match.
absl::StatusOr<Analysis::Array*>
IndexedArrayAnalysis::ComputeArrayForElementwiseBinaryOp(HloOpcode opcode,
                                                         Array* lhs,
                                                         Array* rhs) {
  // Exactly one side must be a gather from a constant.
  ScalarIndexedConstantArray* lhs_scalar_indexed_const =
      dynamic_cast<ScalarIndexedConstantArray*>(lhs);
  ScalarIndexedConstantArray* rhs_scalar_indexed_const =
      dynamic_cast<ScalarIndexedConstantArray*>(rhs);
  bool lhs_is_indexed;
  if (lhs_scalar_indexed_const && !rhs_scalar_indexed_const) {
    lhs_is_indexed = true;
  } else if (rhs_scalar_indexed_const && !lhs_scalar_indexed_const) {
    lhs_is_indexed = false;
  } else {
    return nullptr;
  }
  ScalarIndexedConstantArray* scalar_indexed_const =
      lhs_is_indexed ? lhs_scalar_indexed_const : rhs_scalar_indexed_const;
  // The other side must be a broadcast of a constant.
  UnknownArray* candidate_broadcast_array =
      dynamic_cast<UnknownArray*>(lhs_is_indexed ? rhs : lhs);
  if (!candidate_broadcast_array ||
      candidate_broadcast_array->instruction().opcode() !=
          HloOpcode::kBroadcast) {
    return nullptr;
  }
  const HloInstruction* broadcast_instr =
      &candidate_broadcast_array->instruction();
  const HloInstruction* broadcast_const_operand = broadcast_instr->operand(0);
  if (broadcast_const_operand->opcode() != HloOpcode::kConstant) {
    return nullptr;
  }
  // The broadcast must not feed real data into the gather's output dims;
  // only dims absent from `broadcast_dims` are pure broadcast dims.
  absl::Span<const int64_t> broadcast_dims = broadcast_instr->dimensions();
  auto is_broadcasted_dim = [&](int64_t output_dim) {
    return absl::c_find(broadcast_dims, output_dim) == broadcast_dims.end();
  };
  if (!absl::c_all_of(scalar_indexed_const->output_dims(),
                      is_broadcasted_dim)) {
    return nullptr;
  }
  // Work out how the broadcast looks from the perspective of the gather's
  // *source*: simulate removing the output dims and adding back the source
  // dim (which is broadcasted, since no real data flowed into it).
  enum class IndexComponent { Broadcasted, NotBroadcasted };
  std::vector<IndexComponent> simulated_index(
      broadcast_instr->shape().dimensions_size(), IndexComponent::Broadcasted);
  for (int64_t broadcast_dim : broadcast_dims) {
    simulated_index[broadcast_dim] = IndexComponent::NotBroadcasted;
  }
  // Erase output dims in reverse so indices stay valid during erasure.
  absl::Span<const int64_t> output_dims = scalar_indexed_const->output_dims();
  for (int64_t i = output_dims.size() - 1; i >= 0; --i) {
    CHECK(simulated_index[output_dims[i]] == IndexComponent::Broadcasted);
    EraseAt(&simulated_index, output_dims[i]);
  }
  InsertAt(&simulated_index, scalar_indexed_const->source_dim(),
           IndexComponent::Broadcasted);
  // Dims of the source that carry real (non-broadcast) data.
  std::vector<int64_t> new_inner_broadcast_dims;
  for (int64_t i = 0; i < simulated_index.size(); i++) {
    if (simulated_index[i] == IndexComponent::NotBroadcasted) {
      new_inner_broadcast_dims.push_back(i);
    }
  }
  // Materialize the broadcast against the source shape, then evaluate the
  // binary op on the constants -- preserving operand order.
  TF_ASSIGN_OR_RETURN(
      Literal inner_broadcast_result,
      broadcast_const_operand->literal().Broadcast(
          scalar_indexed_const->source()->shape(), new_inner_broadcast_dims));
  const Literal* literal_for_new_source;
  if (lhs_is_indexed) {
    TF_ASSIGN_OR_RETURN(
        literal_for_new_source,
        TakeOwnership(HloEvaluator{}.EvaluateElementwiseBinaryOp(
            opcode, scalar_indexed_const->literal(), inner_broadcast_result)));
  } else {
    TF_ASSIGN_OR_RETURN(
        literal_for_new_source,
        TakeOwnership(HloEvaluator{}.EvaluateElementwiseBinaryOp(
            opcode, inner_broadcast_result, scalar_indexed_const->literal())));
  }
  ConstantArray* new_source = Construct<ConstantArray>(literal_for_new_source);
  // Same gather structure, new (folded) constant source.
  return Construct<ScalarIndexedConstantArray>(
      new_source, scalar_indexed_const->indices(),
      scalar_indexed_const->source_dim(),
      std::vector<int64_t>(scalar_indexed_const->output_dims().begin(),
                           scalar_indexed_const->output_dims().end()),
      scalar_indexed_const->shape());
}
// Folds an elementwise unary op applied to a scalar-indexed constant array.
// Because the op is elementwise, unary(gather(C, i)) == gather(unary(C), i):
// the op is evaluated once on the constant source and the scalar-indexed
// (gather) structure is rebuilt on top of the result, unchanged.
//
// Returns nullptr (not an error) when `operand` is not a
// ScalarIndexedConstantArray, i.e. there is no folding opportunity.
absl::StatusOr<Analysis::Array*>
IndexedArrayAnalysis::ComputeArrayForElementwiseUnaryOp(HloOpcode opcode,
                                                        Array* operand) {
  auto* scalar_indexed_const =
      dynamic_cast<ScalarIndexedConstantArray*>(operand);
  if (scalar_indexed_const == nullptr) {
    return nullptr;
  }
  // Evaluate the unary op on the gather's constant source.
  TF_ASSIGN_OR_RETURN(Literal * literal_for_new_source,
                      TakeOwnership(HloEvaluator{}.EvaluateElementwiseUnaryOp(
                          opcode, scalar_indexed_const->literal())));
  // Re-wrap: same indices / source_dim / output_dims, new source constant.
  ConstantArray* new_source = Construct<ConstantArray>(literal_for_new_source);
  return Construct<ScalarIndexedConstantArray>(
      new_source, scalar_indexed_const->indices(),
      scalar_indexed_const->source_dim(),
      SpanToVector(scalar_indexed_const->output_dims()),
      scalar_indexed_const->shape());
}
namespace {
// Returns the single dimension of a rank-`rank` operand that is neither a
// contracting nor a batch dimension of the dot.  Returns nullopt when no such
// dimension exists, or when more than one does.
std::optional<int64_t> GetOnlyNonContractingNonBatchDim(
    int64_t rank, absl::Span<const int64_t> contracting_dims,
    absl::Span<const int64_t> batch_dims) {
  std::optional<int64_t> candidate;
  for (int64_t d = 0; d < rank; ++d) {
    const bool participates_in_dot = absl::c_linear_search(contracting_dims, d) ||
                                     absl::c_linear_search(batch_dims, d);
    if (participates_in_dot) {
      continue;
    }
    if (candidate.has_value()) {
      // Second free dimension found: not unique.
      return std::nullopt;
    }
    candidate = d;
  }
  return candidate;
}
// Checks whether a dot whose `indexed_array` operand has the given
// contracting/batch dimensions can be folded into the indexed array, i.e.
// whether the dot can be rewritten as a dot over the constant source followed
// by the original gather.  `tag` only labels the VLOG messages for the caller
// (ComputeArrayForDotWithIndexedLhs / ...Rhs).
bool CanFoldDotIntoIndexedArray(
    absl::string_view tag, Analysis::ScalarIndexedConstantArray* indexed_array,
    absl::Span<const int64_t> contracting_dims,
    absl::Span<const int64_t> batch_dims) {
  // The indexed array must contribute exactly one "free" (non-contracting,
  // non-batch) dimension to the dot output.
  std::optional<int64_t> non_contracting_non_batch_dim =
      GetOnlyNonContractingNonBatchDim(indexed_array->shape().rank(),
                                       contracting_dims, batch_dims);
  if (!non_contracting_non_batch_dim.has_value()) {
    VLOG(3) << tag << ": multiple or no non-contracting non-batch dimensions";
    return false;
  }
  // That free dimension must be the gather's single output dimension, so the
  // gather index maps directly onto a dimension of the dot result.
  if (indexed_array->output_dims().size() != 1 ||
      indexed_array->output_dims()[0] != *non_contracting_non_batch_dim) {
    VLOG(3) << tag << ": output dims != the lhs non-contracting non-batch dim";
    return false;
  }
  // The gathered-over (source) dimension must be one of the two minor-most
  // dimensions; otherwise the rewritten inner dot cannot be a plain matmul.
  int64_t indexed_array_rank = indexed_array->shape().rank();
  if (indexed_array->source_dim() < (indexed_array_rank - 2)) {
    VLOG(3) << tag
            << ": source dim is not in the low two dims, won't be able to form "
               "a matmul";
    return false;
  }
  return true;
}
}
// Tries to fold dot(scalar-indexed(C, i), RhsConstant) into
// scalar-indexed(dot(C, RhsConstant), i): the dot is evaluated once against
// the gather's constant source and the gather is re-applied to the result.
// Returns nullptr when the fold does not apply.
absl::StatusOr<Analysis::Array*>
IndexedArrayAnalysis::ComputeArrayForDotWithIndexedLhs(
    const Shape& shape, const DotDimensionNumbers& dim_numbers,
    const PrecisionConfig& precision_config, ScalarIndexedConstantArray* lhs,
    ConstantArray* rhs) {
  VLOG(3) << "ComputeArrayForDotWithIndexedLhs(" << ToString(lhs) << " "
          << ToString(rhs);
  if (!CanFoldDotIntoIndexedArray(
          "ComputeArrayForDotWithIndexedLhs", lhs,
          dim_numbers.lhs_contracting_dimensions(),
          dim_numbers.lhs_batch_dimensions())) {
    return nullptr;
  }
  // The inner dot contracts over whichever of the two minor-most lhs
  // dimensions is *not* the gathered-over (source) dimension.
  int64_t lhs_rank = lhs->shape().rank();
  DotDimensionNumbers new_dim_numbers = dim_numbers;
  new_dim_numbers.set_lhs_contracting_dimensions(
      0, lhs->source_dim() == (lhs_rank - 1) ? (lhs_rank - 2) : (lhs_rank - 1));
  TF_ASSIGN_OR_RETURN(
      Literal * literal_for_new_source,
      TakeOwnership(HloEvaluator{}.EvaluateDotOp(
          new_dim_numbers, precision_config, lhs->literal(), *rhs->literal())));
  // New position of the gathered-over dimension: the sum of the lhs and rhs
  // batch-dimension counts — presumably right after the batch dims of the
  // inner dot's result; NOTE(review): confirm against DotOp output layout.
  int64_t new_source_dim = dim_numbers.lhs_batch_dimensions_size() +
                           dim_numbers.rhs_batch_dimensions_size();
  ConstantArray* new_source = Construct<ConstantArray>(literal_for_new_source);
  return Construct<ScalarIndexedConstantArray>(
      new_source, lhs->indices(), new_source_dim,
      SpanToVector(lhs->output_dims()), shape);
}
// Mirror of ComputeArrayForDotWithIndexedLhs for the case where the *rhs* is
// the scalar-indexed constant array: folds dot(LhsConstant, scalar-indexed(C,
// i)) into scalar-indexed(dot(LhsConstant, C), i).  Returns nullptr when the
// fold does not apply.
absl::StatusOr<Analysis::Array*>
IndexedArrayAnalysis::ComputeArrayForDotWithIndexedRhs(
    const Shape& shape, const DotDimensionNumbers& dim_numbers,
    const PrecisionConfig& precision_config, ConstantArray* lhs,
    ScalarIndexedConstantArray* rhs) {
  VLOG(3) << "ComputeArrayForDotWithIndexedRhs(" << ToString(lhs) << " "
          << ToString(rhs);
  if (!CanFoldDotIntoIndexedArray(
          "ComputeArrayForDotWithIndexedRhs", rhs,
          dim_numbers.rhs_contracting_dimensions(),
          dim_numbers.rhs_batch_dimensions())) {
    return nullptr;
  }
  // Contract over whichever of the two minor-most rhs dimensions is *not* the
  // gathered-over (source) dimension.
  int64_t rhs_rank = rhs->shape().rank();
  DotDimensionNumbers new_dim_numbers = dim_numbers;
  new_dim_numbers.set_rhs_contracting_dimensions(
      0, rhs->source_dim() == (rhs_rank - 1) ? (rhs_rank - 2) : (rhs_rank - 1));
  TF_ASSIGN_OR_RETURN(
      Literal * literal_for_new_source,
      TakeOwnership(HloEvaluator{}.EvaluateDotOp(
          new_dim_numbers, precision_config, *lhs->literal(), rhs->literal())));
  // Same as the lhs case but offset by one — presumably to skip the lhs free
  // dimension of the dot result; NOTE(review): confirm against DotOp layout.
  int64_t new_source_dim = dim_numbers.lhs_batch_dimensions_size() +
                           dim_numbers.rhs_batch_dimensions_size() + 1;
  ConstantArray* new_source = Construct<ConstantArray>(literal_for_new_source);
  return Construct<ScalarIndexedConstantArray>(
      new_source, rhs->indices(), new_source_dim,
      SpanToVector(rhs->output_dims()), shape);
}
// Dispatches a dot to the indexed-lhs or indexed-rhs fold, depending on which
// operand is a scalar-indexed constant array paired with a plain constant.
// Returns nullptr when neither combination applies.
absl::StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayForDot(
    const Shape& shape, const DotDimensionNumbers& dim_numbers,
    const PrecisionConfig& precision_config, Array* lhs, Array* rhs) {
  VLOG(3) << "ComputeArrayForDot(" << ToString(lhs) << " " << ToString(rhs);
  auto* indexed_lhs = dynamic_cast<ScalarIndexedConstantArray*>(lhs);
  auto* constant_rhs = dynamic_cast<ConstantArray*>(rhs);
  if (indexed_lhs != nullptr && constant_rhs != nullptr) {
    return ComputeArrayForDotWithIndexedLhs(
        shape, dim_numbers, precision_config, indexed_lhs, constant_rhs);
  }
  auto* constant_lhs = dynamic_cast<ConstantArray*>(lhs);
  auto* indexed_rhs = dynamic_cast<ScalarIndexedConstantArray*>(rhs);
  if (constant_lhs != nullptr && indexed_rhs != nullptr) {
    return ComputeArrayForDotWithIndexedRhs(
        shape, dim_numbers, precision_config, constant_lhs, indexed_rhs);
  }
  return nullptr;
}
// Debug-only HLO pass: when VLOG level 2 is enabled, runs IndexedArrayAnalysis
// over every non-fusion computation and logs each instruction the analysis
// could summarize as something more interesting than an unknown or plain
// constant array.  Never modifies the module — always reports "no change".
absl::StatusOr<bool> IndexedArrayAnalysisPrinterPass::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  if (!VLOG_IS_ON(2)) {
    // The analysis exists only to produce log output; skip it entirely.
    return false;
  }
  IndexedArrayAnalysis analysis;
  for (auto* computation :
       module->MakeNonfusionComputations(execution_threads)) {
    for (auto* instr : computation->instructions()) {
      TF_ASSIGN_OR_RETURN(Analysis::Array * t, analysis.GetArrayFor(instr));
      // Only print non-trivial summaries (skip Unknown/Constant results).
      if (!dynamic_cast<UnknownArray*>(t) && !dynamic_cast<ConstantArray*>(t)) {
        VLOG(2) << instr->ToString() << " -> " << analysis.ToString(t);
      }
    }
  }
  return false;
}
} | #include "xla/service/indexed_array_analysis.h"
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/strings/ascii.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// Test fixture: parses an HLO module, runs IndexedArrayAnalysis on its root
// instruction, and compares the analysis' printed summary (after whitespace
// canonicalization) against an expected expression string.
class IndexedArrayAnalysisTest : public HloTestBase {
 protected:
  // Compares without printing constant literals in the summary.
  void AssertArrayForRootExpressionIs(const std::string& hlo_text,
                                      const std::string& root_expression) {
    AssertArrayForRootExpressionIsImpl(hlo_text, root_expression,
                                       /*print_constants=*/false);
  }
  // Compares with constant literal contents included in the summary.
  void AssertArrayWithConstantsForRootExpressionIs(
      const std::string& hlo_text, const std::string& root_expression) {
    AssertArrayForRootExpressionIsImpl(hlo_text, root_expression,
                                       /*print_constants=*/true);
  }
 private:
  // Collapses each run of whitespace to a single space and strips leading and
  // trailing spaces, so expected expressions can be written free-form.
  std::string CanonicalizeWhitespace(const std::string& text) {
    std::string result;
    for (char c : text) {
      if (!absl::ascii_isspace(c)) {
        result.push_back(c);
      } else if (!result.empty() && result.back() != ' ') {
        result.push_back(' ');
      }
    }
    while (!result.empty() && result.back() == ' ') {
      result.pop_back();
    }
    return result;
  }
  // Shared implementation for the two public assertion helpers above.
  void AssertArrayForRootExpressionIsImpl(const std::string& hlo_text,
                                          const std::string& root_expression,
                                          bool print_constants) {
    IndexedArrayAnalysis indexed_tensor_analysis;
    TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
                            ParseAndReturnVerifiedModule(hlo_text));
    TF_ASSERT_OK_AND_ASSIGN(IndexedArrayAnalysis::Array* const array_result,
                            indexed_tensor_analysis.GetArrayFor(
                                m->entry_computation()->root_instruction()));
    std::string string_result = CanonicalizeWhitespace(
        indexed_tensor_analysis.ToString(array_result, print_constants));
    LOG(INFO) << string_result;
    ASSERT_EQ(string_result, CanonicalizeWhitespace(root_expression));
  }
};
TEST_F(IndexedArrayAnalysisTest, SimpleOneToOneGather) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[5] parameter(1)
ROOT gather = s32[5,3] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,3}
}
)";
AssertArrayForRootExpressionIs(hlo_text,
"(scalar-indexed %operand %indices 0->[0])");
}
TEST_F(IndexedArrayAnalysisTest, SimpleOneToOneConstantGather) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,3] constant({{1,2,3},{1,2,3},{1,2,3}})
indices = s32[5] parameter(0)
ROOT gather = s32[5,3] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,3}
}
)";
AssertArrayForRootExpressionIs(
hlo_text, "(scalar-indexed-const (constant s32[3,3]) %indices 0->[0])");
}
TEST_F(IndexedArrayAnalysisTest, GatherIsNotScalarIndexed0) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,3] constant({{1,2,3},{1,2,3},{1,2,3}})
indices = s32[5,2] parameter(0)
ROOT gather = s32[5] gather(operand, indices),
offset_dims={},
collapsed_slice_dims={0,1},
start_index_map={0,1},
index_vector_dim=1,
slice_sizes={1,1}
}
)";
AssertArrayForRootExpressionIs(hlo_text, "%gather");
}
TEST_F(IndexedArrayAnalysisTest, GatherIsNotScalarIndexed1) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,3,1] parameter(0)
indices = s32[5] parameter(1)
ROOT gather = s32[5,3] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0,2},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,3,1}
}
)";
AssertArrayForRootExpressionIs(hlo_text, "%gather");
}
TEST_F(IndexedArrayAnalysisTest, GatherIsNotScalarIndexed2) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,3,1] parameter(0)
indices = s32[5] parameter(1)
ROOT gather = s32[5,2,3] gather(operand, indices),
offset_dims={1,2},
collapsed_slice_dims={2},
start_index_map={0},
index_vector_dim=1,
slice_sizes={2,3,1}
}
)";
AssertArrayForRootExpressionIs(hlo_text, "%gather");
}
TEST_F(IndexedArrayAnalysisTest, GatherIsNotScalarIndexed3) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[5] parameter(1)
ROOT gather = s32[5,2] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,2}
}
)";
AssertArrayForRootExpressionIs(hlo_text, "%gather");
}
TEST_F(IndexedArrayAnalysisTest, GatherOfGather_OneToOne) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,3] constant({{1,2,3},{1,2,3},{1,2,3}})
indices_a = s32[5] parameter(0)
indices_b = s32[2] parameter(1)
gather_a = s32[5,3] gather(operand, indices_a),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,3}
ROOT gather_b = s32[2,3] gather(gather_a, indices_b),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,3}
}
)";
AssertArrayForRootExpressionIs(
hlo_text,
"(scalar-indexed-const (constant s32[3,3]) (scalar-indexed %indices_a "
"%indices_b 0->[0]) 0->[0])");
}
TEST_F(IndexedArrayAnalysisTest, GatherOfGather_ManyToOneWithOneToOne) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,2] parameter(0)
indices_a = s32[5,7] parameter(1)
indices_b = s32[2] parameter(2)
gather_a = s32[5,3,7] gather(operand, indices_a),
offset_dims={1},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=2,
slice_sizes={3,1}
ROOT gather_b = s32[5,3,2] gather(gather_a, indices_b),
offset_dims={0,1},
collapsed_slice_dims={2},
start_index_map={2},
index_vector_dim=1,
slice_sizes={5,3,1}
}
)";
AssertArrayForRootExpressionIs(hlo_text,
"(scalar-indexed %operand (scalar-indexed "
"%indices_a %indices_b 1->[1]) 1->[0,2])");
}
TEST_F(IndexedArrayAnalysisTest, GatherOfGather_OneToOneWithManyToOne) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,6] parameter(0)
indices_a = s32[2] parameter(1)
indices_b = s32[5,7] parameter(2)
gather_a = s32[2,6] gather(operand, indices_a),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,6}
ROOT gather_b = s32[5,6,7] gather(gather_a, indices_b),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,6}
}
)";
AssertArrayForRootExpressionIs(hlo_text,
"(scalar-indexed %operand (scalar-indexed "
"%indices_a %indices_b 0->[0,1]) 0->[0,2])");
}
TEST_F(IndexedArrayAnalysisTest, GatherOfGather_ManyToOneWithManyToOne) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,2] parameter(0)
indices_a = s32[5,7] parameter(1)
indices_b = s32[4,8] parameter(2)
gather_a = s32[5,3,7] gather(operand, indices_a),
offset_dims={1},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=2,
slice_sizes={3,1}
ROOT gather_b = s32[4,5,3,8] gather(gather_a, indices_b),
offset_dims={1,2},
collapsed_slice_dims={2},
start_index_map={2},
index_vector_dim=2,
slice_sizes={5,3,1}
}
)";
AssertArrayForRootExpressionIs(
hlo_text,
"(scalar-indexed %operand (scalar-indexed %indices_a %indices_b "
"1->[0,2]) 1->[0,1,3])");
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather0) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[3,4] constant({{1,2,3,4},{1,2,3,4},{1,2,3,4}})
indices = s32[5] parameter(0)
gather = s32[5,4] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT reshape = s32[5,2,2] reshape(gather)
}
)";
AssertArrayForRootExpressionIs(
hlo_text, "(scalar-indexed-const (constant s32[3,2,2]) %indices 0->[0])");
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather1) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[3,4] constant({{1,2,3,4},{1,2,3,4},{1,2,3,4}})
indices = s32[5,7] parameter(0)
gather = s32[5,4,7] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,4}
ROOT reshape = s32[5,2,2,7] reshape(gather)
}
)";
AssertArrayForRootExpressionIs(
hlo_text,
"(scalar-indexed-const (constant s32[3,2,2]) %indices 0->[0,3])");
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather2) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[3,2,6] constant({
{{1,2,3,4,5,6},{1,2,3,4,5,6}},
{{1,2,3,4,5,6},{1,2,3,4,5,6}},
{{1,2,3,4,5,6},{1,2,3,4,5,6}}})
indices = s32[5,7] parameter(0)
gather = s32[5,2,6,7] gather(operand, indices),
offset_dims={1,2},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,2,6}
ROOT reshape = s32[5,3,4,7] reshape(gather)
}
)";
AssertArrayForRootExpressionIs(
hlo_text,
"(scalar-indexed-const (constant s32[3,3,4]) %indices 0->[0,3])");
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather3) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[2,6] constant({
{1,2,3,4,5,6},{1,2,3,4,5,6}})
indices = s32[1] parameter(0)
gather = s32[1,6] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,6}
ROOT reshape = s32[1,1,6] reshape(gather)
}
)";
const char* expected_root_expression = R"(
(scalar-indexed-const
(constant s32[2,1,1,6])
(reshape %indices to s32[])
0->[])
)";
AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather4) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[2,3]{1,0} constant({ { 1, 2, 3 }, { 1, 2, 3 } })
i.0 = s64[1,3]{1,0} parameter(0)
g.0 = s32[1,3,3]{2,1,0} gather(operand, i.0), offset_dims={2},
collapsed_slice_dims={0}, start_index_map={0},
index_vector_dim=2, slice_sizes={1,3}
i.1 = s64[1] parameter(1)
g.1 = s32[1,1,3]{2,1,0} gather(g.0, i.1), offset_dims={0,2},
collapsed_slice_dims={1}, start_index_map={1},
index_vector_dim=1, slice_sizes={1,1,3}
ROOT reshape = s32[1,3]{1,0} reshape(g.1)
}
)";
const char* expected_root_expression = R"(
(scalar-indexed-const
(constant s32[2,1,3])
(reshape
(scalar-indexed %i.0 %i.1 1->[1])
to s64[])
0->[])
)";
AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather5) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[1,6] constant({{1,2,3,4,5,6}})
indices = s32[1] parameter(0)
gather = s32[1,6] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,6}
ROOT reshape = s32[1,1,6] reshape(gather)
}
)";
const char* expected_root_expression = R"(
(scalar-indexed-const
(constant s32[1,1,1,6])
(reshape %indices to s32[])
0->[])
)";
AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather6) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[1,2,6] constant({{
{1,2,3,4,5,6},{1,2,3,4,5,6}}})
indices = s32[1] parameter(0)
gather = s32[1,1,6] gather(operand, indices),
offset_dims={1,2},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={1,1,6}
ROOT reshape = s32[1,1,1,6] reshape(gather)
}
)";
const char* expected_root_expression = R"(
(scalar-indexed-const
(constant s32[2,1,1,1,6] s32[2,1,1,1,6] {
{ { { { 1, 2, 3, 4, 5, 6 } } } },
{ { { { 1, 2, 3, 4, 5, 6 } } } } })
(reshape %indices to s32[])
0->[])
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text,
expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather7) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[2,6] constant({
{1,2,3,4,5,6},{1,2,3,4,5,6}})
indices = s32[1,5] parameter(0)
gather = s32[1,5,6] gather(operand, indices),
offset_dims={2},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,6}
ROOT reshape = s32[1,1,5,6] reshape(gather)
}
)";
const char* expected_root_expression = R"(
(scalar-indexed-const
(constant s32[2,1,1,6] s32[2,1,1,6] {
{ { { 1, 2, 3, 4, 5, 6 } } },
{ { { 1, 2, 3, 4, 5, 6 } } } })
(reshape %indices to s32[5])
0->[2])
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text,
expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGatherNoFold0) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[3,4] constant({{1,2,3,4},{1,2,3,4},{1,2,3,4}})
indices = s32[5,6] parameter(0)
gather = s32[5,4,6] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,4}
ROOT reshape = s32[5,2,2,2,3] reshape(gather)
}
)";
const char* expected_root_expression = R"(
(reshape
(scalar-indexed-const
(constant s32[3,4])
%indices
0->[0,2])
to s32[5,2,2,2,3])
)";
AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGatherNoFold1) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[3,5,2] constant({
{{1,2},{3,4},{5,6},{7,8},{9,10}},
{{1,2},{3,4},{5,6},{7,8},{9,10}},
{{1,2},{3,4},{5,6},{7,8},{9,10}}})
indices = s32[7] parameter(0)
gather = s32[3,2,7] gather(operand, indices),
offset_dims={0,1},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3,1,2}
ROOT reshape = s32[6,7] reshape(gather)
}
)";
const char* expected_root_expression = R"(
(reshape
(scalar-indexed-const
(constant s32[3,5,2])
%indices
1->[2])
to s32[6,7])
)";
AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGatherNoFold2) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[3,4,1] constant({
{{1},{2},{3},{4}},
{{1},{2},{3},{4}},
{{1},{2},{3},{4}}})
indices = s32[5,6] parameter(0)
gather = s32[5,4,6,1] gather(operand, indices),
offset_dims={1,3},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,4,1}
ROOT reshape = s32[5,2,2,2,3,1] reshape(gather)
}
)";
const char* expected_root_expression = R"(
(reshape
(scalar-indexed-const
(constant s32[3,4,1])
%indices
0->[0,2])
to s32[5,2,2,2,3,1])
)";
AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, UnaryOpOfGather) {
std::string hlo_text = R"(
HloModule UnaryOpOfGather
ENTRY main {
operand = f32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})
indices = s32[5] parameter(0)
gather = f32[5,4] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT tanh = f32[5,4] tanh(gather)
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const (constant f32[3,4] f32[3,4] {
{ 0.761594176, 0.964027584, 0.995054781, 0.999329329 },
{ 0.761594176, 0.995054781, 0.964027584, 0.999329329 },
{ 0.999329329, 0.995054781, 0.964027584, 0.761594176 }
}) %indices 0->[0]))");
}
TEST_F(IndexedArrayAnalysisTest, AddBroadcastedScalarWithGather) {
std::string hlo_text = R"(
HloModule AddBroadcastedScalarWithGather
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})
constant = s32[] constant(5)
constant_broadcasted = s32[5,4] broadcast(constant), dimensions={}
indices = s32[5] parameter(0)
gather = s32[5,4] gather(gather_operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT add = s32[5,4] add(gather, constant_broadcasted)
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const (constant s32[3,4] s32[3,4] {
{ 6, 7, 8, 9 },
{ 6, 8, 7, 9 },
{ 9, 8, 7, 6 }
}) %indices 0->[0]))");
}
TEST_F(IndexedArrayAnalysisTest,
SubtractBroadcastedScalarWithGather_GatherIsLhs) {
std::string hlo_text = R"(
HloModule SubtractBroadcastedScalarWithGather
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})
constant = s32[] constant(5)
constant_broadcasted = s32[5,4] broadcast(constant), dimensions={}
indices = s32[5] parameter(0)
gather = s32[5,4] gather(gather_operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT sub = s32[5,4] subtract(gather, constant_broadcasted)
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const (constant s32[3,4] s32[3,4] {
{ -4, -3, -2, -1 },
{ -4, -2, -3, -1 },
{ -1, -2, -3, -4 }
}) %indices 0->[0]))");
}
TEST_F(IndexedArrayAnalysisTest,
SubtractBroadcastedScalarWithGather_GatherIsRhs) {
std::string hlo_text = R"(
HloModule SubtractBroadcastedScalarWithGather
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})
constant = s32[] constant(5)
constant_broadcasted = s32[5,4] broadcast(constant), dimensions={}
indices = s32[5] parameter(0)
gather = s32[5,4] gather(gather_operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT sub = s32[5,4] subtract(constant_broadcasted, gather)
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const (constant s32[3,4] s32[3,4] {
{ 4, 3, 2, 1 },
{ 4, 2, 3, 1 },
{ 1, 2, 3, 4 }
}) %indices 0->[0]))");
}
TEST_F(IndexedArrayAnalysisTest, AddBroadcastedVectorWithGather) {
std::string hlo_text = R"(
HloModule AddBroadcastedVectorWithGather
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})
constant_vect = s32[4] constant({10,11,12,13})
constant_broadcasted = s32[5,4] broadcast(constant_vect), dimensions={1}
indices = s32[5] parameter(0)
gather = s32[5,4] gather(gather_operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT add = s32[5,4] add(gather, constant_broadcasted)
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const (constant s32[3,4] s32[3,4] {
{ 11, 13, 15, 17 },
{ 11, 14, 14, 17 },
{ 14, 14, 14, 14 }
}) %indices 0->[0]))");
}
TEST_F(IndexedArrayAnalysisTest, AddBroadcastedVectorWithGather_Negative) {
std::string hlo_text = R"(
HloModule AddBroadcastedVectorWithGather
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})
constant_vect = s32[5] constant({10,11,12,13,14})
constant_broadcasted = s32[5,4] broadcast(constant_vect), dimensions={0}
indices = s32[5] parameter(0)
gather = s32[5,4] gather(gather_operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT add = s32[5,4] add(gather, constant_broadcasted)
}
)";
AssertArrayForRootExpressionIs(hlo_text, "%add");
}
TEST_F(IndexedArrayAnalysisTest, RegularUnaryOp) {
std::string hlo_text = R"(
HloModule RegularUnaryOp
ENTRY main {
input = f32[100] parameter(0)
ROOT tanh = f32[100] tanh(input)
}
)";
AssertArrayForRootExpressionIs(hlo_text, "%tanh");
}
TEST_F(IndexedArrayAnalysisTest, RegularBinaryOp) {
std::string hlo_text = R"(
HloModule RegularUnaryOp
ENTRY main {
input0 = f32[100] parameter(0)
input1 = f32[100] parameter(1)
ROOT add = f32[100] add(input0, input1)
}
)";
AssertArrayForRootExpressionIs(hlo_text, "%add");
}
TEST_F(IndexedArrayAnalysisTest, DotOpBasic_0) {
std::string hlo_text = R"(
HloModule DotOp
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{5,6,7,8},{9,10,11,12}})
dot_rhs_constant = s32[4,3] constant({{1,2,3},{4,5,6},{7,8,9},{10,11,12}})
indices = s32[5] parameter(0)
dot_lhs = s32[5,4] gather(gather_operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT dot = s32[5,3] dot(dot_lhs, dot_rhs_constant), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const
(constant s32[3,3] s32[3,3] {
{ 70, 80, 90 },
{ 158, 184, 210 },
{ 246, 288, 330 } })
%indices 0->[0]))");
}
TEST_F(IndexedArrayAnalysisTest, DotOpBasic_1) {
std::string hlo_text = R"(
HloModule DotOp
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{5,6,7,8},{9,10,11,12}})
dot_rhs_constant = s32[3,3] constant({{1,2,3},{4,5,6},{7,8,9}})
indices = s32[5] parameter(0)
dot_lhs = s32[3,5] gather(gather_operand, indices),
offset_dims={0},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3,1}
ROOT dot = s32[5,3] dot(dot_lhs, dot_rhs_constant), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const
(constant s32[4,3] s32[4,3] {
{ 84, 99, 114 },
{ 96, 114, 132 },
{ 108, 129, 150 },
{ 120, 144, 168 } })
%indices 0->[1]))");
}
TEST_F(IndexedArrayAnalysisTest, DotOpBasic_2) {
std::string hlo_text = R"(
HloModule DotOp
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{5,6,7,8},{9,10,11,12}})
dot_lhs_constant = s32[4,3] constant({{1,2,3},{4,5,6},{7,8,9},{10,11,12}})
indices = s32[5] parameter(0)
dot_rhs = s32[3,5] gather(gather_operand, indices),
offset_dims={0},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3,1}
ROOT dot = s32[4,5] dot(dot_lhs_constant, dot_rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const
(constant s32[4,4] s32[4,4] {
{ 38, 44, 50, 56 },
{ 83, 98, 113, 128 },
{ 128, 152, 176, 200 },
{ 173, 206, 239, 272 } })
%indices 1->[1])
)");
}
TEST_F(IndexedArrayAnalysisTest, DotOpBasic_3) {
std::string hlo_text = R"(
HloModule DotOp
ENTRY main {
gather_operand = s32[4,3] constant({{1,2,3},{4,5,6},{7,8,9},{10,11,12}})
dot_lhs_constant = s32[4,3] constant({{1,2,3},{4,5,6},{7,8,9},{10,11,12}})
indices = s32[5] parameter(0)
dot_rhs = s32[5,3] gather(gather_operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,3}
ROOT dot = s32[4,5] dot(dot_lhs_constant, dot_rhs), lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const
(constant s32[4,4] s32[4,4] {
{ 14, 32, 50, 68 },
{ 32, 77, 122, 167 },
{ 50, 122, 194, 266 },
{ 68, 167, 266, 365 } })
%indices 1->[0])
)");
}
TEST_F(IndexedArrayAnalysisTest, DotOpWithBatch) {
std::string hlo_text = R"(
HloModule DotOp
ENTRY main {
gather_operand = s32[2,3,2] constant({{{1,2},{3,4},{5,6}},{{7,8},{9,10},{11,12}}})
dot_lhs_constant = s32[2,2,3] constant({{{1,2,3},{4,5,6}},{{7,8,9},{10,11,12}}})
indices = s32[4] parameter(0)
dot_rhs = s32[2,3,4] gather(gather_operand, indices),
offset_dims={0,1},
collapsed_slice_dims={2},
start_index_map={2},
index_vector_dim=1,
slice_sizes={2,3,1}
ROOT dot = s32[2,2,4] dot(dot_lhs_constant, dot_rhs),
lhs_contracting_dims={2}, rhs_contracting_dims={1},
lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const
(constant s32[2,2,2] s32[2,2,2] {
{ { 22, 28 },
{ 49, 64 } },
{ { 220, 244 },
{ 301, 334 } } })
%indices 3->[2])
)");
}
TEST_F(IndexedArrayAnalysisTest, DotOpNegative) {
std::string hlo_text = R"(
HloModule DotOp
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{5,6,7,8},{9,10,11,12}})
dot_rhs_constant = s32[2,3] constant({{1,2,3},{4,5,6}})
indices = s32[2] parameter(0)
dot_lhs = s32[3,2] gather(gather_operand, indices),
offset_dims={0},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3,1}
ROOT dot = s32[3,3] dot(dot_lhs, dot_rhs_constant), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, "%dot");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/indexed_array_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/indexed_array_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8aa133e2-df08-430d-ba92-71ab1f3fd17e | cpp | tensorflow/tensorflow | logging_op_resolver | tensorflow/lite/tools/optimize/calibration/logging_op_resolver.cc | tensorflow/lite/tools/optimize/calibration/logging_op_resolver_test.cc | #include "tensorflow/lite/tools/optimize/calibration/logging_op_resolver.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tools/optimize/calibration/calibration_common.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace optimize {
namespace calibration {
// Wraps the registrations of the requested builtin/custom ops from
// `base_resolver` so that each op's `invoke` entry point is replaced by
// `logging_eval_fn`.  The original invoke functions are kept in side maps so
// the logging kernel can delegate to them (see GetWrappedKernelInvoke).
// Ops that `base_resolver` cannot resolve are collected and reported once via
// `error_reporter` (if non-null); unresolved Flex custom ops are skipped
// silently.
LoggingOpResolver::LoggingOpResolver(
    const BuiltinOpsSet& builtin_ops_to_replace,
    const CustomOpsSet& custom_ops_to_replace, const OpResolver& base_resolver,
    KernelEvalFuncPtr logging_eval_fn, ErrorReporter* error_reporter) {
  std::vector<std::string> unresolved_builtin_ops;
  std::vector<std::string> unresolved_custom_ops;
  for (const auto& op_and_version : builtin_ops_to_replace) {
    const TfLiteRegistration* base_registration =
        base_resolver.FindOp(op_and_version.first, op_and_version.second);
    if (!base_registration) {
      unresolved_builtin_ops.push_back(
          EnumNameBuiltinOperator(op_and_version.first));
      continue;
    }
    BuiltinOperatorKey key = op_and_version;
    // Remember the real kernel so the logging kernel can call through to it.
    builtin_op_evalfn_map_[key] = base_registration->invoke;
    // Copy the whole registration, swapping in only the logging entry point.
    auto logging_registration =
        std::make_unique<TfLiteRegistration>(*base_registration);
    logging_registration->invoke = logging_eval_fn;
    builtin_op_registration_map_[key] = std::move(logging_registration);
  }
  for (const auto& op_and_version : custom_ops_to_replace) {
    const TfLiteRegistration* base_registration = base_resolver.FindOp(
        op_and_version.first.c_str(), op_and_version.second);
    if (!base_registration) {
      // Missing Flex ops are deliberately not reported — presumably they are
      // resolved by the Flex delegate at runtime, not by this resolver.
      if (!IsFlexOp(op_and_version.first.c_str()))
        unresolved_custom_ops.push_back(op_and_version.first.c_str());
      continue;
    }
    CustomOperatorKey key = op_and_version;
    custom_op_evalfn_map_[key] = base_registration->invoke;
    auto logging_registration =
        std::make_unique<TfLiteRegistration>(*base_registration);
    logging_registration->invoke = logging_eval_fn;
    custom_op_registration_map_[key] = std::move(logging_registration);
  }
  if (!unresolved_builtin_ops.empty() || !unresolved_custom_ops.empty()) {
    // Without a reporter there is nowhere to send the message.
    if (!error_reporter) return;
    std::string error_message =
        "Failed to initialize op resolver for calibration:";
    if (!unresolved_builtin_ops.empty())
      absl::StrAppend(&error_message, "\nThere are unresolved builtin ops: [",
                      absl::StrJoin(unresolved_builtin_ops, ", "), "]");
    if (!unresolved_custom_ops.empty()) {
      absl::StrAppend(&error_message, "\nThere are unresolved custom ops: [",
                      absl::StrJoin(unresolved_custom_ops, ", "), "]");
    }
    TF_LITE_REPORT_ERROR(error_reporter, error_message.c_str());
  }
}
// Returns the logging-wrapped registration for (op, version), or nullptr if
// the builtin was not among the replaced ops.
const TfLiteRegistration* LoggingOpResolver::FindOp(BuiltinOperator op,
                                                    int version) const {
  BuiltinOperatorKey key = {op, version};
  // Single lookup: reuse the iterator instead of find() followed by at().
  auto it = builtin_op_registration_map_.find(key);
  if (it != builtin_op_registration_map_.end()) {
    return it->second.get();
  }
  return nullptr;
}
// Returns the original (pre-wrapping) invoke function for the given builtin.
// Precondition: the builtin was replaced at construction time; otherwise
// map::at throws std::out_of_range.
KernelEvalFuncPtr LoggingOpResolver::GetWrappedKernelInvoke(
    BuiltinOperator op, int version) const {
  const BuiltinOperatorKey key = {op, version};
  return builtin_op_evalfn_map_.at(key);
}
// Returns the logging-wrapped registration for the named custom op, or
// nullptr if the custom op was not among the replaced ops.
const TfLiteRegistration* LoggingOpResolver::FindOp(const char* op,
                                                    int version) const {
  CustomOperatorKey key = {op, version};
  // Single lookup: reuse the iterator instead of find() followed by at().
  auto it = custom_op_registration_map_.find(key);
  if (it != custom_op_registration_map_.end()) {
    return it->second.get();
  }
  return nullptr;
}
// Returns the original (pre-wrapping) invoke function for the named custom
// op. Precondition: the op was replaced at construction time; otherwise
// map::at throws std::out_of_range.
KernelEvalFuncPtr LoggingOpResolver::GetWrappedKernelInvoke(
    const char* op, int version) const {
  const CustomOperatorKey key = {op, version};
  return custom_op_evalfn_map_.at(key);
}
}
}
} | #include "tensorflow/lite/tools/optimize/calibration/logging_op_resolver.h"
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tools/optimize/calibration/calibration_common.h"
namespace tflite {
namespace optimize {
namespace calibration {
namespace {
// Minimal kernel stubs. Their bodies are irrelevant; the tests below only
// compare their distinct function-pointer addresses to verify which
// prepare/invoke entry points a registration ends up holding.
TfLiteStatus ConvPrepare(TfLiteContext* context, TfLiteNode* node) {
  return kTfLiteOk;
}

TfLiteStatus ConvEval(TfLiteContext* context, TfLiteNode* node) {
  return kTfLiteOk;
}

TfLiteStatus AddPrepare(TfLiteContext* context, TfLiteNode* node) {
  return kTfLiteOk;
}

TfLiteStatus AddEval(TfLiteContext* context, TfLiteNode* node) {
  return kTfLiteOk;
}

TfLiteStatus CustomPrepare(TfLiteContext* context, TfLiteNode* node) {
  return kTfLiteOk;
}

TfLiteStatus CustomEval(TfLiteContext* context, TfLiteNode* node) {
  return kTfLiteOk;
}

// Stand-in for the calibration wrapper that LoggingOpResolver installs as
// the `invoke` entry point of every replaced registration.
TfLiteStatus WrappingInvoke(TfLiteContext* context, TfLiteNode* node) {
  return kTfLiteOk;
}
// Replaced registrations must keep the original `prepare` while having their
// `invoke` swapped for the wrapping function.
TEST(LoggingOpResolverTest, KernelInvokesAreReplaced) {
  MutableOpResolver base_resolver;
  TfLiteRegistration conv_registration = {};
  conv_registration.prepare = ConvPrepare;
  conv_registration.invoke = ConvEval;
  base_resolver.AddBuiltin(BuiltinOperator_CONV_2D, &conv_registration);
  TfLiteRegistration add_registration = {};
  add_registration.prepare = AddPrepare;
  add_registration.invoke = AddEval;
  base_resolver.AddBuiltin(BuiltinOperator_ADD, &add_registration);
  BuiltinOpsSet ops_to_replace = {
      {BuiltinOperator_CONV_2D, 1},
      {BuiltinOperator_ADD, 1},
  };
  LoggingOpResolver resolver(ops_to_replace, CustomOpsSet(), base_resolver,
                             WrappingInvoke, nullptr);
  // Function-pointer identity checks: prepare unchanged, invoke wrapped.
  auto reg = resolver.FindOp(BuiltinOperator_CONV_2D, 1);
  EXPECT_EQ(reg->builtin_code, BuiltinOperator_CONV_2D);
  EXPECT_TRUE(reg->prepare == ConvPrepare);
  EXPECT_TRUE(reg->invoke == WrappingInvoke);
  reg = resolver.FindOp(BuiltinOperator_ADD, 1);
  EXPECT_EQ(reg->builtin_code, BuiltinOperator_ADD);
  EXPECT_TRUE(reg->prepare == AddPrepare);
  EXPECT_TRUE(reg->invoke == WrappingInvoke);
}
// The original invoke functions must remain reachable through
// GetWrappedKernelInvoke() after replacement.
TEST(LoggingOpResolverTest, OriginalKernelInvokesAreRetained) {
  MutableOpResolver base_resolver;
  TfLiteRegistration conv_registration = {};
  conv_registration.prepare = ConvPrepare;
  conv_registration.invoke = ConvEval;
  base_resolver.AddBuiltin(BuiltinOperator_CONV_2D, &conv_registration);
  TfLiteRegistration add_registration = {};
  add_registration.prepare = AddPrepare;
  add_registration.invoke = AddEval;
  base_resolver.AddBuiltin(BuiltinOperator_ADD, &add_registration);
  BuiltinOpsSet ops_to_replace = {
      {BuiltinOperator_CONV_2D, 1},
      {BuiltinOperator_ADD, 1},
  };
  LoggingOpResolver resolver(ops_to_replace, CustomOpsSet(), base_resolver,
                             WrappingInvoke, nullptr);
  auto kernel_invoke =
      resolver.GetWrappedKernelInvoke(BuiltinOperator_CONV_2D, 1);
  EXPECT_TRUE(kernel_invoke == ConvEval);
  kernel_invoke = resolver.GetWrappedKernelInvoke(BuiltinOperator_ADD, 1);
  EXPECT_TRUE(kernel_invoke == AddEval);
}
// Ops outside the replacement set must not be resolvable from the logging
// resolver at all (it is not a pass-through for the base resolver).
// NOTE(review): the test name has a typo ("Replaces" -> "Replaced"); renaming
// would change --gtest_filter matches, so it is left as-is.
TEST(LoggingOpResolverTest, OnlyOpsInReplacementSetAreReplaces) {
  MutableOpResolver base_resolver;
  TfLiteRegistration conv_registration = {};
  conv_registration.prepare = ConvPrepare;
  conv_registration.invoke = ConvEval;
  base_resolver.AddBuiltin(BuiltinOperator_CONV_2D, &conv_registration);
  TfLiteRegistration add_registration = {};
  add_registration.prepare = AddPrepare;
  add_registration.invoke = AddEval;
  base_resolver.AddBuiltin(BuiltinOperator_ADD, &add_registration);
  // ADD exists in the base resolver but is deliberately NOT replaced.
  BuiltinOpsSet ops_to_replace = {
      {BuiltinOperator_CONV_2D, 1},
  };
  LoggingOpResolver resolver(ops_to_replace, CustomOpsSet(), base_resolver,
                             WrappingInvoke, nullptr);
  auto reg = resolver.FindOp(BuiltinOperator_CONV_2D, 1);
  EXPECT_EQ(reg->builtin_code, BuiltinOperator_CONV_2D);
  EXPECT_TRUE(reg->prepare == ConvPrepare);
  EXPECT_TRUE(reg->invoke == WrappingInvoke);
  reg = resolver.FindOp(BuiltinOperator_ADD, 1);
  EXPECT_EQ(nullptr, reg);
}
// Custom ops in the replacement set are wrapped exactly like builtins.
TEST(LoggingOpResolverTest, CustomOps) {
  MutableOpResolver base_resolver;
  TfLiteRegistration custom_registration = {};
  custom_registration.prepare = CustomPrepare;
  custom_registration.invoke = CustomEval;
  std::string custom_op_name = "custom";
  base_resolver.AddCustom(custom_op_name.c_str(), &custom_registration);
  CustomOpsSet ops_to_replace = {
      {custom_op_name, 1},
  };
  LoggingOpResolver resolver(BuiltinOpsSet(), ops_to_replace, base_resolver,
                             WrappingInvoke, nullptr);
  auto reg = resolver.FindOp(custom_op_name.c_str(), 1);
  EXPECT_EQ(reg->builtin_code, BuiltinOperator_CUSTOM);
  // Bug fix: EXPECT_EQ on two `const char*` compares pointer identity, which
  // only passes if the resolver aliases the caller's buffer. EXPECT_STREQ
  // compares the string contents, which is what this test actually means.
  EXPECT_STREQ(reg->custom_name, custom_op_name.c_str());
  EXPECT_TRUE(reg->prepare == CustomPrepare);
  EXPECT_TRUE(reg->invoke == WrappingInvoke);
}
// A non-Flex custom op absent from the base resolver must not crash
// construction, even when no error reporter is supplied.
TEST(LoggingOpResolverTest, UnresolvedCustomOps) {
  MutableOpResolver base_resolver;
  const std::string missing_op = "unresolved_custom_op";
  CustomOpsSet ops_to_replace = {{missing_op, 1}};
  LoggingOpResolver(BuiltinOpsSet(), ops_to_replace, base_resolver,
                    WrappingInvoke, nullptr);
}
// Builtins missing from an empty base resolver are skipped without crashing
// when the error reporter is null.
TEST(LoggingOpResolverTest, UnresolvedBuiltinOps) {
  MutableOpResolver empty_base_resolver;
  BuiltinOpsSet ops_to_replace = {{BuiltinOperator_CONV_2D, 1},
                                  {BuiltinOperator_ADD, 1}};
  LoggingOpResolver resolver(ops_to_replace, CustomOpsSet(),
                             empty_base_resolver, WrappingInvoke, nullptr);
}
// Unresolved Flex ops are tolerated (no error), but no wrapped registration
// is created for them.
TEST(LoggingOpResolverTest, FlexOps) {
  MutableOpResolver base_resolver;
  const std::string flex_op_name = "FlexAdd";
  CustomOpsSet ops_to_replace = {{flex_op_name, 1}};
  LoggingOpResolver resolver(BuiltinOpsSet(), ops_to_replace, base_resolver,
                             WrappingInvoke, nullptr);
  auto reg = resolver.FindOp(flex_op_name.c_str(), 1);
  EXPECT_TRUE(reg == nullptr);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/optimize/calibration/logging_op_resolver.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/optimize/calibration/logging_op_resolver_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e1b70a27-1ffd-49b8-993f-ba87bd51d44b | cpp | tensorflow/tensorflow | dot_operand_converter | third_party/xla/xla/service/gpu/transforms/dot_operand_converter.cc | third_party/xla/xla/service/gpu/transforms/dot_operand_converter_test.cc | #include "xla/service/gpu/transforms/dot_operand_converter.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
namespace xla::gpu {
// Matches dot instructions whose two operands have different element types,
// except for mixed-FP8 dots (F8E4M3FN x F8E5M2), which are left untouched.
// One operand type must also be the higher-precision of the two shapes, so a
// single convert can reconcile them.
bool DotOperandConverter::InstructionMatchesPattern(
    HloInstruction* instruction) {
  if (instruction->opcode() != HloOpcode::kDot) {
    return false;
  }
  HloInstruction* lhs = instruction->mutable_operand(0);
  HloInstruction* rhs = instruction->mutable_operand(1);
  PrimitiveType lhs_type = lhs->shape().element_type();
  PrimitiveType rhs_type = rhs->shape().element_type();
  if (lhs_type == rhs_type) {
    return false;
  }
  // Do not convert between FP8 types; such dots are handled downstream.
  // (Direct comparisons instead of building an absl::flat_hash_set on every
  // call — same semantics, no per-call allocation.)
  auto is_non_converting_f8 = [](PrimitiveType type) {
    return type == F8E4M3FN || type == F8E5M2;
  };
  if (is_non_converting_f8(lhs_type) && is_non_converting_f8(rhs_type)) {
    return false;
  }
  PrimitiveType desired_type =
      ShapeUtil::HigherPrecisionElementType(lhs->shape(), rhs->shape());
  return desired_type == lhs_type || desired_type == rhs_type;
}
// Upcasts the lower-precision operand of a matched dot by inserting a
// convert to the higher-precision element type, replacing that operand in
// place. Returns nullptr (no replacement root) since the dot itself is kept;
// only its operand changed.
absl::StatusOr<HloInstruction*> DotOperandConverter::ExpandInstruction(
    HloInstruction* instruction) {
  HloInstruction* lhs = instruction->mutable_operand(0);
  HloInstruction* rhs = instruction->mutable_operand(1);
  const PrimitiveType desired_type =
      ShapeUtil::HigherPrecisionElementType(lhs->shape(), rhs->shape());
  // Whichever operand already has the desired type stays; the other one is
  // wrapped in a convert.
  const bool lhs_has_desired_type =
      desired_type == lhs->shape().element_type();
  const int operand_to_replace = lhs_has_desired_type ? 1 : 0;
  HloInstruction* lower_precision_operand = lhs_has_desired_type ? rhs : lhs;
  auto converted_shape = lower_precision_operand->shape();
  converted_shape.set_element_type(desired_type);
  HloInstruction* convert_inst = instruction->AddInstruction(
      HloInstruction::CreateConvert(converted_shape, lower_precision_operand));
  TF_RETURN_IF_ERROR(instruction->ReplaceOperandWithDifferentShape(
      operand_to_replace, convert_inst));
  return nullptr;
}
} | #include "xla/service/gpu/transforms/dot_operand_converter.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/primitive_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
namespace op = ::xla::testing::opcode_matchers;
// Fixture for DotOperandConverter tests.
class DotOperandConverterTest : public HloTestBase {
 public:
  // Builds a dot(p0, p1) HLO module with the given operand/result element
  // types, runs DotOperandConverter, and verifies that exactly the
  // lower-precision operand got wrapped in a convert to the other operand's
  // type. `left_less_precise` selects which operand is expected to be upcast.
  void TestConvert(bool left_less_precise, PrimitiveType lhs_type,
                   PrimitiveType rhs_type, PrimitiveType result_type) {
    // $0/$1/$2 are filled with the lowercase HLO names of the three types.
    absl::string_view module_tmpl = R"(
  HloModule module

  ENTRY main {
    p0 = $0[2,3]{1,0} parameter(0)
    p1 = $1[3,2]{1,0} parameter(1)
    ROOT dot = $2[2,2]{1,0} dot(p0, p1), lhs_contracting_dims={1},
                                         rhs_contracting_dims={0}
  })";
    auto module_string = absl::Substitute(
        module_tmpl, primitive_util::LowercasePrimitiveTypeName(lhs_type),
        primitive_util::LowercasePrimitiveTypeName(rhs_type),
        primitive_util::LowercasePrimitiveTypeName(result_type));
    TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                            ParseAndReturnVerifiedModule(module_string));
    TF_ASSERT_OK_AND_ASSIGN(bool upcasted,
                            DotOperandConverter().Run(module.get()));
    EXPECT_TRUE(upcasted);
    if (left_less_precise) {
      // Expect dot(convert(p0), p1) where convert(p0) has the RHS type.
      auto original_lhs = op::Parameter(0);
      auto upcasted_lhs =
          AllOf(op::Convert(original_lhs),
                op::Shape(absl::Substitute(
                    "$0[2,3]{1,0}",
                    primitive_util::LowercasePrimitiveTypeName(rhs_type))));
      EXPECT_THAT(
          module->entry_computation()->root_instruction(),
          AllOf(op::Dot(upcasted_lhs, op::Parameter(1)),
                op::Shape(absl::Substitute(
                    "$0[2,2]{1,0}",
                    primitive_util::LowercasePrimitiveTypeName(result_type)))));
    } else {
      // Expect dot(p0, convert(p1)) where convert(p1) has the LHS type.
      auto original_rhs = op::Parameter(1);
      auto upcasted_rhs =
          AllOf(op::Convert(original_rhs),
                op::Shape(absl::Substitute(
                    "$0[3,2]{1,0}",
                    primitive_util::LowercasePrimitiveTypeName(lhs_type))));
      EXPECT_THAT(
          module->entry_computation()->root_instruction(),
          AllOf(op::Dot(op::Parameter(0), upcasted_rhs),
                op::Shape(absl::Substitute(
                    "$0[2,2]{1,0}",
                    primitive_util::LowercasePrimitiveTypeName(result_type)))));
    }
  }
};
// Exercises both directions: s8 lhs upcast to bf16, then bf16/s8 swapped so
// the rhs is the one upcast.
TEST_F(DotOperandConverterTest, ConvertsLeftAndRight) {
  TestConvert(true, S8, BF16, F32);
  TestConvert(false, BF16, S8, F32);
}
// Operands of identical element type must be left untouched.
TEST_F(DotOperandConverterTest, NoConvertHappensWithSameTypes) {
  absl::string_view hlo_text = R"(
  HloModule module

  ENTRY main {
    p0 = s8[2,3]{1,0} parameter(0)
    p1 = s8[3,2]{1,0} parameter(1)
    ROOT dot = bf16[2,2]{1,0} dot(p0, p1), lhs_contracting_dims={1},
                                           rhs_contracting_dims={0}
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, DotOperandConverter().Run(module.get()));
  EXPECT_FALSE(changed);
}
// Mixed FP8 operand types (f8e4m3fn x f8e5m2) are explicitly excluded from
// conversion.
TEST_F(DotOperandConverterTest, NoConvertFromF8toF8) {
  absl::string_view hlo_text = R"(
  HloModule module

  ENTRY main {
    p0 = f8e4m3fn[2,3]{1,0} parameter(0)
    p1 = f8e5m2[3,2]{1,0} parameter(1)
    ROOT dot = bf16[2,2]{1,0} dot(p0, p1), lhs_contracting_dims={1},
                                           rhs_contracting_dims={0}
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, DotOperandConverter().Run(module.get()));
  EXPECT_FALSE(changed);
}
// Smoke test: the full optimization pipeline must accept a mixed-precision
// dot, i.e. the converter runs as part of compilation without errors.
TEST_F(DotOperandConverterTest, CompilerOptimizesUsingDotOperandConverter) {
  absl::string_view hlo_text = R"(
  HloModule module

  ENTRY main {
    p0 = s8[2,3]{1,0} parameter(0)
    p1 = bf16[3,2]{1,0} parameter(1)
    ROOT dot = bf16[2,2]{1,0} dot(p0, p1), lhs_contracting_dims={1},
                                           rhs_contracting_dims={0}
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
                          GetOptimizedModule(hlo_text));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/dot_operand_converter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/dot_operand_converter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |